author     Ali Ijaz Sheikh <ofrobots@google.com>  2015-11-30 21:22:40 -0800
committer  Ali Ijaz Sheikh <ofrobots@google.com>  2015-12-04 00:06:01 -0800
commit     8a43a3d7619fde59f0d1f2fad05d8ae7d1732b02 (patch)
tree       8698af91526d0eac90840dcba1e5b565160105c4
parent     8a2acd4cc9807510786b4b6f7ad3a947aeb3a14c (diff)
deps: upgrade V8 to 4.7.80.24
Pick up the latest branch head for V8 4.7:
https://github.com/v8/v8/commit/be169f8df059040e6a53ec1dd4579d8bca2167b5

Full change history for the 4.7 branch:
https://chromium.googlesource.com/v8/v8.git/+log/branch-heads/4.7

V8 blog post about what is new on V8 4.7:
http://v8project.blogspot.de/2015/10/v8-release-47.html

PR-URL: https://github.com/nodejs/node/pull/4106
Reviewed-By: bnoordhuis - Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: targos - Michaël Zasso <mic.besace@gmail.com>
Reviewed-By: rvagg - Rod Vagg <rod@vagg.org>
-rw-r--r--deps/v8/.gitignore1
-rw-r--r--deps/v8/AUTHORS4
-rw-r--r--deps/v8/BUILD.gn212
-rw-r--r--deps/v8/ChangeLog1209
-rw-r--r--deps/v8/DEPS19
-rw-r--r--deps/v8/Makefile6
-rw-r--r--deps/v8/PRESUBMIT.py5
-rw-r--r--deps/v8/README.md7
-rwxr-xr-xdeps/v8/build/download_gold_plugin.py30
-rw-r--r--deps/v8/build/features.gypi6
-rw-r--r--deps/v8/build/isolate.gypi74
-rw-r--r--deps/v8/build/standalone.gypi43
-rw-r--r--deps/v8/build/toolchain.gypi35
-rw-r--r--deps/v8/docs/arm_debugging_with_the_simulator.md205
-rw-r--r--deps/v8/docs/becoming_v8_committer.md38
-rw-r--r--deps/v8/docs/building_with_gyp.md260
-rw-r--r--deps/v8/docs/contributing.md32
-rw-r--r--deps/v8/docs/cross_compiling_for_arm.md151
-rw-r--r--deps/v8/docs/d8_on_android.md101
-rw-r--r--deps/v8/docs/debugger_protocol.md934
-rw-r--r--deps/v8/docs/gdb_jit_interface.md63
-rw-r--r--deps/v8/docs/handling_of_ports.md24
-rw-r--r--deps/v8/docs/i18n_support.md44
-rw-r--r--deps/v8/docs/javascript.md6
-rw-r--r--deps/v8/docs/javascript_stack_trace_api.md161
-rw-r--r--deps/v8/docs/merging_and_patching.md66
-rw-r--r--deps/v8/docs/profiling_chromium_with_v8.md34
-rw-r--r--deps/v8/docs/release_process.md57
-rw-r--r--deps/v8/docs/runtime_functions.md14
-rw-r--r--deps/v8/docs/source.md39
-rw-r--r--deps/v8/docs/testing.md58
-rw-r--r--deps/v8/docs/triaging_issues.md22
-rw-r--r--deps/v8/docs/using_git.md147
-rw-r--r--deps/v8/docs/v8_c_plus_plus_styleand_sops.md7
-rw-r--r--deps/v8/docs/v8_committers_responsibility.md41
-rw-r--r--deps/v8/docs/v8_profiler.md141
-rw-r--r--deps/v8/include/v8-debug.h2
-rw-r--r--deps/v8/include/v8-profiler.h2
-rw-r--r--deps/v8/include/v8-testing.h2
-rw-r--r--deps/v8/include/v8-util.h2
-rw-r--r--deps/v8/include/v8-version.h6
-rw-r--r--deps/v8/include/v8.h42
-rw-r--r--deps/v8/infra/config/cq.cfg2
-rw-r--r--deps/v8/snapshot_toolchain.gni44
-rw-r--r--deps/v8/src/DEPS8
-rw-r--r--deps/v8/src/accessors.cc42
-rw-r--r--deps/v8/src/api-natives.cc57
-rw-r--r--deps/v8/src/api-natives.h5
-rw-r--r--deps/v8/src/api.cc341
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h6
-rw-r--r--deps/v8/src/arm/assembler-arm.cc35
-rw-r--r--deps/v8/src/arm/assembler-arm.h15
-rw-r--r--deps/v8/src/arm/builtins-arm.cc874
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc1157
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h4
-rw-r--r--deps/v8/src/arm/codegen-arm.cc12
-rw-r--r--deps/v8/src/arm/cpu-arm.cc19
-rw-r--r--deps/v8/src/arm/frames-arm.cc1
-rw-r--r--deps/v8/src/arm/interface-descriptors-arm.cc51
-rw-r--r--deps/v8/src/arm/lithium-arm.cc60
-rw-r--r--deps/v8/src/arm/lithium-arm.h81
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.cc339
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.h14
-rw-r--r--deps/v8/src/arm/lithium-gap-resolver-arm.cc4
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc280
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h81
-rw-r--r--deps/v8/src/arm64/assembler-arm64-inl.h2
-rw-r--r--deps/v8/src/arm64/assembler-arm64.cc1
-rw-r--r--deps/v8/src/arm64/assembler-arm64.h2
-rw-r--r--deps/v8/src/arm64/builtins-arm64.cc863
-rw-r--r--deps/v8/src/arm64/code-stubs-arm64.cc1041
-rw-r--r--deps/v8/src/arm64/codegen-arm64.cc4
-rw-r--r--deps/v8/src/arm64/cpu-arm64.cc14
-rw-r--r--deps/v8/src/arm64/frames-arm64.cc2
-rw-r--r--deps/v8/src/arm64/interface-descriptors-arm64.cc55
-rw-r--r--deps/v8/src/arm64/lithium-arm64.cc63
-rw-r--r--deps/v8/src/arm64/lithium-arm64.h84
-rw-r--r--deps/v8/src/arm64/lithium-codegen-arm64.cc292
-rw-r--r--deps/v8/src/arm64/lithium-codegen-arm64.h1
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.cc202
-rw-r--r--deps/v8/src/arm64/macro-assembler-arm64.h63
-rw-r--r--deps/v8/src/arm64/simulator-arm64.cc9
-rw-r--r--deps/v8/src/arm64/simulator-arm64.h11
-rw-r--r--deps/v8/src/array-iterator.js95
-rw-r--r--deps/v8/src/array.js283
-rw-r--r--deps/v8/src/arraybuffer.js6
-rw-r--r--deps/v8/src/assembler.cc49
-rw-r--r--deps/v8/src/assembler.h8
-rw-r--r--deps/v8/src/assert-scope.cc2
-rw-r--r--deps/v8/src/assert-scope.h9
-rw-r--r--deps/v8/src/ast-expression-visitor.cc356
-rw-r--r--deps/v8/src/ast-expression-visitor.h49
-rw-r--r--deps/v8/src/ast-literal-reindexer.cc9
-rw-r--r--deps/v8/src/ast-literal-reindexer.h1
-rw-r--r--deps/v8/src/ast-numbering.cc29
-rw-r--r--deps/v8/src/ast-value-factory.h71
-rw-r--r--deps/v8/src/ast.cc219
-rw-r--r--deps/v8/src/ast.h377
-rw-r--r--deps/v8/src/atomic-utils.h174
-rw-r--r--deps/v8/src/bailout-reason.h5
-rw-r--r--deps/v8/src/base.isolate15
-rw-r--r--deps/v8/src/base/atomicops_internals_x86_gcc.cc2
-rw-r--r--deps/v8/src/base/bits.h8
-rw-r--r--deps/v8/src/base/build_config.h4
-rw-r--r--deps/v8/src/base/platform/condition-variable.cc4
-rw-r--r--deps/v8/src/base/platform/platform-aix.cc4
-rw-r--r--deps/v8/src/base/platform/platform-cygwin.cc4
-rw-r--r--deps/v8/src/base/platform/platform-freebsd.cc5
-rw-r--r--deps/v8/src/base/platform/platform-linux.cc7
-rw-r--r--deps/v8/src/base/platform/platform-macos.cc4
-rw-r--r--deps/v8/src/base/platform/platform-openbsd.cc5
-rw-r--r--deps/v8/src/base/platform/platform-posix.cc2
-rw-r--r--deps/v8/src/base/platform/platform-qnx.cc5
-rw-r--r--deps/v8/src/base/platform/platform-solaris.cc2
-rw-r--r--deps/v8/src/base/platform/platform-win32.cc2
-rw-r--r--deps/v8/src/base/platform/platform.h9
-rw-r--r--deps/v8/src/base/smart-pointers.h2
-rw-r--r--deps/v8/src/bit-vector.cc2
-rw-r--r--deps/v8/src/bootstrapper.cc613
-rw-r--r--deps/v8/src/bootstrapper.h10
-rw-r--r--deps/v8/src/builtins.cc1482
-rw-r--r--deps/v8/src/builtins.h214
-rw-r--r--deps/v8/src/cancelable-task.cc2
-rw-r--r--deps/v8/src/code-factory.cc37
-rw-r--r--deps/v8/src/code-factory.h9
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc205
-rw-r--r--deps/v8/src/code-stubs.cc77
-rw-r--r--deps/v8/src/code-stubs.h296
-rw-r--r--deps/v8/src/code.h52
-rw-r--r--deps/v8/src/codegen.cc42
-rw-r--r--deps/v8/src/codegen.h1
-rw-r--r--deps/v8/src/collection-iterator.js32
-rw-r--r--deps/v8/src/collection.js64
-rw-r--r--deps/v8/src/compilation-cache.cc10
-rw-r--r--deps/v8/src/compilation-cache.h4
-rw-r--r--deps/v8/src/compilation-statistics.cc12
-rw-r--r--deps/v8/src/compiler.cc124
-rw-r--r--deps/v8/src/compiler.h53
-rw-r--r--deps/v8/src/compiler/DEPS3
-rw-r--r--deps/v8/src/compiler/access-builder.cc8
-rw-r--r--deps/v8/src/compiler/access-builder.h2
-rw-r--r--deps/v8/src/compiler/arm/code-generator-arm.cc60
-rw-r--r--deps/v8/src/compiler/arm/instruction-selector-arm.cc83
-rw-r--r--deps/v8/src/compiler/arm64/code-generator-arm64.cc92
-rw-r--r--deps/v8/src/compiler/arm64/instruction-codes-arm64.h1
-rw-r--r--deps/v8/src/compiler/arm64/instruction-selector-arm64.cc52
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.cc271
-rw-r--r--deps/v8/src/compiler/ast-graph-builder.h20
-rw-r--r--deps/v8/src/compiler/ast-loop-assignment-analyzer.cc9
-rw-r--r--deps/v8/src/compiler/basic-block-instrumentor.cc5
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.cc547
-rw-r--r--deps/v8/src/compiler/bytecode-graph-builder.h174
-rw-r--r--deps/v8/src/compiler/change-lowering.cc8
-rw-r--r--deps/v8/src/compiler/code-generator-impl.h4
-rw-r--r--deps/v8/src/compiler/code-generator.cc15
-rw-r--r--deps/v8/src/compiler/common-node-cache.cc1
-rw-r--r--deps/v8/src/compiler/common-operator-reducer.cc24
-rw-r--r--deps/v8/src/compiler/common-operator.cc16
-rw-r--r--deps/v8/src/compiler/common-operator.h4
-rw-r--r--deps/v8/src/compiler/control-builders.cc2
-rw-r--r--deps/v8/src/compiler/control-flow-optimizer.cc8
-rw-r--r--deps/v8/src/compiler/dead-code-elimination.cc4
-rw-r--r--deps/v8/src/compiler/diamond.h30
-rw-r--r--deps/v8/src/compiler/frame.h6
-rw-r--r--deps/v8/src/compiler/graph-reducer.cc2
-rw-r--r--deps/v8/src/compiler/graph-visualizer.cc86
-rw-r--r--deps/v8/src/compiler/graph.cc11
-rw-r--r--deps/v8/src/compiler/graph.h4
-rw-r--r--deps/v8/src/compiler/greedy-allocator.cc522
-rw-r--r--deps/v8/src/compiler/greedy-allocator.h80
-rw-r--r--deps/v8/src/compiler/ia32/code-generator-ia32.cc42
-rw-r--r--deps/v8/src/compiler/ia32/instruction-codes-ia32.h2
-rw-r--r--deps/v8/src/compiler/ia32/instruction-selector-ia32.cc18
-rw-r--r--deps/v8/src/compiler/instruction-codes.h16
-rw-r--r--deps/v8/src/compiler/instruction-selector-impl.h38
-rw-r--r--deps/v8/src/compiler/instruction-selector.cc54
-rw-r--r--deps/v8/src/compiler/instruction-selector.h8
-rw-r--r--deps/v8/src/compiler/instruction.cc67
-rw-r--r--deps/v8/src/compiler/instruction.h14
-rw-r--r--deps/v8/src/compiler/interpreter-assembler.cc256
-rw-r--r--deps/v8/src/compiler/interpreter-assembler.h79
-rw-r--r--deps/v8/src/compiler/js-builtin-reducer.cc15
-rw-r--r--deps/v8/src/compiler/js-context-specialization.cc8
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.cc169
-rw-r--r--deps/v8/src/compiler/js-generic-lowering.h3
-rw-r--r--deps/v8/src/compiler/js-graph.cc18
-rw-r--r--deps/v8/src/compiler/js-graph.h5
-rw-r--r--deps/v8/src/compiler/js-inlining.cc72
-rw-r--r--deps/v8/src/compiler/js-inlining.h5
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.cc57
-rw-r--r--deps/v8/src/compiler/js-intrinsic-lowering.h1
-rw-r--r--deps/v8/src/compiler/js-operator.cc139
-rw-r--r--deps/v8/src/compiler/js-operator.h82
-rw-r--r--deps/v8/src/compiler/js-type-feedback-lowering.cc11
-rw-r--r--deps/v8/src/compiler/js-type-feedback.cc12
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.cc249
-rw-r--r--deps/v8/src/compiler/js-typed-lowering.h1
-rw-r--r--deps/v8/src/compiler/linkage.cc35
-rw-r--r--deps/v8/src/compiler/linkage.h3
-rw-r--r--deps/v8/src/compiler/live-range-separator.cc172
-rw-r--r--deps/v8/src/compiler/live-range-separator.h60
-rw-r--r--deps/v8/src/compiler/machine-operator-reducer.cc91
-rw-r--r--deps/v8/src/compiler/machine-operator.cc41
-rw-r--r--deps/v8/src/compiler/machine-operator.h8
-rw-r--r--deps/v8/src/compiler/mips/code-generator-mips.cc158
-rw-r--r--deps/v8/src/compiler/mips/instruction-selector-mips.cc42
-rw-r--r--deps/v8/src/compiler/mips64/code-generator-mips64.cc164
-rw-r--r--deps/v8/src/compiler/mips64/instruction-codes-mips64.h2
-rw-r--r--deps/v8/src/compiler/mips64/instruction-selector-mips64.cc93
-rw-r--r--deps/v8/src/compiler/node-matchers.h44
-rw-r--r--deps/v8/src/compiler/node-properties.cc8
-rw-r--r--deps/v8/src/compiler/node-properties.h26
-rw-r--r--deps/v8/src/compiler/node.cc3
-rw-r--r--deps/v8/src/compiler/node.h12
-rw-r--r--deps/v8/src/compiler/opcodes.h8
-rw-r--r--deps/v8/src/compiler/operator-properties.cc1
-rw-r--r--deps/v8/src/compiler/operator.h28
-rw-r--r--deps/v8/src/compiler/osr.cc10
-rw-r--r--deps/v8/src/compiler/pipeline.cc137
-rw-r--r--deps/v8/src/compiler/pipeline.h20
-rw-r--r--deps/v8/src/compiler/ppc/code-generator-ppc.cc55
-rw-r--r--deps/v8/src/compiler/ppc/instruction-codes-ppc.h4
-rw-r--r--deps/v8/src/compiler/ppc/instruction-selector-ppc.cc30
-rw-r--r--deps/v8/src/compiler/preprocess-live-ranges.cc169
-rw-r--r--deps/v8/src/compiler/preprocess-live-ranges.h35
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.cc161
-rw-r--r--deps/v8/src/compiler/raw-machine-assembler.h293
-rw-r--r--deps/v8/src/compiler/register-allocator.cc1097
-rw-r--r--deps/v8/src/compiler/register-allocator.h377
-rw-r--r--deps/v8/src/compiler/representation-change.h22
-rw-r--r--deps/v8/src/compiler/schedule.cc8
-rw-r--r--deps/v8/src/compiler/select-lowering.cc3
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc197
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h3
-rw-r--r--deps/v8/src/compiler/simplified-operator-reducer.cc8
-rw-r--r--deps/v8/src/compiler/simplified-operator.cc27
-rw-r--r--deps/v8/src/compiler/simplified-operator.h1
-rw-r--r--deps/v8/src/compiler/source-position.cc1
-rw-r--r--deps/v8/src/compiler/tail-call-optimization.cc4
-rw-r--r--deps/v8/src/compiler/typer.cc1070
-rw-r--r--deps/v8/src/compiler/typer.h7
-rw-r--r--deps/v8/src/compiler/verifier.cc112
-rw-r--r--deps/v8/src/compiler/verifier.h24
-rw-r--r--deps/v8/src/compiler/x64/code-generator-x64.cc58
-rw-r--r--deps/v8/src/compiler/x64/instruction-codes-x64.h4
-rw-r--r--deps/v8/src/compiler/x64/instruction-selector-x64.cc30
-rw-r--r--deps/v8/src/compiler/x87/code-generator-x87.cc50
-rw-r--r--deps/v8/src/compiler/x87/instruction-codes-x87.h2
-rw-r--r--deps/v8/src/compiler/x87/instruction-selector-x87.cc19
-rw-r--r--deps/v8/src/context-measure.cc6
-rw-r--r--deps/v8/src/contexts-inl.h148
-rw-r--r--deps/v8/src/contexts.cc124
-rw-r--r--deps/v8/src/contexts.h426
-rw-r--r--deps/v8/src/conversions-inl.h1
-rw-r--r--deps/v8/src/conversions.cc5
-rw-r--r--deps/v8/src/counters.cc3
-rw-r--r--deps/v8/src/counters.h5
-rw-r--r--deps/v8/src/d8.cc11
-rw-r--r--deps/v8/src/d8.gyp21
-rw-r--r--deps/v8/src/d8.isolate16
-rw-r--r--deps/v8/src/date.cc2
-rw-r--r--deps/v8/src/date.js27
-rw-r--r--deps/v8/src/dateparser-inl.h28
-rw-r--r--deps/v8/src/dateparser.h24
-rw-r--r--deps/v8/src/debug/debug-evaluate.cc11
-rw-r--r--deps/v8/src/debug/debug-frames.cc5
-rw-r--r--deps/v8/src/debug/debug-scopes.cc131
-rw-r--r--deps/v8/src/debug/debug-scopes.h3
-rw-r--r--deps/v8/src/debug/debug.cc237
-rw-r--r--deps/v8/src/debug/debug.h29
-rw-r--r--deps/v8/src/debug/liveedit.cc7
-rw-r--r--deps/v8/src/debug/mirrors.js23
-rw-r--r--deps/v8/src/deoptimizer.cc12
-rw-r--r--deps/v8/src/disassembler.cc2
-rw-r--r--deps/v8/src/dtoa.h2
-rw-r--r--deps/v8/src/effects.h6
-rw-r--r--deps/v8/src/elements-kind.h14
-rw-r--r--deps/v8/src/elements.cc748
-rw-r--r--deps/v8/src/elements.h42
-rw-r--r--deps/v8/src/execution.cc291
-rw-r--r--deps/v8/src/execution.h83
-rw-r--r--deps/v8/src/expression-classifier.h73
-rw-r--r--deps/v8/src/extensions/externalize-string-extension.cc4
-rw-r--r--deps/v8/src/extensions/free-buffer-extension.cc2
-rw-r--r--deps/v8/src/extensions/statistics-extension.cc4
-rw-r--r--deps/v8/src/extensions/trigger-failure-extension.cc4
-rw-r--r--deps/v8/src/factory.cc231
-rw-r--r--deps/v8/src/factory.h101
-rw-r--r--deps/v8/src/fast-dtoa.h2
-rw-r--r--deps/v8/src/fixed-dtoa.h2
-rw-r--r--deps/v8/src/flag-definitions.h59
-rw-r--r--deps/v8/src/flags.cc15
-rw-r--r--deps/v8/src/frames-inl.h1
-rw-r--r--deps/v8/src/frames.cc94
-rw-r--r--deps/v8/src/frames.h26
-rw-r--r--deps/v8/src/full-codegen/OWNERS2
-rw-r--r--deps/v8/src/full-codegen/arm/full-codegen-arm.cc649
-rw-r--r--deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc659
-rw-r--r--deps/v8/src/full-codegen/full-codegen.cc134
-rw-r--r--deps/v8/src/full-codegen/full-codegen.h124
-rw-r--r--deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc639
-rw-r--r--deps/v8/src/full-codegen/mips/full-codegen-mips.cc655
-rw-r--r--deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc655
-rw-r--r--deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc654
-rw-r--r--deps/v8/src/full-codegen/x64/full-codegen-x64.cc633
-rw-r--r--deps/v8/src/full-codegen/x87/full-codegen-x87.cc637
-rw-r--r--deps/v8/src/futex-emulation.cc89
-rw-r--r--deps/v8/src/futex-emulation.h9
-rw-r--r--deps/v8/src/generator.js6
-rw-r--r--deps/v8/src/global-handles.cc5
-rw-r--r--deps/v8/src/globals.h13
-rw-r--r--deps/v8/src/handles-inl.h6
-rw-r--r--deps/v8/src/handles.cc5
-rw-r--r--deps/v8/src/handles.h53
-rw-r--r--deps/v8/src/harmony-array-includes.js4
-rw-r--r--deps/v8/src/harmony-array.js50
-rw-r--r--deps/v8/src/harmony-atomics.js45
-rw-r--r--deps/v8/src/harmony-concat-spreadable.js5
-rw-r--r--deps/v8/src/harmony-object.js56
-rw-r--r--deps/v8/src/harmony-reflect.js6
-rw-r--r--deps/v8/src/harmony-regexp.js7
-rw-r--r--deps/v8/src/harmony-sharedarraybuffer.js3
-rw-r--r--deps/v8/src/harmony-simd.js537
-rw-r--r--deps/v8/src/harmony-spread.js12
-rw-r--r--deps/v8/src/harmony-tostring.js3
-rw-r--r--deps/v8/src/harmony-typedarray.js13
-rw-r--r--deps/v8/src/heap/OWNERS1
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.cc138
-rw-r--r--deps/v8/src/heap/array-buffer-tracker.h73
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.cc104
-rw-r--r--deps/v8/src/heap/gc-idle-time-handler.h79
-rw-r--r--deps/v8/src/heap/gc-tracer.cc12
-rw-r--r--deps/v8/src/heap/gc-tracer.h3
-rw-r--r--deps/v8/src/heap/heap-inl.h172
-rw-r--r--deps/v8/src/heap/heap.cc1804
-rw-r--r--deps/v8/src/heap/heap.h2434
-rw-r--r--deps/v8/src/heap/incremental-marking-inl.h1
-rw-r--r--deps/v8/src/heap/incremental-marking-job.cc145
-rw-r--r--deps/v8/src/heap/incremental-marking-job.h81
-rw-r--r--deps/v8/src/heap/incremental-marking.cc61
-rw-r--r--deps/v8/src/heap/incremental-marking.h29
-rw-r--r--deps/v8/src/heap/mark-compact-inl.h21
-rw-r--r--deps/v8/src/heap/mark-compact.cc1091
-rw-r--r--deps/v8/src/heap/mark-compact.h249
-rw-r--r--deps/v8/src/heap/memory-reducer.cc28
-rw-r--r--deps/v8/src/heap/object-stats.cc258
-rw-r--r--deps/v8/src/heap/object-stats.h102
-rw-r--r--deps/v8/src/heap/objects-visiting-inl.h51
-rw-r--r--deps/v8/src/heap/objects-visiting.cc6
-rw-r--r--deps/v8/src/heap/objects-visiting.h19
-rw-r--r--deps/v8/src/heap/scavenge-job.cc116
-rw-r--r--deps/v8/src/heap/scavenge-job.h80
-rw-r--r--deps/v8/src/heap/scavenger-inl.h51
-rw-r--r--deps/v8/src/heap/scavenger.cc496
-rw-r--r--deps/v8/src/heap/scavenger.h72
-rw-r--r--deps/v8/src/heap/slots-buffer.cc161
-rw-r--r--deps/v8/src/heap/slots-buffer.h175
-rw-r--r--deps/v8/src/heap/spaces-inl.h9
-rw-r--r--deps/v8/src/heap/spaces.cc367
-rw-r--r--deps/v8/src/heap/spaces.h734
-rw-r--r--deps/v8/src/heap/store-buffer-inl.h14
-rw-r--r--deps/v8/src/heap/store-buffer.cc65
-rw-r--r--deps/v8/src/heap/store-buffer.h31
-rw-r--r--deps/v8/src/hydrogen-dce.cc1
-rw-r--r--deps/v8/src/hydrogen-instructions.cc147
-rw-r--r--deps/v8/src/hydrogen-instructions.h294
-rw-r--r--deps/v8/src/hydrogen-sce.cc1
-rw-r--r--deps/v8/src/hydrogen.cc617
-rw-r--r--deps/v8/src/hydrogen.h71
-rw-r--r--deps/v8/src/i18n.js23
-rw-r--r--deps/v8/src/ia32/assembler-ia32-inl.h10
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc4
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h3
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc892
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc1211
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.h2
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc10
-rw-r--r--deps/v8/src/ia32/frames-ia32.cc1
-rw-r--r--deps/v8/src/ia32/frames-ia32.h1
-rw-r--r--deps/v8/src/ia32/interface-descriptors-ia32.cc67
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.cc295
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.h14
-rw-r--r--deps/v8/src/ia32/lithium-ia32.cc60
-rw-r--r--deps/v8/src/ia32/lithium-ia32.h83
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc208
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h84
-rw-r--r--deps/v8/src/ia32/simulator-ia32.cc1
-rw-r--r--deps/v8/src/ic/access-compiler.cc20
-rw-r--r--deps/v8/src/ic/access-compiler.h9
-rw-r--r--deps/v8/src/ic/arm/access-compiler-arm.cc2
-rw-r--r--deps/v8/src/ic/arm/handler-compiler-arm.cc28
-rw-r--r--deps/v8/src/ic/arm/ic-arm.cc24
-rw-r--r--deps/v8/src/ic/arm/ic-compiler-arm.cc5
-rw-r--r--deps/v8/src/ic/arm/stub-cache-arm.cc10
-rw-r--r--deps/v8/src/ic/arm64/access-compiler-arm64.cc2
-rw-r--r--deps/v8/src/ic/arm64/handler-compiler-arm64.cc29
-rw-r--r--deps/v8/src/ic/arm64/ic-arm64.cc16
-rw-r--r--deps/v8/src/ic/arm64/ic-compiler-arm64.cc5
-rw-r--r--deps/v8/src/ic/arm64/stub-cache-arm64.cc10
-rw-r--r--deps/v8/src/ic/handler-compiler.cc43
-rw-r--r--deps/v8/src/ic/handler-compiler.h1
-rw-r--r--deps/v8/src/ic/ia32/access-compiler-ia32.cc3
-rw-r--r--deps/v8/src/ic/ia32/handler-compiler-ia32.cc42
-rw-r--r--deps/v8/src/ic/ia32/ic-compiler-ia32.cc5
-rw-r--r--deps/v8/src/ic/ia32/ic-ia32.cc26
-rw-r--r--deps/v8/src/ic/ia32/stub-cache-ia32.cc50
-rw-r--r--deps/v8/src/ic/ic-compiler.cc109
-rw-r--r--deps/v8/src/ic/ic-compiler.h15
-rw-r--r--deps/v8/src/ic/ic-inl.h1
-rw-r--r--deps/v8/src/ic/ic-state.cc20
-rw-r--r--deps/v8/src/ic/ic-state.h2
-rw-r--r--deps/v8/src/ic/ic.cc515
-rw-r--r--deps/v8/src/ic/ic.h32
-rw-r--r--deps/v8/src/ic/mips/access-compiler-mips.cc2
-rw-r--r--deps/v8/src/ic/mips/handler-compiler-mips.cc28
-rw-r--r--deps/v8/src/ic/mips/ic-compiler-mips.cc5
-rw-r--r--deps/v8/src/ic/mips/ic-mips.cc14
-rw-r--r--deps/v8/src/ic/mips/stub-cache-mips.cc10
-rw-r--r--deps/v8/src/ic/mips64/access-compiler-mips64.cc2
-rw-r--r--deps/v8/src/ic/mips64/handler-compiler-mips64.cc30
-rw-r--r--deps/v8/src/ic/mips64/ic-compiler-mips64.cc5
-rw-r--r--deps/v8/src/ic/mips64/ic-mips64.cc23
-rw-r--r--deps/v8/src/ic/mips64/stub-cache-mips64.cc10
-rw-r--r--deps/v8/src/ic/ppc/access-compiler-ppc.cc2
-rw-r--r--deps/v8/src/ic/ppc/handler-compiler-ppc.cc28
-rw-r--r--deps/v8/src/ic/ppc/ic-compiler-ppc.cc5
-rw-r--r--deps/v8/src/ic/ppc/ic-ppc.cc14
-rw-r--r--deps/v8/src/ic/ppc/stub-cache-ppc.cc10
-rw-r--r--deps/v8/src/ic/x64/access-compiler-x64.cc3
-rw-r--r--deps/v8/src/ic/x64/handler-compiler-x64.cc32
-rw-r--r--deps/v8/src/ic/x64/ic-compiler-x64.cc5
-rw-r--r--deps/v8/src/ic/x64/ic-x64.cc14
-rw-r--r--deps/v8/src/ic/x64/stub-cache-x64.cc13
-rw-r--r--deps/v8/src/ic/x87/access-compiler-x87.cc3
-rw-r--r--deps/v8/src/ic/x87/handler-compiler-x87.cc42
-rw-r--r--deps/v8/src/ic/x87/ic-compiler-x87.cc5
-rw-r--r--deps/v8/src/ic/x87/ic-x87.cc26
-rw-r--r--deps/v8/src/ic/x87/stub-cache-x87.cc50
-rw-r--r--deps/v8/src/identity-map.cc (renamed from deps/v8/src/heap/identity-map.cc)2
-rw-r--r--deps/v8/src/identity-map.h (renamed from deps/v8/src/heap/identity-map.h)6
-rw-r--r--deps/v8/src/interface-descriptors.cc320
-rw-r--r--deps/v8/src/interface-descriptors.h84
-rw-r--r--deps/v8/src/interpreter/DEPS1
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.cc484
-rw-r--r--deps/v8/src/interpreter/bytecode-array-builder.h141
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.cc72
-rw-r--r--deps/v8/src/interpreter/bytecode-array-iterator.h48
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.cc293
-rw-r--r--deps/v8/src/interpreter/bytecode-generator.h10
-rw-r--r--deps/v8/src/interpreter/bytecodes.cc67
-rw-r--r--deps/v8/src/interpreter/bytecodes.h123
-rw-r--r--deps/v8/src/interpreter/interpreter.cc342
-rw-r--r--deps/v8/src/interpreter/interpreter.h20
-rw-r--r--deps/v8/src/isolate-inl.h104
-rw-r--r--deps/v8/src/isolate.cc109
-rw-r--r--deps/v8/src/isolate.h111
-rw-r--r--deps/v8/src/iterator-prototype.js5
-rw-r--r--deps/v8/src/json-parser.h13
-rw-r--r--deps/v8/src/json-stringifier.h6
-rw-r--r--deps/v8/src/json.js33
-rw-r--r--deps/v8/src/layout-descriptor.cc6
-rw-r--r--deps/v8/src/list-inl.h2
-rw-r--r--deps/v8/src/lithium-allocator.cc11
-rw-r--r--deps/v8/src/lithium-codegen.cc2
-rw-r--r--deps/v8/src/lithium.cc6
-rw-r--r--deps/v8/src/log-inl.h5
-rw-r--r--deps/v8/src/log-utils.h2
-rw-r--r--deps/v8/src/log.cc13
-rw-r--r--deps/v8/src/lookup.cc10
-rw-r--r--deps/v8/src/macro-assembler.h74
-rw-r--r--deps/v8/src/macros.py31
-rw-r--r--deps/v8/src/math.js3
-rw-r--r--deps/v8/src/messages.cc112
-rw-r--r--deps/v8/src/messages.h37
-rw-r--r--deps/v8/src/messages.js175
-rw-r--r--deps/v8/src/mips/assembler-mips.cc9
-rw-r--r--deps/v8/src/mips/assembler-mips.h3
-rw-r--r--deps/v8/src/mips/builtins-mips.cc886
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc1169
-rw-r--r--deps/v8/src/mips/codegen-mips.cc2
-rw-r--r--deps/v8/src/mips/constants-mips.cc145
-rw-r--r--deps/v8/src/mips/constants-mips.h138
-rw-r--r--deps/v8/src/mips/disasm-mips.cc2
-rw-r--r--deps/v8/src/mips/frames-mips.cc1
-rw-r--r--deps/v8/src/mips/interface-descriptors-mips.cc49
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.cc339
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.h21
-rw-r--r--deps/v8/src/mips/lithium-mips.cc60
-rw-r--r--deps/v8/src/mips/lithium-mips.h81
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc290
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.h86
-rw-r--r--deps/v8/src/mips/simulator-mips.cc1723
-rw-r--r--deps/v8/src/mips/simulator-mips.h100
-rw-r--r--deps/v8/src/mips64/assembler-mips64.cc6
-rw-r--r--deps/v8/src/mips64/assembler-mips64.h13
-rw-r--r--deps/v8/src/mips64/builtins-mips64.cc892
-rw-r--r--deps/v8/src/mips64/code-stubs-mips64.cc1198
-rw-r--r--deps/v8/src/mips64/codegen-mips64.cc344
-rw-r--r--deps/v8/src/mips64/constants-mips64.cc185
-rw-r--r--deps/v8/src/mips64/constants-mips64.h176
-rw-r--r--deps/v8/src/mips64/disasm-mips64.cc2
-rw-r--r--deps/v8/src/mips64/frames-mips64.cc1
-rw-r--r--deps/v8/src/mips64/interface-descriptors-mips64.cc49
-rw-r--r--deps/v8/src/mips64/lithium-codegen-mips64.cc339
-rw-r--r--deps/v8/src/mips64/lithium-codegen-mips64.h20
-rw-r--r--deps/v8/src/mips64/lithium-mips64.cc60
-rw-r--r--deps/v8/src/mips64/lithium-mips64.h81
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.cc459
-rw-r--r--deps/v8/src/mips64/macro-assembler-mips64.h99
-rw-r--r--deps/v8/src/mips64/simulator-mips64.cc1859
-rw-r--r--deps/v8/src/mips64/simulator-mips64.h103
-rw-r--r--deps/v8/src/object-observe.js39
-rw-r--r--deps/v8/src/objects-debug.cc44
-rw-r--r--deps/v8/src/objects-inl.h429
-rw-r--r--deps/v8/src/objects-printer.cc93
-rw-r--r--deps/v8/src/objects.cc1698
-rw-r--r--deps/v8/src/objects.h708
-rw-r--r--deps/v8/src/optimizing-compile-dispatcher.cc3
-rw-r--r--deps/v8/src/parser.cc519
-rw-r--r--deps/v8/src/parser.h41
-rw-r--r--deps/v8/src/pattern-rewriter.cc33
-rw-r--r--deps/v8/src/ppc/assembler-ppc-inl.h4
-rw-r--r--deps/v8/src/ppc/assembler-ppc.cc2
-rw-r--r--deps/v8/src/ppc/builtins-ppc.cc916
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.cc1224
-rw-r--r--deps/v8/src/ppc/code-stubs-ppc.h5
-rw-r--r--deps/v8/src/ppc/codegen-ppc.cc8
-rw-r--r--deps/v8/src/ppc/cpu-ppc.cc18
-rw-r--r--deps/v8/src/ppc/frames-ppc.cc1
-rw-r--r--deps/v8/src/ppc/interface-descriptors-ppc.cc48
-rw-r--r--deps/v8/src/ppc/lithium-codegen-ppc.cc330
-rw-r--r--deps/v8/src/ppc/lithium-codegen-ppc.h10
-rw-r--r--deps/v8/src/ppc/lithium-ppc.cc57
-rw-r--r--deps/v8/src/ppc/lithium-ppc.h79
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.cc383
-rw-r--r--deps/v8/src/ppc/macro-assembler-ppc.h80
-rw-r--r--deps/v8/src/preparser.cc61
-rw-r--r--deps/v8/src/preparser.h313
-rw-r--r--deps/v8/src/prettyprinter.cc49
-rw-r--r--deps/v8/src/profiler/allocation-tracker.cc (renamed from deps/v8/src/allocation-tracker.cc)11
-rw-r--r--deps/v8/src/profiler/allocation-tracker.h (renamed from deps/v8/src/allocation-tracker.h)4
-rw-r--r--deps/v8/src/profiler/circular-queue-inl.h (renamed from deps/v8/src/circular-queue-inl.h)8
-rw-r--r--deps/v8/src/profiler/circular-queue.h (renamed from deps/v8/src/circular-queue.h)6
-rw-r--r--deps/v8/src/profiler/cpu-profiler-inl.h (renamed from deps/v8/src/cpu-profiler-inl.h)14
-rw-r--r--deps/v8/src/profiler/cpu-profiler.cc (renamed from deps/v8/src/cpu-profiler.cc)24
-rw-r--r--deps/v8/src/profiler/cpu-profiler.h (renamed from deps/v8/src/cpu-profiler.h)12
-rw-r--r--deps/v8/src/profiler/heap-profiler.cc (renamed from deps/v8/src/heap-profiler.cc)10
-rw-r--r--deps/v8/src/profiler/heap-profiler.h (renamed from deps/v8/src/heap-profiler.h)8
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator-inl.h (renamed from deps/v8/src/heap-snapshot-generator-inl.h)8
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.cc (renamed from deps/v8/src/heap-snapshot-generator.cc)17
-rw-r--r--deps/v8/src/profiler/heap-snapshot-generator.h (renamed from deps/v8/src/heap-snapshot-generator.h)10
-rw-r--r--deps/v8/src/profiler/profile-generator-inl.h (renamed from deps/v8/src/profile-generator-inl.h)18
-rw-r--r--deps/v8/src/profiler/profile-generator.cc (renamed from deps/v8/src/profile-generator.cc)27
-rw-r--r--deps/v8/src/profiler/profile-generator.h (renamed from deps/v8/src/profile-generator.h)11
-rw-r--r--deps/v8/src/profiler/sampler.cc (renamed from deps/v8/src/sampler.cc)96
-rw-r--r--deps/v8/src/profiler/sampler.h (renamed from deps/v8/src/sampler.h)6
-rw-r--r--deps/v8/src/profiler/unbound-queue-inl.h (renamed from deps/v8/src/unbound-queue-inl.h)8
-rw-r--r--deps/v8/src/profiler/unbound-queue.h (renamed from deps/v8/src/unbound-queue.h)6
-rw-r--r--deps/v8/src/prologue.js90
-rw-r--r--deps/v8/src/promise.js110
-rw-r--r--deps/v8/src/prototype.h20
-rw-r--r--deps/v8/src/proxy.js20
-rw-r--r--deps/v8/src/regexp.js32
-rw-r--r--deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc2
-rw-r--r--deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc2
-rw-r--r--deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc2
-rw-r--r--deps/v8/src/regexp/jsregexp.cc94
-rw-r--r--deps/v8/src/regexp/jsregexp.h26
-rw-r--r--deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc10
-rw-r--r--deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc2
-rw-r--r--deps/v8/src/regexp/regexp-macro-assembler.cc1
-rw-r--r--deps/v8/src/regexp/regexp-stack.cc1
-rw-r--r--deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc2
-rw-r--r--deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc2
-rw-r--r--deps/v8/src/rewriter.cc6
-rw-r--r--deps/v8/src/runtime-profiler.cc2
-rw-r--r--deps/v8/src/runtime.js686
-rw-r--r--deps/v8/src/runtime/runtime-array.cc839
-rw-r--r--deps/v8/src/runtime/runtime-atomics.cc181
-rw-r--r--deps/v8/src/runtime/runtime-classes.cc91
-rw-r--r--deps/v8/src/runtime/runtime-collections.cc93
-rw-r--r--deps/v8/src/runtime/runtime-compiler.cc6
-rw-r--r--deps/v8/src/runtime/runtime-date.cc10
-rw-r--r--deps/v8/src/runtime/runtime-debug.cc27
-rw-r--r--deps/v8/src/runtime/runtime-forin.cc4
-rw-r--r--deps/v8/src/runtime/runtime-function.cc176
-rw-r--r--deps/v8/src/runtime/runtime-i18n.cc7
-rw-r--r--deps/v8/src/runtime/runtime-internal.cc128
-rw-r--r--deps/v8/src/runtime/runtime-interpreter.cc125
-rw-r--r--deps/v8/src/runtime/runtime-json.cc10
-rw-r--r--deps/v8/src/runtime/runtime-literals.cc49
-rw-r--r--deps/v8/src/runtime/runtime-liveedit.cc1
-rw-r--r--deps/v8/src/runtime/runtime-numbers.cc253
-rw-r--r--deps/v8/src/runtime/runtime-object.cc403
-rw-r--r--deps/v8/src/runtime/runtime-observe.cc1
-rw-r--r--deps/v8/src/runtime/runtime-operators.cc277
-rw-r--r--deps/v8/src/runtime/runtime-proxy.cc2
-rw-r--r--deps/v8/src/runtime/runtime-regexp.cc32
-rw-r--r--deps/v8/src/runtime/runtime-scopes.cc239
-rw-r--r--deps/v8/src/runtime/runtime-simd.cc378
-rw-r--r--deps/v8/src/runtime/runtime-strings.cc101
-rw-r--r--deps/v8/src/runtime/runtime-symbol.cc39
-rw-r--r--deps/v8/src/runtime/runtime-test.cc10
-rw-r--r--deps/v8/src/runtime/runtime-typedarray.cc75
-rw-r--r--deps/v8/src/runtime/runtime-uri.cc24
-rw-r--r--deps/v8/src/runtime/runtime.cc6
-rw-r--r--deps/v8/src/runtime/runtime.h738
-rw-r--r--deps/v8/src/scanner-character-streams.cc5
-rw-r--r--deps/v8/src/scanner-character-streams.h5
-rw-r--r--deps/v8/src/scanner.cc28
-rw-r--r--deps/v8/src/scanner.h64
-rw-r--r--deps/v8/src/scopeinfo.cc112
-rw-r--r--deps/v8/src/scopeinfo.h36
-rw-r--r--deps/v8/src/scopes.cc81
-rw-r--r--deps/v8/src/scopes.h87
-rw-r--r--deps/v8/src/snapshot/mksnapshot.cc29
-rw-r--r--deps/v8/src/snapshot/natives-common.cc8
-rw-r--r--deps/v8/src/snapshot/natives-external.cc4
-rw-r--r--deps/v8/src/snapshot/natives.h11
-rw-r--r--deps/v8/src/snapshot/serialize.cc187
-rw-r--r--deps/v8/src/snapshot/serialize.h104
-rw-r--r--deps/v8/src/snapshot/snapshot-common.cc10
-rw-r--r--deps/v8/src/snapshot/snapshot-empty.cc2
-rw-r--r--deps/v8/src/snapshot/snapshot.h16
-rw-r--r--deps/v8/src/string-builder.cc1
-rw-r--r--deps/v8/src/string-iterator.js77
-rw-r--r--deps/v8/src/string-search.h97
-rw-r--r--deps/v8/src/string-stream.cc2
-rw-r--r--deps/v8/src/string-stream.h2
-rw-r--r--deps/v8/src/string.js193
-rw-r--r--deps/v8/src/strings-storage.cc4
-rw-r--r--deps/v8/src/symbol.js65
-rw-r--r--deps/v8/src/templates.js9
-rw-r--r--deps/v8/src/token.h1
-rw-r--r--deps/v8/src/transitions.cc4
-rw-r--r--deps/v8/src/type-feedback-vector-inl.h133
-rw-r--r--deps/v8/src/type-feedback-vector.cc284
-rw-r--r--deps/v8/src/type-feedback-vector.h280
-rw-r--r--deps/v8/src/type-info.cc104
-rw-r--r--deps/v8/src/type-info.h18
-rw-r--r--deps/v8/src/typedarray.js36
-rw-r--r--deps/v8/src/types-inl.h14
-rw-r--r--deps/v8/src/types.cc182
-rw-r--r--deps/v8/src/types.h103
-rw-r--r--deps/v8/src/typing-asm.cc1076
-rw-r--r--deps/v8/src/typing-asm.h95
-rw-r--r--deps/v8/src/typing-reset.cc26
-rw-r--r--deps/v8/src/typing-reset.h26
-rw-r--r--deps/v8/src/typing.cc118
-rw-r--r--deps/v8/src/typing.h15
-rw-r--r--deps/v8/src/unicode-cache-inl.h49
-rw-r--r--deps/v8/src/unicode-cache.h47
-rw-r--r--deps/v8/src/unicode.cc1
-rw-r--r--deps/v8/src/unique.h24
-rw-r--r--deps/v8/src/uri.js27
-rw-r--r--deps/v8/src/utils.h13
-rw-r--r--deps/v8/src/v8.cc13
-rw-r--r--deps/v8/src/v8.h36
-rw-r--r--deps/v8/src/v8natives.js141
-rw-r--r--deps/v8/src/v8threads.cc4
-rw-r--r--deps/v8/src/v8threads.h2
-rw-r--r--deps/v8/src/version.cc5
-rw-r--r--deps/v8/src/version.h1
-rw-r--r--deps/v8/src/weak-collection.js13
-rw-r--r--deps/v8/src/x64/assembler-x64-inl.h14
-rw-r--r--deps/v8/src/x64/builtins-x64.cc885
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc1122
-rw-r--r--deps/v8/src/x64/code-stubs-x64.h2
-rw-r--r--deps/v8/src/x64/codegen-x64.cc8
-rw-r--r--deps/v8/src/x64/frames-x64.cc1
-rw-r--r--deps/v8/src/x64/interface-descriptors-x64.cc49
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.cc299
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.h13
-rw-r--r--deps/v8/src/x64/lithium-x64.cc57
-rw-r--r--deps/v8/src/x64/lithium-x64.h73
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc179
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.h54
-rw-r--r--deps/v8/src/x64/simulator-x64.cc2
-rw-r--r--deps/v8/src/x87/assembler-x87-inl.h10
-rw-r--r--deps/v8/src/x87/assembler-x87.cc6
-rw-r--r--deps/v8/src/x87/assembler-x87.h3
-rw-r--r--deps/v8/src/x87/builtins-x87.cc892
-rw-r--r--deps/v8/src/x87/code-stubs-x87.cc1213
-rw-r--r--deps/v8/src/x87/code-stubs-x87.h2
-rw-r--r--deps/v8/src/x87/codegen-x87.cc6
-rw-r--r--deps/v8/src/x87/frames-x87.cc1
-rw-r--r--deps/v8/src/x87/frames-x87.h1
-rw-r--r--deps/v8/src/x87/interface-descriptors-x87.cc67
-rw-r--r--deps/v8/src/x87/lithium-codegen-x87.cc302
-rw-r--r--deps/v8/src/x87/lithium-codegen-x87.h14
-rw-r--r--deps/v8/src/x87/lithium-x87.cc60
-rw-r--r--deps/v8/src/x87/lithium-x87.h83
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.cc207
-rw-r--r--deps/v8/src/x87/macro-assembler-x87.h84
-rw-r--r--deps/v8/src/x87/simulator-x87.cc1
-rw-r--r--deps/v8/src/zone-type-cache.h98
-rw-r--r--deps/v8/test/cctest/cctest.cc14
-rw-r--r--deps/v8/test/cctest/cctest.gyp11
-rw-r--r--deps/v8/test/cctest/cctest.h143
-rw-r--r--deps/v8/test/cctest/cctest.status10
-rw-r--r--deps/v8/test/cctest/compiler/codegen-tester.h7
-rw-r--r--deps/v8/test/cctest/compiler/function-tester.h22
-rw-r--r--deps/v8/test/cctest/compiler/graph-builder-tester.h6
-rw-r--r--deps/v8/test/cctest/compiler/test-graph-visualizer.cc28
-rw-r--r--deps/v8/test/cctest/compiler/test-js-constant-cache.cc37
-rw-r--r--deps/v8/test/cctest/compiler/test-js-context-specialization.cc32
-rw-r--r--deps/v8/test/cctest/compiler/test-js-typed-lowering.cc107
-rw-r--r--deps/v8/test/cctest/compiler/test-loop-analysis.cc9
-rw-r--r--deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc5
-rw-r--r--deps/v8/test/cctest/compiler/test-node.cc255
-rw-r--r--deps/v8/test/cctest/compiler/test-osr.cc26
-rw-r--r--deps/v8/test/cctest/compiler/test-pipeline.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-representation-change.cc2
-rw-r--r--deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc258
-rw-r--r--deps/v8/test/cctest/compiler/test-run-deopt.cc1
-rw-r--r--deps/v8/test/cctest/compiler/test-run-inlining.cc58
-rw-r--r--deps/v8/test/cctest/compiler/test-run-intrinsics.cc37
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jscalls.cc22
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsexceptions.cc7
-rw-r--r--deps/v8/test/cctest/compiler/test-run-jsobjects.cc47
-rw-r--r--deps/v8/test/cctest/compiler/test-run-machops.cc150
-rw-r--r--deps/v8/test/cctest/compiler/test-run-native-calls.cc142
-rw-r--r--deps/v8/test/cctest/compiler/test-run-properties.cc12
-rw-r--r--deps/v8/test/cctest/compiler/test-run-stubs.cc12
-rw-r--r--deps/v8/test/cctest/compiler/test-simplified-lowering.cc123
-rw-r--r--deps/v8/test/cctest/compiler/value-helper.h40
-rw-r--r--deps/v8/test/cctest/expression-type-collector-macros.h38
-rw-r--r--deps/v8/test/cctest/expression-type-collector.cc61
-rw-r--r--deps/v8/test/cctest/expression-type-collector.h39
-rw-r--r--deps/v8/test/cctest/gay-fixed.h2
-rw-r--r--deps/v8/test/cctest/gay-precision.h2
-rw-r--r--deps/v8/test/cctest/gay-shortest.h2
-rw-r--r--deps/v8/test/cctest/heap-tester.h59
-rw-r--r--deps/v8/test/cctest/interpreter/test-bytecode-generator.cc853
-rw-r--r--deps/v8/test/cctest/interpreter/test-interpreter.cc1172
-rw-r--r--deps/v8/test/cctest/test-alloc.cc43
-rw-r--r--deps/v8/test/cctest/test-api-accessors.cc93
-rw-r--r--deps/v8/test/cctest/test-api.cc226
-rw-r--r--deps/v8/test/cctest/test-api.h4
-rw-r--r--deps/v8/test/cctest/test-asm-validator.cc915
-rw-r--r--deps/v8/test/cctest/test-assembler-arm64.cc12
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc25
-rw-r--r--deps/v8/test/cctest/test-assembler-mips64.cc100
-rw-r--r--deps/v8/test/cctest/test-ast-expression-visitor.cc392
-rw-r--r--deps/v8/test/cctest/test-circular-queue.cc2
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm.cc4
-rw-r--r--deps/v8/test/cctest/test-code-stubs-arm64.cc4
-rw-r--r--deps/v8/test/cctest/test-code-stubs-ia32.cc2
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips.cc10
-rw-r--r--deps/v8/test/cctest/test-code-stubs-mips64.cc10
-rw-r--r--deps/v8/test/cctest/test-code-stubs-x64.cc2
-rw-r--r--deps/v8/test/cctest/test-code-stubs-x87.cc2
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc2
-rw-r--r--deps/v8/test/cctest/test-date.cc23
-rw-r--r--deps/v8/test/cctest/test-debug.cc10
-rw-r--r--deps/v8/test/cctest/test-experimental-extra.js14
-rw-r--r--deps/v8/test/cctest/test-extra.js44
-rw-r--r--deps/v8/test/cctest/test-feedback-vector.cc150
-rw-r--r--deps/v8/test/cctest/test-gc-tracer.cc3
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc9
-rw-r--r--deps/v8/test/cctest/test-heap.cc627
-rw-r--r--deps/v8/test/cctest/test-identity-map.cc2
-rw-r--r--deps/v8/test/cctest/test-incremental-marking.cc168
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc2
-rw-r--r--deps/v8/test/cctest/test-log.cc2
-rw-r--r--deps/v8/test/cctest/test-mark-compact.cc17
-rw-r--r--deps/v8/test/cctest/test-mementos.cc42
-rw-r--r--deps/v8/test/cctest/test-migrations.cc10
-rw-r--r--deps/v8/test/cctest/test-parsing.cc349
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc4
-rw-r--r--deps/v8/test/cctest/test-serialize.cc2
-rw-r--r--deps/v8/test/cctest/test-simd.cc3
-rw-r--r--deps/v8/test/cctest/test-slots-buffer.cc132
-rw-r--r--deps/v8/test/cctest/test-spaces.cc198
-rw-r--r--deps/v8/test/cctest/test-strings.cc1
-rw-r--r--deps/v8/test/cctest/test-thread-termination.cc348
-rw-r--r--deps/v8/test/cctest/test-threads.cc3
-rw-r--r--deps/v8/test/cctest/test-transitions.cc3
-rw-r--r--deps/v8/test/cctest/test-typedarrays.cc10
-rw-r--r--deps/v8/test/cctest/test-types.cc3
-rw-r--r--deps/v8/test/cctest/test-typing-reset.cc301
-rw-r--r--deps/v8/test/cctest/test-unbound-queue.cc5
-rw-r--r--deps/v8/test/cctest/test-unboxed-doubles.cc94
-rw-r--r--deps/v8/test/cctest/test-unique.cc3
-rw-r--r--deps/v8/test/cctest/test-unscopables-hidden-prototype.cc140
-rw-r--r--deps/v8/test/cctest/test-utils-arm64.cc3
-rw-r--r--deps/v8/test/cctest/test-utils.cc3
-rw-r--r--deps/v8/test/cctest/test-version.cc3
-rw-r--r--deps/v8/test/cctest/test-weakmaps.cc13
-rw-r--r--deps/v8/test/cctest/test-weaksets.cc11
-rw-r--r--deps/v8/test/cctest/trace-extension.cc36
-rw-r--r--deps/v8/test/cctest/trace-extension.h7
-rw-r--r--deps/v8/test/cctest/types-fuzz.h17
-rw-r--r--deps/v8/test/intl/intl.status3
-rw-r--r--deps/v8/test/intl/testcfg.py4
-rw-r--r--deps/v8/test/js-perf-test/JSTests.json16
-rw-r--r--deps/v8/test/js-perf-test/RestParameters/rest.js30
-rw-r--r--deps/v8/test/js-perf-test/RestParameters/run.js26
-rw-r--r--deps/v8/test/message/arrow-missing.out6
-rw-r--r--deps/v8/test/message/testcfg.py4
-rw-r--r--deps/v8/test/mjsunit/array-natives-elements.js77
-rw-r--r--deps/v8/test/mjsunit/array-splice.js60
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-add.js2
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-and.js2
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-compareexchange.js22
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-exchange.js2
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-load.js18
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-or.js2
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-store.js20
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-sub.js2
-rw-r--r--deps/v8/test/mjsunit/asm/atomics-xor.js2
-rw-r--r--deps/v8/test/mjsunit/builtins.js7
-rw-r--r--deps/v8/test/mjsunit/compare-known-objects-slow.js32
-rw-r--r--deps/v8/test/mjsunit/compare-known-objects-tostringtag.js57
-rw-r--r--deps/v8/test/mjsunit/compare-known-objects.js32
-rw-r--r--deps/v8/test/mjsunit/compiler/jsnatives.js3
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4389-1.js11
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4389-2.js11
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4389-3.js11
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4389-4.js11
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4389-5.js11
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-4389-6.js11
-rw-r--r--deps/v8/test/mjsunit/constant-fold-control-instructions.js7
-rw-r--r--deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js2
-rw-r--r--deps/v8/test/mjsunit/date.js5
-rw-r--r--deps/v8/test/mjsunit/debug-liveedit-check-stack.js2
-rw-r--r--deps/v8/test/mjsunit/es6/arguments-iterator.js15
-rw-r--r--deps/v8/test/mjsunit/es6/debug-stepnext-for.js6
-rw-r--r--deps/v8/test/mjsunit/es6/object-assign.js (renamed from deps/v8/test/mjsunit/harmony/object-assign.js)2
-rw-r--r--deps/v8/test/mjsunit/es6/object-literals-property-shorthand.js8
-rw-r--r--deps/v8/test/mjsunit/es6/promises.js11
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-468661.js4
-rw-r--r--deps/v8/test/mjsunit/es6/toMethod.js106
-rw-r--r--deps/v8/test/mjsunit/harmony/array-length.js208
-rw-r--r--deps/v8/test/mjsunit/harmony/arrow-rest-params-lazy-parsing.js145
-rw-r--r--deps/v8/test/mjsunit/harmony/arrow-rest-params.js20
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics.js65
-rw-r--r--deps/v8/test/mjsunit/harmony/block-let-contextual-sloppy.js64
-rw-r--r--deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js5
-rw-r--r--deps/v8/test/mjsunit/harmony/block-sloppy-function.js203
-rw-r--r--deps/v8/test/mjsunit/harmony/default-parameters.js396
-rw-r--r--deps/v8/test/mjsunit/harmony/destructuring.js152
-rw-r--r--deps/v8/test/mjsunit/harmony/futex.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/proxies-function.js24
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4211.js12
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-4400.js8
-rw-r--r--deps/v8/test/mjsunit/harmony/regress/regress-508074.js26
-rw-r--r--deps/v8/test/mjsunit/harmony/rest-params.js71
-rw-r--r--deps/v8/test/mjsunit/harmony/simd.js66
-rw-r--r--deps/v8/test/mjsunit/harmony/spread-call-new-class.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/spread-call-new.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/spread-call-super-property.js27
-rw-r--r--deps/v8/test/mjsunit/harmony/spread-call.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/super.js35
-rw-r--r--deps/v8/test/mjsunit/harmony/to-name.js54
-rw-r--r--deps/v8/test/mjsunit/harmony/to-number.js61
-rw-r--r--deps/v8/test/mjsunit/harmony/to-primitive.js106
-rw-r--r--deps/v8/test/mjsunit/harmony/to-string.js54
-rw-r--r--deps/v8/test/mjsunit/messages.js3
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status34
-rw-r--r--deps/v8/test/mjsunit/regress/regress-105.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-165637.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3926.js87
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4121.js84
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4173.js58
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4374.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4376-1.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4376-2.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4376-3.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4377.js45
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4380.js20
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4388.js35
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4399.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-520029.js29
-rw-r--r--deps/v8/test/mjsunit/regress/regress-539875.js37
-rw-r--r--deps/v8/test/mjsunit/regress/regress-544991.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-500497.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-501809.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-518748.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-522895.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-523213.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-523307.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-523919.js31
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-527364.js26
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-530598.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-osr-context.js3
-rw-r--r--deps/v8/test/mjsunit/stack-traces-2.js3
-rw-r--r--deps/v8/test/mjsunit/string-indexof-1.js14
-rw-r--r--deps/v8/test/mjsunit/strong/class-literals.js159
-rw-r--r--deps/v8/test/mjsunit/strong/destructuring.js10
-rw-r--r--deps/v8/test/mjsunit/strong/function-arity.js43
-rw-r--r--deps/v8/test/mjsunit/strong/literals.js64
-rw-r--r--deps/v8/test/mjsunit/switch.js55
-rw-r--r--deps/v8/test/mjsunit/testcfg.py6
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor-test.only-summary9
-rw-r--r--deps/v8/test/mjsunit/tools/tickprocessor.js21
-rw-r--r--deps/v8/test/mozilla/mozilla.status15
-rw-r--r--deps/v8/test/mozilla/testcfg.py20
-rw-r--r--deps/v8/test/simdjs/SimdJs.json7
-rwxr-xr-xdeps/v8/test/simdjs/generate.py3
-rw-r--r--deps/v8/test/simdjs/harness-adapt.js5
-rw-r--r--deps/v8/test/simdjs/testcfg.py6
-rw-r--r--deps/v8/test/test262-es6/README18
-rw-r--r--deps/v8/test/test262-es6/harness-adapt.js91
-rw-r--r--deps/v8/test/test262-es6/test262-es6.status814
-rw-r--r--deps/v8/test/test262-es6/testcfg.py235
-rw-r--r--deps/v8/test/test262/README4
-rw-r--r--deps/v8/test/test262/harness-adapt.js9
-rw-r--r--deps/v8/test/test262/test262.status1008
-rw-r--r--deps/v8/test/test262/testcfg.py127
-rw-r--r--deps/v8/test/unittests/atomic-utils-unittest.cc217
-rw-r--r--deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc75
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc45
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-graph-builder-unittest.cc248
-rw-r--r--deps/v8/test/unittests/compiler/change-lowering-unittest.cc7
-rw-r--r--deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc51
-rw-r--r--deps/v8/test/unittests/compiler/common-operator-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/control-equivalence-unittest.cc10
-rw-r--r--deps/v8/test/unittests/compiler/diamond-unittest.cc16
-rw-r--r--deps/v8/test/unittests/compiler/graph-reducer-unittest.cc24
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.cc31
-rw-r--r--deps/v8/test/unittests/compiler/graph-unittest.h3
-rw-r--r--deps/v8/test/unittests/compiler/instruction-selector-unittest.cc45
-rw-r--r--deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc284
-rw-r--r--deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h5
-rw-r--r--deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc3
-rw-r--r--deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc37
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc21
-rw-r--r--deps/v8/test/unittests/compiler/js-operator-unittest.cc12
-rw-r--r--deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc29
-rw-r--r--deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc152
-rw-r--r--deps/v8/test/unittests/compiler/live-range-builder.h78
-rw-r--r--deps/v8/test/unittests/compiler/live-range-unittest.cc441
-rw-r--r--deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc7
-rw-r--r--deps/v8/test/unittests/compiler/loop-peeling-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc72
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-unittest.cc18
-rw-r--r--deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc4
-rw-r--r--deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc28
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc110
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h46
-rw-r--r--deps/v8/test/unittests/compiler/scheduler-unittest.cc29
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/simplified-operator-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/typer-unittest.cc30
-rw-r--r--deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc10
-rw-r--r--deps/v8/test/unittests/heap/bitmap-unittest.cc107
-rw-r--r--deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc232
-rw-r--r--deps/v8/test/unittests/heap/scavenge-job-unittest.cc111
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc272
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc100
-rw-r--r--deps/v8/test/unittests/interpreter/bytecodes-unittest.cc63
-rw-r--r--deps/v8/test/unittests/runtime/runtime-interpreter-unittest.cc172
-rw-r--r--deps/v8/test/unittests/unittests.gyp8
-rw-r--r--deps/v8/test/webkit/fast/js/object-prototype-properties-expected.txt2
-rw-r--r--deps/v8/test/webkit/function-apply-aliased-expected.txt2
-rw-r--r--deps/v8/test/webkit/function-apply-aliased.js13
-rw-r--r--deps/v8/test/webkit/testcfg.py4
-rw-r--r--deps/v8/tools/cfi/blacklist.txt4
-rwxr-xr-xdeps/v8/tools/check-name-clashes.py118
-rwxr-xr-xdeps/v8/tools/eval_gc_nvp.py151
-rwxr-xr-xdeps/v8/tools/fuzz-harness.sh4
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py72
-rw-r--r--deps/v8/tools/gyp/v8.gyp245
-rwxr-xr-xdeps/v8/tools/js2c.py9
-rwxr-xr-xdeps/v8/tools/presubmit.py84
-rwxr-xr-xdeps/v8/tools/release/merge_to_branch.py5
-rwxr-xr-xdeps/v8/tools/run-deopt-fuzzer.py1
-rwxr-xr-xdeps/v8/tools/run_perf.py12
-rw-r--r--deps/v8/tools/testrunner/local/commands.py18
-rw-r--r--deps/v8/tools/testrunner/local/progress.py1
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py2
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py2
-rw-r--r--deps/v8/tools/tickprocessor-driver.js3
-rw-r--r--deps/v8/tools/tickprocessor.js68
-rwxr-xr-xdeps/v8/tools/try_perf.py51
-rw-r--r--deps/v8/tools/unittests/run_perf_test.py18
-rw-r--r--deps/v8/tools/whitespace.txt2
978 files changed, 60649 insertions, 43392 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index cc5606e854..337a5531e8 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -80,6 +80,7 @@ shell_g
/tools/jsfunfuzz.zip
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
+/tools/swarming_client
/tools/visual_studio/Debug
/tools/visual_studio/Release
/v8.log.ll
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 72c23bcc83..9dfb07328e 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -49,6 +49,7 @@ Christopher A. Taylor <chris@gameclosure.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
Douglas Crosher <dtc-v8@scieneer.com>
+Dusan Milosavljevic <dusan.m.milosavljevic@gmail.com>
Erich Ocean <erich.ocean@me.com>
Fedor Indutny <fedor@indutny.com>
Felix Geisendörfer <haimuiba@gmail.com>
@@ -67,6 +68,7 @@ Johan Bergström <johan@bergstroem.nu>
Jonathan Liu <net147@gmail.com>
JunHo Seo <sejunho@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
+Karl Skomski <karl@skomski.com>
Luis Reis <luis.m.reis@gmail.com>
Luke Zarko <lukezarko@gmail.com>
Maciej Małecki <me@mmalecki.com>
@@ -90,9 +92,11 @@ Rene Rebe <rene@exactcode.de>
Robert Mustacchi <rm@fingolfin.org>
Robert Nagy <robert.nagy@gmail.com>
Ryan Dahl <ry@tinyclouds.org>
+Sakthipriyan Vairamani (thefourtheye) <thechargingvolcano@gmail.com>
Sandro Santilli <strk@keybit.net>
Sanjoy Das <sanjoy@playingwithpointers.com>
Seo Sanghyeon <sanxiyn@gmail.com>
+Stefan Penner <stefan.penner@gmail.com>
Tobias Burnus <burnus@net-b.de>
Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index fae41a7361..9dfdaa3cda 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -5,11 +5,14 @@
import("//build/config/android/config.gni")
import("//build/config/arm.gni")
import("//build/config/mips.gni")
+import("//build/config/sanitizers/sanitizers.gni")
# Because standalone V8 builds are not supported, assume this is part of a
# Chromium build.
import("//build/module_args/v8.gni")
+import("snapshot_toolchain.gni")
+
# TODO(jochen): These will need to be user-settable to support standalone V8
# builds.
v8_deprecation_warnings = false
@@ -22,26 +25,16 @@ v8_interpreted_regexp = false
v8_object_print = false
v8_postmortem_support = false
v8_use_snapshot = true
-v8_target_arch = target_cpu
v8_random_seed = "314159265"
v8_toolset_for_d8 = "host"
-# The snapshot needs to be compiled for the host, but compiled with
-# a toolchain that matches the bit-width of the target.
-#
-# TODO(GYP): For now we only support 32-bit little-endian target builds from an
-# x64 Linux host. Eventually we need to support all of the host/target
-# configurations v8 runs on.
-if (host_cpu == "x64" && host_os == "linux") {
- if (target_cpu == "arm" || target_cpu == "mipsel" || target_cpu == "x86") {
- snapshot_toolchain = "//build/toolchain/linux:clang_x86"
- } else if (target_cpu == "x64") {
- snapshot_toolchain = "//build/toolchain/linux:clang_x64"
- } else {
- assert(false, "Need environment for this arch")
- }
+if (is_msan) {
+ # Running the V8-generated code on an ARM simulator is a powerful hack that
+ # allows the tool to see the memory accesses from JITted code. Without this
+ # flag, JS code causes false positive reports from MSan.
+ v8_target_arch = "arm64"
} else {
- snapshot_toolchain = default_toolchain
+ v8_target_arch = target_cpu
}
###############################################################################
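
Taken together, the hunk above drops the inline host/target snapshot-toolchain table in favor of the new snapshot_toolchain.gni import and makes the target architecture sanitizer-aware. A condensed sketch of the resulting selection logic, assembled from the added lines (not the verbatim file contents):

    import("//build/config/sanitizers/sanitizers.gni")
    import("snapshot_toolchain.gni")  # now supplies the snapshot toolchain choice

    if (is_msan) {
      # MSan can only observe the memory accesses made by JITted code when
      # that code runs on the arm64 simulator, so force a simulator build.
      v8_target_arch = "arm64"
    } else {
      v8_target_arch = target_cpu
    }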
@@ -312,7 +305,6 @@ action("js2c_experimental") {
"src/harmony-regexp.js",
"src/harmony-reflect.js",
"src/harmony-spread.js",
- "src/harmony-object.js",
"src/harmony-object-observe.js",
"src/harmony-sharedarraybuffer.js",
"src/harmony-simd.js"
@@ -367,6 +359,36 @@ action("js2c_extras") {
}
}
+action("js2c_experimental_extras") {
+ visibility = [ ":*" ] # Only targets in this file can depend on this.
+
+ script = "tools/js2c.py"
+
+ # The script depends on this other script, this rule causes a rebuild if it
+ # changes.
+ inputs = [ "tools/jsmin.py" ]
+
+ sources = v8_experimental_extra_library_files
+
+ outputs = [
+ "$target_gen_dir/experimental-extras-libraries.cc",
+ ]
+
+ args = [
+ rebase_path("$target_gen_dir/experimental-extras-libraries.cc",
+ root_build_dir),
+ "EXPERIMENTAL_EXTRAS",
+ ] + rebase_path(sources, root_build_dir)
+
+ if (v8_use_external_startup_data) {
+ outputs += [ "$target_gen_dir/libraries_experimental_extras.bin" ]
+ args += [
+ "--startup_blob",
+ rebase_path("$target_gen_dir/libraries_experimental_extras.bin", root_build_dir),
+ ]
+ }
+}
+
action("d8_js2c") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
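
The new js2c_experimental_extras action mirrors the existing js2c_extras target. The remaining hunks in this file wire it into each bundling target; condensed, the recurring pattern is (a sketch, not verbatim file contents):

    # Each consumer gains the action as a dependency ...
    deps += [ ":js2c_experimental_extras" ]
    # ... and picks up its output: the generated .cc library for the
    # (no)snapshot builds, or the .bin blob when external startup data is used.
    sources += [ "$target_gen_dir/experimental-extras-libraries.cc" ]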
@@ -394,6 +416,7 @@ if (v8_use_external_startup_data) {
":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
+ ":js2c_experimental_extras",
]
sources = [
@@ -401,6 +424,7 @@ if (v8_use_external_startup_data) {
"$target_gen_dir/libraries_code_stub.bin",
"$target_gen_dir/libraries_experimental.bin",
"$target_gen_dir/libraries_extras.bin",
+ "$target_gen_dir/libraries_experimental_extras.bin",
]
outputs = [
@@ -456,6 +480,7 @@ action("run_mksnapshot") {
"--log-snapshot-positions",
"--logfile",
rebase_path("$target_gen_dir/snapshot.log", root_build_dir),
+ "--startup_src",
rebase_path("$target_gen_dir/snapshot.cc", root_build_dir),
]
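
After this change, run_mksnapshot passes the generated snapshot.cc path behind an explicit --startup_src flag instead of as a bare trailing argument. Condensed from the visible context (flags earlier in the list are omitted here):

    args = [
      "--log-snapshot-positions",
      "--logfile",
      rebase_path("$target_gen_dir/snapshot.log", root_build_dir),
      "--startup_src",
      rebase_path("$target_gen_dir/snapshot.cc", root_build_dir),
    ]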
@@ -487,6 +512,7 @@ source_set("v8_nosnapshot") {
":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
+ ":js2c_experimental_extras",
":v8_base",
]
@@ -495,6 +521,7 @@ source_set("v8_nosnapshot") {
"$target_gen_dir/code-stub-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
+ "$target_gen_dir/experimental-extras-libraries.cc",
"src/snapshot/snapshot-empty.cc",
]
@@ -520,6 +547,7 @@ source_set("v8_snapshot") {
":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
+ ":js2c_experimental_extras",
":v8_base",
]
public_deps = [
@@ -533,6 +561,7 @@ source_set("v8_snapshot") {
"$target_gen_dir/code-stub-libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
+ "$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/snapshot.cc",
]
@@ -554,6 +583,7 @@ if (v8_use_external_startup_data) {
":js2c_code_stubs",
":js2c_experimental",
":js2c_extras",
+ ":js2c_experimental_extras",
":v8_base",
]
public_deps = [
@@ -594,8 +624,6 @@ source_set("v8_base") {
"src/allocation.h",
"src/allocation-site-scopes.cc",
"src/allocation-site-scopes.h",
- "src/allocation-tracker.cc",
- "src/allocation-tracker.h",
"src/api.cc",
"src/api.h",
"src/api-natives.cc",
@@ -606,6 +634,8 @@ source_set("v8_base") {
"src/assembler.h",
"src/assert-scope.h",
"src/assert-scope.cc",
+ "src/ast-expression-visitor.cc",
+ "src/ast-expression-visitor.h",
"src/ast-literal-reindexer.cc",
"src/ast-literal-reindexer.h",
"src/ast-numbering.cc",
@@ -614,6 +644,7 @@ source_set("v8_base") {
"src/ast-value-factory.h",
"src/ast.cc",
"src/ast.h",
+ "src/atomic-utils.h",
"src/background-parsing-task.cc",
"src/background-parsing-task.h",
"src/bailout-reason.cc",
@@ -638,14 +669,11 @@ source_set("v8_base") {
"src/char-predicates-inl.h",
"src/char-predicates.h",
"src/checks.h",
- "src/circular-queue-inl.h",
- "src/circular-queue.h",
"src/code-factory.cc",
"src/code-factory.h",
"src/code-stubs.cc",
"src/code-stubs.h",
"src/code-stubs-hydrogen.cc",
- "src/code.h",
"src/codegen.cc",
"src/codegen.h",
"src/compilation-cache.cc",
@@ -664,6 +692,8 @@ source_set("v8_base") {
"src/compiler/ast-loop-assignment-analyzer.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",
+ "src/compiler/bytecode-graph-builder.cc",
+ "src/compiler/bytecode-graph-builder.h",
"src/compiler/change-lowering.cc",
"src/compiler/change-lowering.h",
"src/compiler/c-linkage.cc",
@@ -743,6 +773,8 @@ source_set("v8_base") {
"src/compiler/jump-threading.h",
"src/compiler/linkage.cc",
"src/compiler/linkage.h",
+ "src/compiler/live-range-separator.cc",
+ "src/compiler/live-range-separator.h",
"src/compiler/liveness-analyzer.cc",
"src/compiler/liveness-analyzer.h",
"src/compiler/load-elimination.cc",
@@ -781,8 +813,6 @@ source_set("v8_base") {
"src/compiler/pipeline.h",
"src/compiler/pipeline-statistics.cc",
"src/compiler/pipeline-statistics.h",
- "src/compiler/preprocess-live-ranges.cc",
- "src/compiler/preprocess-live-ranges.h",
"src/compiler/raw-machine-assembler.cc",
"src/compiler/raw-machine-assembler.h",
"src/compiler/register-allocator.cc",
@@ -822,6 +852,7 @@ source_set("v8_base") {
"src/compiler.h",
"src/context-measure.cc",
"src/context-measure.h",
+ "src/contexts-inl.h",
"src/contexts.cc",
"src/contexts.h",
"src/conversions-inl.h",
@@ -829,9 +860,6 @@ source_set("v8_base") {
"src/conversions.h",
"src/counters.cc",
"src/counters.h",
- "src/cpu-profiler-inl.h",
- "src/cpu-profiler.cc",
- "src/cpu-profiler.h",
"src/date.cc",
"src/date.h",
"src/dateparser-inl.h",
@@ -904,11 +932,8 @@ source_set("v8_base") {
"src/handles.cc",
"src/handles.h",
"src/hashmap.h",
- "src/heap-profiler.cc",
- "src/heap-profiler.h",
- "src/heap-snapshot-generator-inl.h",
- "src/heap-snapshot-generator.cc",
- "src/heap-snapshot-generator.h",
+ "src/heap/array-buffer-tracker.cc",
+ "src/heap/array-buffer-tracker.h",
"src/heap/gc-idle-time-handler.cc",
"src/heap/gc-idle-time-handler.h",
"src/heap/gc-tracer.cc",
@@ -916,8 +941,8 @@ source_set("v8_base") {
"src/heap/heap-inl.h",
"src/heap/heap.cc",
"src/heap/heap.h",
- "src/heap/identity-map.cc",
- "src/heap/identity-map.h",
+ "src/heap/incremental-marking-job.cc",
+ "src/heap/incremental-marking-job.h",
"src/heap/incremental-marking.cc",
"src/heap/incremental-marking.h",
"src/heap/mark-compact-inl.h",
@@ -925,9 +950,18 @@ source_set("v8_base") {
"src/heap/mark-compact.h",
"src/heap/memory-reducer.cc",
"src/heap/memory-reducer.h",
+ "src/heap/object-stats.cc",
+ "src/heap/object-stats.h",
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
+ "src/heap/scavenge-job.h",
+ "src/heap/scavenge-job.cc",
+ "src/heap/scavenger-inl.h",
+ "src/heap/scavenger.cc",
+ "src/heap/scavenger.h",
+ "src/heap/slots-buffer.cc",
+ "src/heap/slots-buffer.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
"src/heap/spaces.h",
@@ -1005,16 +1039,21 @@ source_set("v8_base") {
"src/ic/ic-compiler.h",
"src/ic/stub-cache.cc",
"src/ic/stub-cache.h",
+ "src/identity-map.cc",
+ "src/identity-map.h",
"src/interface-descriptors.cc",
"src/interface-descriptors.h",
"src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
"src/interpreter/bytecode-array-builder.cc",
"src/interpreter/bytecode-array-builder.h",
+ "src/interpreter/bytecode-array-iterator.cc",
+ "src/interpreter/bytecode-array-iterator.h",
"src/interpreter/bytecode-generator.cc",
"src/interpreter/bytecode-generator.h",
"src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
+ "src/isolate-inl.h",
"src/isolate.cc",
"src/isolate.h",
"src/json-parser.h",
@@ -1066,9 +1105,25 @@ source_set("v8_base") {
"src/preparser.h",
"src/prettyprinter.cc",
"src/prettyprinter.h",
- "src/profile-generator-inl.h",
- "src/profile-generator.cc",
- "src/profile-generator.h",
+ "src/profiler/allocation-tracker.cc",
+ "src/profiler/allocation-tracker.h",
+ "src/profiler/circular-queue-inl.h",
+ "src/profiler/circular-queue.h",
+ "src/profiler/cpu-profiler-inl.h",
+ "src/profiler/cpu-profiler.cc",
+ "src/profiler/cpu-profiler.h",
+ "src/profiler/heap-profiler.cc",
+ "src/profiler/heap-profiler.h",
+ "src/profiler/heap-snapshot-generator-inl.h",
+ "src/profiler/heap-snapshot-generator.cc",
+ "src/profiler/heap-snapshot-generator.h",
+ "src/profiler/profile-generator-inl.h",
+ "src/profiler/profile-generator.cc",
+ "src/profiler/profile-generator.h",
+ "src/profiler/sampler.cc",
+ "src/profiler/sampler.h",
+ "src/profiler/unbound-queue-inl.h",
+ "src/profiler/unbound-queue.h",
"src/property-details.h",
"src/property.cc",
"src/property.h",
@@ -1105,6 +1160,7 @@ source_set("v8_base") {
"src/runtime/runtime-generator.cc",
"src/runtime/runtime-i18n.cc",
"src/runtime/runtime-internal.cc",
+ "src/runtime/runtime-interpreter.cc",
"src/runtime/runtime-json.cc",
"src/runtime/runtime-literals.cc",
"src/runtime/runtime-liveedit.cc",
@@ -1112,6 +1168,7 @@ source_set("v8_base") {
"src/runtime/runtime-numbers.cc",
"src/runtime/runtime-object.cc",
"src/runtime/runtime-observe.cc",
+ "src/runtime/runtime-operators.cc",
"src/runtime/runtime-proxy.cc",
"src/runtime/runtime-regexp.cc",
"src/runtime/runtime-scopes.cc",
@@ -1126,8 +1183,6 @@ source_set("v8_base") {
"src/runtime/runtime.h",
"src/safepoint-table.cc",
"src/safepoint-table.h",
- "src/sampler.cc",
- "src/sampler.h",
"src/scanner-character-streams.cc",
"src/scanner-character-streams.h",
"src/scanner.cc",
@@ -1173,13 +1228,17 @@ source_set("v8_base") {
"src/types-inl.h",
"src/types.cc",
"src/types.h",
+ "src/typing-asm.cc",
+ "src/typing-asm.h",
+ "src/typing-reset.cc",
+ "src/typing-reset.h",
"src/typing.cc",
"src/typing.h",
- "src/unbound-queue-inl.h",
- "src/unbound-queue.h",
"src/unicode-inl.h",
"src/unicode.cc",
"src/unicode.h",
+ "src/unicode-cache-inl.h",
+ "src/unicode-cache.h",
"src/unicode-decoder.cc",
"src/unicode-decoder.h",
"src/unique.h",
@@ -1196,6 +1255,7 @@ source_set("v8_base") {
"src/version.h",
"src/vm-state-inl.h",
"src/vm-state.h",
+ "src/zone-type-cache.h",
"src/zone.cc",
"src/zone.h",
"src/zone-allocator.h",
@@ -1335,6 +1395,9 @@ source_set("v8_base") {
"src/arm64/decoder-arm64.cc",
"src/arm64/decoder-arm64.h",
"src/arm64/decoder-arm64-inl.h",
+ "src/arm64/delayed-masm-arm64.cc",
+ "src/arm64/delayed-masm-arm64.h",
+ "src/arm64/delayed-masm-arm64-inl.h",
"src/arm64/deoptimizer-arm64.cc",
"src/arm64/disasm-arm64.cc",
"src/arm64/disasm-arm64.h",
@@ -1462,7 +1525,7 @@ source_set("v8_base") {
]
if (!is_debug) {
- configs -= [ "//build/config/compiler:optimize" ]
+ configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
@@ -1561,7 +1624,7 @@ source_set("v8_libbase") {
]
if (!is_debug) {
- configs -= [ "//build/config/compiler:optimize" ]
+ configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
@@ -1628,7 +1691,7 @@ source_set("v8_libplatform") {
]
if (!is_debug) {
- configs -= [ "//build/config/compiler:optimize" ]
+ configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
@@ -1670,35 +1733,25 @@ if (current_toolchain == snapshot_toolchain) {
# Public targets
#
+if (v8_use_snapshot && v8_use_external_startup_data) {
+ snapshot_target = ":v8_external_snapshot"
+} else if (v8_use_snapshot) {
+ snapshot_target = ":v8_snapshot"
+} else {
+ assert(!v8_use_external_startup_data)
+ snapshot_target = ":v8_nosnapshot"
+}
+
if (is_component_build) {
component("v8") {
sources = [
"src/v8dll-main.cc",
]
- if (v8_use_snapshot && v8_use_external_startup_data) {
- deps = [
- ":v8_base",
- ]
- public_deps = [
- ":v8_external_snapshot",
- ]
- } else if (v8_use_snapshot) {
- deps = [
- ":v8_base",
- ]
- # v8_snapshot should be public so downstream targets can declare the
- # snapshot file as their input.
- public_deps = [
- ":v8_snapshot",
- ]
- } else {
- assert(!v8_use_external_startup_data)
- deps = [
- ":v8_base",
- ":v8_nosnapshot",
- ]
- }
+ public_deps = [
+ ":v8_base",
+ snapshot_target,
+ ]
configs -= [ "//build/config/compiler:chromium_code" ]
configs += [ "//build/config/compiler:no_chromium_code" ]
@@ -1717,31 +1770,16 @@ if (is_component_build) {
}
} else {
group("v8") {
- if (v8_use_snapshot && v8_use_external_startup_data) {
- deps = [
- ":v8_base",
- ":v8_external_snapshot",
- ]
- } else if (v8_use_snapshot) {
- deps = [
- ":v8_base",
- ]
- public_deps = [
- ":v8_snapshot",
- ]
- } else {
- assert(!v8_use_external_startup_data)
- deps = [
- ":v8_base",
- ":v8_nosnapshot",
- ]
- }
-
+ public_deps = [
+ ":v8_base",
+ snapshot_target,
+ ]
public_configs = [ ":external_config" ]
}
}
if ((current_toolchain == host_toolchain && v8_toolset_for_d8 == "host") ||
+ (current_toolchain == snapshot_toolchain && v8_toolset_for_d8 == "host") ||
(current_toolchain != host_toolchain && v8_toolset_for_d8 == "target")) {
executable("d8") {
sources = [
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 0dff96acc7..3c6e5d8850 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,1210 +1,7 @@
-2015-08-19: Version 4.6.85
+2015-05-17: Sentinel
- Performance and stability improvements on all platforms.
-
-
-2015-08-19: Version 4.6.84
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-19: Version 4.6.83
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-18: Version 4.6.82
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-18: Version 4.6.81
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-18: Version 4.6.80
-
- Filter out slot buffer slots, that point to SMIs in dead objects
- (Chromium issues 454297, 519577).
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-17: Version 4.6.79
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-17: Version 4.6.78
-
- Put V8 extras into the snapshot.
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-15: Version 4.6.77
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-14: Version 4.6.76
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-14: Version 4.6.75
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-14: Version 4.6.74
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-13: Version 4.6.73
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-13: Version 4.6.72
-
- Stage sloppy classes (issue 3305).
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-13: Version 4.6.71
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-12: Version 4.6.70
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-12: Version 4.6.69
-
- Stage --harmony-array-includes (issue 3575).
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-12: Version 4.6.68
-
- Use a new lexical context for sloppy-mode eval (issue 4288).
-
- Add includes method to typed arrays (issue 3575).
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-11: Version 4.6.67
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-11: Version 4.6.66
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-11: Version 4.6.65
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-10: Version 4.6.64
-
- Disable --global-var-shortcuts (Chromium issue 517778).
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-10: Version 4.6.63
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-09: Version 4.6.62
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-08: Version 4.6.61
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-08: Version 4.6.60
-
- [IC] Make SeededNumberDictionary::UpdateMaxNumberKey prototype aware
- (issue 4335).
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-08: Version 4.6.59
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-07: Version 4.6.58
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-07: Version 4.6.57
-
- Rename "extras exports" to "extras binding" (Chromium issue 507133).
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-07: Version 4.6.56
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-06: Version 4.6.55
-
- Fix off-by-one in Array.concat's max index check (Chromium issue
- 516592).
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-06: Version 4.6.54
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-06: Version 4.6.53
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-05: Version 4.6.52
-
- Ship --harmony-new-target (issue 3887).
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-04: Version 4.6.51
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-04: Version 4.6.50
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-03: Version 4.6.49
-
- SIMD.js Add the other SIMD Phase 1 types (issue 4124).
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-03: Version 4.6.48
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-03: Version 4.6.47
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-01: Version 4.6.46
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-01: Version 4.6.45
-
- Performance and stability improvements on all platforms.
-
-
-2015-08-01: Version 4.6.44
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-31: Version 4.6.43
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-31: Version 4.6.42
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-31: Version 4.6.41
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-30: Version 4.6.40
-
- Pass the kGCCallbackFlagForced flag when invoking
- Heap::CollectAllGarbage from AdjustAmountOfExternalAllocatedMemory
- (Chromium issue 511294).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-30: Version 4.6.39
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-30: Version 4.6.38
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-29: Version 4.6.37
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-28: Version 4.6.36
-
- Fix prototype registration upon SlowToFast migration (Chromium issue
- 513602).
-
- Bugfix: Incorrect type feedback vector structure on recompile (Chromium
- issue 514526).
-
- Reland of "Remove ExternalArray, derived types, and element kinds"
- (issue 3996).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-28: Version 4.6.35
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-28: Version 4.6.34
-
- Remove ExternalArray, derived types, and element kinds (issue 3996).
-
- Make V8 compile with MSVS 2015 (issue 4326).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-27: Version 4.6.33
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-26: Version 4.6.32
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-25: Version 4.6.31
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-25: Version 4.6.30
-
- Make dates default to the local timezone if none specified (issue 4242,
- Chromium issue 391730).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-24: Version 4.6.29
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-24: Version 4.6.28
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-23: Version 4.6.27
-
- Fix check for a date with a 24th hour (Chromium issue 174609).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-23: Version 4.6.26
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-22: Version 4.6.25
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-22: Version 4.6.24
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-22: Version 4.6.23
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-21: Version 4.6.22
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-21: Version 4.6.21
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-21: Version 4.6.20
-
- Don't run the second pass of the pending phantom callbacks if the heap
- has been torn down (Chromium issue 511204).
-
- Debugger: prepare code for debugging on a per-function basis (issue
- 4132).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-20: Version 4.6.19
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-20: Version 4.6.18
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-19: Version 4.6.17
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-18: Version 4.6.16
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-18: Version 4.6.15
-
- Make NumberFormat use the ICU currency data, fix bug in NumberFormat
- (Chromium issues 304722, 435465, 473104).
-
- Properly fix enumerate / Object.keys wrt access checked objects
- (Chromium issue 509936).
-
- Fix object enumeration wrt access checked objects (Chromium issue
- 509936).
-
- Fix DefineOwnProperty for data properties wrt failed access checks
- (Chromium issue 509936).
-
- Fix GetOwnPropertyNames on access-checked objects (Chromium issue
- 509936).
-
- Fix getPrototypeOf for access checked objects (Chromium issue 509936).
-
- Delete APIs deprecated since last release.
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-17: Version 4.6.14
-
- Array.prototype.reverse should call [[HasProperty]] on elements before
- [[Get]] (issue 4223).
-
- In RegExp, lastIndex is read with ToLength, not ToInteger (issue 4244).
-
- Stage --harmony-new-target (issue 3887).
-
- Re-ship harmony spread calls and spread arrays (issue 3018).
-
- Expose SIMD.Float32x4 type to Javascript. This CL exposes the
- constructor function, defines type related information, and implements
- value type semantics. It also refactors test/mjsunit/samevalue.js to
- test SameValue and SameValueZero (issue 4124).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-17: Version 4.6.13
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-16: Version 4.6.12
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-16: Version 4.6.11
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-16: Version 4.6.10
-
- Expose SIMD.Float32x4 type to Javascript. This CL exposes the
- constructor function, defines type related information, and implements
- value type semantics. It also refactors test/mjsunit/samevalue.js to
- test SameValue and SameValueZero (issue 4124).
-
- Fix runtime-atomics for Win 10 SDK and remove volatile (Chromium issues
- 440500, 491424).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-15: Version 4.6.9
-
- Let the second pass phantom callbacks run in a separate task on the
- foreground thread.
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-15: Version 4.6.8
-
- Optimize String.prototype.includes (issue 3807).
-
- Unship spread calls and spread arrays (issue 4298).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-15: Version 4.6.7
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-14: Version 4.6.6
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-14: Version 4.6.5
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-14: Version 4.6.4
-
- MIPS64: Fix BlockTrampolinePoolFor() to emit trampoline before blocking,
- if needed (issue 4294).
-
- Add convenience method for converting v8::PersistentBase to v8::Local.
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-13: Version 4.6.3
-
- MIPS: Fix BlockTrampolinePoolFor() to emit trampoline before blocking,
- if needed (issue 4294).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-13: Version 4.6.2
-
- [arm] CheckConstPool between TurboFan instructions (issue 4292).
-
- Fix keyed access of primitive objects in the runtime. For now it uses a
- pretty slow path for accessing strings by wrapping it into a new
- temporary wrapper (issues 3088, 4042).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-12: Version 4.6.1
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-09: Version 4.5.107
-
- [arm] Don't call branch_offset within CheckConstPool (issue 4292).
-
- [arm] Fix missing CheckBuffer for branches (issue 4292).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-09: Version 4.5.106
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-09: Version 4.5.105
-
- Guard @@isConcatSpreadable behind a flag (Chromium issue 507553).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-08: Version 4.5.104
-
- [x64] Fix handling of Smi constants in LSubI and LBitI (Chromium issue
- 478612).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-08: Version 4.5.103
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-08: Version 4.5.102
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-07: Version 4.5.101
-
- Move compatible receiver check from CompileHandler to UpdateCaches
- (Chromium issue 505374).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-07: Version 4.5.100
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-07: Version 4.5.99
-
- unicode-decoder: fix out-of-band write in utf16 (issue 4274).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-06: Version 4.5.98
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-04: Version 4.5.97
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-03: Version 4.5.96
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-03: Version 4.5.95
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-02: Version 4.5.94
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-02: Version 4.5.93
-
- Include Harmony Array/TypedArray methods unconditionally (Chromium issue
- 504629).
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-02: Version 4.5.92
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-01: Version 4.5.91
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-01: Version 4.5.90
-
- Performance and stability improvements on all platforms.
-
-
-2015-07-01: Version 4.5.89
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-30: Version 4.5.88
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-30: Version 4.5.87
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-30: Version 4.5.86
-
- Ensure mjsunit tests use dashes not underscores in flags directives
- (Chromium issue 505228).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-29: Version 4.5.85
-
- Fix flag convention in handle count tests and comment (Chromium issue
- 505228).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-29: Version 4.5.84
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-27: Version 4.5.83
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-26: Version 4.5.82
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-26: Version 4.5.81
-
- Remove obsolete options in ScriptCompiler::CompileOptions (Chromium
- issue 399580).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-25: Version 4.5.80
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-25: Version 4.5.79
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-25: Version 4.5.78
-
- Serializer: clear next link in weak cells (Chromium issue 503552).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-24: Version 4.5.77
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-24: Version 4.5.76
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-24: Version 4.5.75
-
- Date() should not depend on Date.prototype.toString (issue 4225).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-23: Version 4.5.74
-
- Expose Map/Set methods through the API (issue 3340).
-
- [turbofan] NaN is never truish (issue 4207).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-23: Version 4.5.73
-
- Re-ship Harmony Array/TypedArray methods (issue 3578).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-23: Version 4.5.72
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-23: Version 4.5.71
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-20: Version 4.5.70
-
- Ship Harmony Array/TypedArray methods (issue 3578).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-20: Version 4.5.69
-
- Ship arrow functions (issue 2700).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-19: Version 4.5.68
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-19: Version 4.5.67
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-19: Version 4.5.66
-
- Ship arrow functions (issue 2700).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-18: Version 4.5.65
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-18: Version 4.5.64
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-18: Version 4.5.63
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-17: Version 4.5.62
-
- Hydrogen object literals: always initialize in-object properties
- (Chromium issue 500497).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-17: Version 4.5.61
-
- Add %TypedArray% to proto chain (issue 4085).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-17: Version 4.5.60
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-17: Version 4.5.59
-
- [crankshaft] Fix wrong bailout points in for-in loop body (Chromium
- issue 500435).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-16: Version 4.5.58
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-16: Version 4.5.57
-
- Inline code generation for %_IsTypedArray (issue 4085).
-
- Allow TypedArrays to be initialized with iterables (issue 4090).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-15: Version 4.5.56
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-15: Version 4.5.55
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-14: Version 4.5.54
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-13: Version 4.5.53
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-12: Version 4.5.52
-
- Map::TryUpdate() must be in sync with Map::Update() (issue 4173).
-
- Add ToObject call in Array.prototype.sort (issue 4125).
-
- In Array.of and Array.from, fall back to DefineOwnProperty (issue 4168).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-12: Version 4.5.51
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-11: Version 4.5.50
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-11: Version 4.5.49
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-11: Version 4.5.48
-
- Support rest parameters in arrow functions (issue 2700).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-10: Version 4.5.47
-
- Implement %TypedArray%.prototype.slice (issue 3578).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-09: Version 4.5.46
-
- Stage ES6 arrow functions (issue 2700).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-09: Version 4.5.45
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-09: Version 4.5.44
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-08: Version 4.5.43
-
- [for-in] Make ForInNext and ForInFilter deal properly with exceptions
- (Chromium issue 496331).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-08: Version 4.5.42
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-06: Version 4.5.41
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-05: Version 4.5.40
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-05: Version 4.5.39
-
- Stage ES6 Array and TypedArray methods (issue 3578).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-05: Version 4.5.38
-
- Implement %TypedArray%.prototype.{reduce,reduceRight} (issue 3578).
-
- Add support for Embedded Constant Pools for PPC and Arm (Chromium issue
- 478811).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-04: Version 4.5.37
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-04: Version 4.5.36
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-04: Version 4.5.35
-
- Flatten the Arrays returned and consumed by the v8::Map API (Chromium
- issue 478263).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-03: Version 4.5.34
-
- Also allocate small typed arrays on heap when initialized from an array-
- like (issue 3996).
-
- Implement %TypedArray%.prototype.{reduce,reduceRight} (issue 3578).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-03: Version 4.5.33
-
- Add support for Embedded Constant Pools for PPC and Arm (Chromium issue
- 478811).
-
- Implement %TypedArray%.prototype.{toString,toLocaleString,join} (issue
- 3578).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-03: Version 4.5.32
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-02: Version 4.5.31
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-02: Version 4.5.30
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-01: Version 4.5.29
-
- Reland "Re-enable on-heap typed array allocation" (issue 3996).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-01: Version 4.5.28
-
- Re-enable on-heap typed array allocation (issue 3996).
-
- Also expose DefineOwnProperty (Chromium issue 475206).
-
- Performance and stability improvements on all platforms.
-
-
-2015-06-01: Version 4.5.27
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-31: Version 4.5.26
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-30: Version 4.5.25
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-29: Version 4.5.24
-
- Debugger: consider try-finally scopes not catching wrt debug events
- (Chromium issue 492522).
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-29: Version 4.5.23
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-29: Version 4.5.22
-
- Do not eagerly convert exception to string when creating a message
- object (Chromium issue 490680).
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-28: Version 4.5.21
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-28: Version 4.5.20
-
- Introduce v8::Object::CreateDataProperty (Chromium issue 475206).
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-27: Version 4.5.19
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-27: Version 4.5.18
-
- Add {Map,Set}::FromArray to the API (issue 3340).
-
- Add {Map,Set}::AsArray to the API (issue 3340).
-
- Add basic API support for Map & Set (issue 3340).
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-26: Version 4.5.17
-
- Correctly hook up materialized receiver into the evaluation context
- chain (Chromium issue 491943).
-
- Implement bookmarks for ExternalStreamingStream (Chromium issue 470930).
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-26: Version 4.5.16
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-26: Version 4.5.15
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-23: Version 4.5.14
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-22: Version 4.5.13
-
- Remove v8::Private.
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-22: Version 4.5.12
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-22: Version 4.5.11
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-21: Version 4.5.10
-
- Re-land %TypedArray%.prototype.{map,filter,some} (issue 3578).
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-21: Version 4.5.9
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-20: Version 4.5.8
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-20: Version 4.5.7
-
- Implement %TypedArray%.{lastI,i}ndexOf (issue 3578).
-
- Implement %TypedArray%.prototype.sort (issue 3578).
-
- Implement %TypedArray%.reverse (issue 3578).
-
- Implement %TypedArray%.prototype.{map,filter,some,reduce,reduceRight}
- (issue 3578).
-
- Fix has_pending_exception logic in API's Array::CloneElementAt (issue
- 4103).
-
- Adding api to get last gc object statistics for chrome://tracing
- (Chromium issue 476013).
-
- Fix harmless HGraph verification failure after hoisting inlined bounds
- checks (Chromium issue 487608).
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-20: Version 4.5.6
-
- Add TypedArray.from method (issue 3578).
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-19: Version 4.5.5
-
- ARM64: Propagate notification about aborted compilation from
- RegExpEngine to MacroAssembler (Chromium issue 489290).
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-18: Version 4.5.4
-
- Performance and stability improvements on all platforms.
-
-
-2015-05-18: Version 4.5.3
-
- Performance and stability improvements on all platforms.
+ The ChangeLog file is no longer maintained on bleeding_edge. This
+ sentinel should stay on top of this list.
2015-05-17: Version 4.5.2
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index c8c7de080c..85d14c75fd 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -8,23 +8,25 @@ vars = {
deps = {
"v8/build/gyp":
- Var("git_url") + "/external/gyp.git" + "@" + "6ee91ad8659871916f9aa840d42e1513befdf638",
+ Var("git_url") + "/external/gyp.git" + "@" + "01528c7244837168a1c80f06ff60fa5a9793c824",
"v8/third_party/icu":
- Var("git_url") + "/chromium/deps/icu.git" + "@" + "89dcdec16381883782b9cc9cff38e00f047a0f46",
+ Var("git_url") + "/chromium/deps/icu.git" + "@" + "423fc7e1107fb08ccf007c4aeb76dcab8b2747c1",
"v8/buildtools":
- Var("git_url") + "/chromium/buildtools.git" + "@" + "565d04e8741429fb1b4f26d102f2c6c3b849edeb",
+ Var("git_url") + "/chromium/buildtools.git" + "@" + "e7111440c07a883b82ffbbe6d26c744dfc6c9673",
+ "v8/tools/swarming_client":
+ Var('git_url') + '/external/swarming.client.git' + '@' + "6e5d2b21f0ac98396cd736097a985346feed1328",
"v8/testing/gtest":
Var("git_url") + "/external/googletest.git" + "@" + "9855a87157778d39b95eccfb201a9dc90f6d61c6",
"v8/testing/gmock":
Var("git_url") + "/external/googlemock.git" + "@" + "0421b6f358139f02e102c9c332ce19a33faf75be",
"v8/tools/clang":
- Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "5b12e334ec0e571a8e1f68d028dc5427b58c17ec",
+ Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "0150e39a3112dbc7e4c7a3ab25276b8d7781f3b6",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
- Var("git_url") + "/android_tools.git" + "@" + "9e9b6169a098bc19986e44fbbf65e4c29031e4bd",
+ Var("git_url") + "/android_tools.git" + "@" + "4238a28593b7e6178c95431f91ca8c24e45fa7eb",
},
"win": {
"v8/third_party/cygwin":
@@ -103,6 +105,13 @@ hooks = [
],
},
{
+ # Pull gold plugin if needed or requested via GYP_DEFINES.
+ # Note: This must run before the clang update.
+ 'name': 'gold_plugin',
+ 'pattern': '.',
+ 'action': ['python', 'v8/build/download_gold_plugin.py'],
+ },
+ {
# Pull clang if needed or requested via GYP_DEFINES.
# Note: On Win, this should run after win_toolchain, as it may use it.
'name': 'clang',
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index baa3b52ca6..28c1af2e07 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -143,6 +143,10 @@ endif
ifeq ($(deprecationwarnings), on)
GYPFLAGS += -Dv8_deprecation_warnings=1
endif
+# vectorstores=on
+ifeq ($(vectorstores), on)
+ GYPFLAGS += -Dv8_vector_stores=1
+endif
# imminentdeprecationwarnings=on
ifeq ($(imminentdeprecationwarnings), on)
GYPFLAGS += -Dv8_imminent_deprecation_warnings=1
@@ -240,7 +244,7 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87 ppc ppc64
+ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 1bcd9922c5..ab9bba8845 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -67,7 +67,6 @@ def _V8PresubmitChecks(input_api, output_api):
input_api.PresubmitLocalPath(), 'tools'))
from presubmit import CppLintProcessor
from presubmit import SourceProcessor
- from presubmit import CheckRuntimeVsNativesNameClashes
from presubmit import CheckExternalReferenceRegistration
from presubmit import CheckAuthorizedAuthor
@@ -78,9 +77,6 @@ def _V8PresubmitChecks(input_api, output_api):
results.append(output_api.PresubmitError(
"Copyright header, trailing whitespaces and two empty lines " \
"between declarations check failed"))
- if not CheckRuntimeVsNativesNameClashes(input_api.PresubmitLocalPath()):
- results.append(output_api.PresubmitError(
- "Runtime/natives name clash check failed"))
if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError(
"External references registration check failed"))
@@ -237,7 +233,6 @@ def _SkipTreeCheck(input_api, output_api):
"""Check the env var whether we want to skip tree check.
Only skip if include/v8-version.h has been updated."""
src_version = 'include/v8-version.h'
- FilterFile = lambda file: file.LocalPath() == src_version
if not input_api.AffectedSourceFiles(
lambda file: file.LocalPath() == src_version):
return False
diff --git a/deps/v8/README.md b/deps/v8/README.md
index 5cd4b5811a..c649f02ec2 100644
--- a/deps/v8/README.md
+++ b/deps/v8/README.md
@@ -31,3 +31,10 @@ configuration in `.git/config`:
fetch = +refs/branch-heads/*:refs/remotes/branch-heads/*
fetch = +refs/tags/*:refs/tags/*
+
+
+Contributing
+=============
+
+Please follow the instructions mentioned on the
+[V8 wiki](https://code.google.com/p/v8-wiki/wiki/Contributing).
diff --git a/deps/v8/build/download_gold_plugin.py b/deps/v8/build/download_gold_plugin.py
index 7a0c21b8d7..b8131fd449 100755
--- a/deps/v8/build/download_gold_plugin.py
+++ b/deps/v8/build/download_gold_plugin.py
@@ -8,11 +8,20 @@
import json
import os
+import re
+import platform
import shutil
import subprocess
import sys
import zipfile
+# Bail out on windows and cygwin.
+if "win" in platform.system().lower():
+ # Python 2.7.6 hangs at the second path.insert command on windows. Works
+ # with python 2.7.8.
+ print "Gold plugin download not supported on windows."
+ sys.exit(0)
+
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
sys.path.insert(0, os.path.join(CHROME_SRC, 'tools'))
@@ -30,7 +39,28 @@ CLANG_REVISION = os.popen(CLANG_UPDATE_PY + ' --print-revision').read().rstrip()
CLANG_BUCKET = 'gs://chromium-browser-clang/Linux_x64'
+GOLD_PLUGIN_PATH = os.path.join(LLVM_BUILD_PATH, 'lib', 'LLVMgold.so')
+
+sys.path.insert(0, os.path.join(CHROME_SRC, 'tools', 'clang', 'scripts'))
+
+import update
+
def main():
+ if not re.search(r'cfi_vptr=1', os.environ.get('GYP_DEFINES', '')):
+ # Bailout if this is not a cfi build.
+ print 'Skipping gold plugin download for non-cfi build.'
+ return 0
+ if (os.path.exists(GOLD_PLUGIN_PATH) and
+ update.ReadStampFile().strip() == update.PACKAGE_VERSION):
+ # Bailout if clang is up-to-date. This requires the script to be run before
+ # the clang update step! I.e. afterwards clang would always be up-to-date.
+ print 'Skipping gold plugin download. File present and clang up to date.'
+ return 0
+
+ # Make sure this works on empty checkouts (i.e. clang not downloaded yet).
+ if not os.path.exists(LLVM_BUILD_PATH):
+ os.makedirs(LLVM_BUILD_PATH)
+
targz_name = 'llvmgold-%s.tgz' % CLANG_REVISION
remote_path = '%s/%s' % (CLANG_BUCKET, targz_name)
diff --git a/deps/v8/build/features.gypi b/deps/v8/build/features.gypi
index 21e55740ae..741ba75eb6 100644
--- a/deps/v8/build/features.gypi
+++ b/deps/v8/build/features.gypi
@@ -67,6 +67,9 @@
# Set to 1 to enable building with wasm prototype.
'v8_wasm%': 0,
+
+ # Enable/disable JavaScript API accessors.
+ 'v8_js_accessors%': 0,
},
'target_defaults': {
'conditions': [
@@ -109,6 +112,9 @@
['v8_wasm!=0', {
'defines': ['V8_WASM',],
}],
+ ['v8_js_accessors!=0', {
+ 'defines': ['V8_JS_ACCESSORS'],
+ }],
], # conditions
'configurations': {
'DebugBaseCommon': {
diff --git a/deps/v8/build/isolate.gypi b/deps/v8/build/isolate.gypi
new file mode 100644
index 0000000000..9e2a3bfee3
--- /dev/null
+++ b/deps/v8/build/isolate.gypi
@@ -0,0 +1,74 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is meant to be included into a target to provide a rule
+# to "build" .isolate files into a .isolated file.
+#
+# To use this, create a gyp target with the following form:
+# 'conditions': [
+# ['test_isolation_mode != "noop"', {
+# 'targets': [
+# {
+# 'target_name': 'foo_test_run',
+# 'type': 'none',
+# 'dependencies': [
+# 'foo_test',
+# ],
+# 'includes': [
+# '../build/isolate.gypi',
+# ],
+# 'sources': [
+# 'foo_test.isolate',
+# ],
+# },
+# ],
+# }],
+# ],
+#
+# Note: foo_test.isolate is both included and listed as a source file. This is
+# an inherent property of the .isolate format. It permits defining GYP
+# variables, but it is a stricter format than GYP so that isolate.py can read
+# it.
+#
+# The generated .isolated file will be:
+# <(PRODUCT_DIR)/foo_test.isolated
+#
+# See http://dev.chromium.org/developers/testing/isolated-testing/for-swes
+# for more information.
+
+{
+ 'rules': [
+ {
+ 'rule_name': 'isolate',
+ 'extension': 'isolate',
+ 'inputs': [
+ # Files that are known to be involved in this step.
+ '<(DEPTH)/tools/swarming_client/isolate.py',
+ '<(DEPTH)/tools/swarming_client/run_isolated.py',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).isolated',
+ ],
+ 'action': [
+ 'python',
+ '<(DEPTH)/tools/swarming_client/isolate.py',
+ '<(test_isolation_mode)',
+ '--isolated', '<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).isolated',
+ '--isolate', '<(RULE_INPUT_PATH)',
+
+ # Variables should use the -V FOO=<(FOO) form so frequent values,
+ # like '0' or '1', aren't stripped out by GYP. Run 'isolate.py help'
+ # for more details.
+
+ # Path variables are used to replace file paths when loading a .isolate
+ # file
+ '--path-variable', 'DEPTH', '<(DEPTH)',
+ '--path-variable', 'PRODUCT_DIR', '<(PRODUCT_DIR)',
+
+ '--config-variable', 'OS=<(OS)',
+ '--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
+ ],
+ },
+ ],
+}
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index bf06bfa20f..7250579d27 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -95,6 +95,8 @@
'cfi_vptr%': 0,
'cfi_diag%': 0,
+ 'cfi_blacklist%': '<(base_dir)/tools/cfi/blacklist.txt',
+
# goma settings.
# 1 to use goma.
# If no gomadir is set, it uses the default gomadir.
@@ -122,6 +124,14 @@
}, {
'linux_use_bundled_gold%': 0,
}],
+
+ # TODO(machenbach): Remove the conditions as more configurations are
+ # supported.
+ ['OS=="linux"', {
+ 'test_isolation_mode%': 'check',
+ }, {
+ 'test_isolation_mode%': 'noop',
+ }],
],
},
'base_dir%': '<(base_dir)',
@@ -143,9 +153,12 @@
'use_lto%': '<(use_lto)',
'cfi_vptr%': '<(cfi_vptr)',
'cfi_diag%': '<(cfi_diag)',
+ 'cfi_blacklist%': '<(cfi_blacklist)',
+ 'test_isolation_mode%': '<(test_isolation_mode)',
- # Add a simple extra solely for the purpose of the cctests
+    # Add simple extras solely for the purpose of the cctests
'v8_extra_library_files': ['../test/cctest/test-extra.js'],
+ 'v8_experimental_extra_library_files': ['../test/cctest/test-experimental-extra.js'],
# .gyp files or targets should set v8_code to 1 if they build V8 specific
# code, as opposed to external code. This variable is used to control such
@@ -399,8 +412,25 @@
# things when their commandline changes). Nothing should ever read this
# define.
'defines': ['CR_CLANG_REVISION=<!(<(DEPTH)/tools/clang/scripts/update.sh --print-revision)'],
- 'cflags+': [
- '-Wno-format-pedantic',
+ 'conditions': [
+ ['host_clang==1', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'cflags+': [
+ '-Wno-format-pedantic',
+ ],
+ }],
+ ],
+ }],
+ ['clang==1', {
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'cflags+': [
+ '-Wno-format-pedantic',
+ ],
+ }],
+ ],
+ }],
],
}],
],
@@ -1247,6 +1277,12 @@
'-fno-sanitize-trap=cfi',
'-fsanitize-recover=cfi',
],
+ 'cflags_cc!': [
+ '-fno-rtti',
+ ],
+ 'cflags!': [
+ '-fno-rtti',
+ ],
'ldflags': [
'-fno-sanitize-trap=cfi',
'-fsanitize-recover=cfi',
@@ -1263,6 +1299,7 @@
'-fsanitize=cfi-vcall',
'-fsanitize=cfi-derived-cast',
'-fsanitize=cfi-unrelated-cast',
+ '-fsanitize-blacklist=<(cfi_blacklist)',
],
'ldflags': [
'-fsanitize=cfi-vcall',
diff --git a/deps/v8/build/toolchain.gypi b/deps/v8/build/toolchain.gypi
index a8a3b56ec2..bd081e1791 100644
--- a/deps/v8/build/toolchain.gypi
+++ b/deps/v8/build/toolchain.gypi
@@ -277,10 +277,12 @@
}],
],
}],
- # Disable LTO for v8
- # v8 is optimized for speed, which takes precedence over
- # size optimization in LTO.
- ['use_lto==1', {
+ # Disable GCC LTO for v8
+ # v8 is optimized for speed. Because GCC LTO merges flags at link
+ # time, we disable LTO to prevent any -O2 flags from taking
+ # precedence over v8's -Os flag. However, LLVM LTO does not work
+ # this way so we keep LTO enabled under LLVM.
+ ['clang==0 and use_lto==1', {
'cflags!': [
'-flto',
'-ffat-lto-objects',
@@ -339,7 +341,8 @@
'cflags': ['-march=i586'],
}], # v8_target_arch=="x87"
['(v8_target_arch=="mips" or v8_target_arch=="mipsel" \
- or v8_target_arch=="mips64el") and v8_target_arch==target_arch', {
+ or v8_target_arch=="mips64" or v8_target_arch=="mips64el") \
+ and v8_target_arch==target_arch', {
'target_conditions': [
['_toolset=="target"', {
# Target built with a Mips CXX compiler.
@@ -741,7 +744,7 @@
}],
],
}], # v8_target_arch=="mipsel"
- ['v8_target_arch=="mips64el"', {
+ ['v8_target_arch=="mips64el" or v8_target_arch=="mips64"', {
'defines': [
'V8_TARGET_ARCH_MIPS64',
],
@@ -751,6 +754,16 @@
'CAN_USE_FPU_INSTRUCTIONS',
],
}],
+ [ 'v8_host_byteorder=="little"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_MIPS64_LE',
+ ],
+ }],
+ [ 'v8_host_byteorder=="big"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_MIPS64_BE',
+ ],
+ }],
[ 'v8_use_mips_abi_hardfloat=="true"', {
'defines': [
'__mips_hard_float=1',
@@ -767,11 +780,17 @@
'conditions': [
['v8_target_arch==target_arch', {
'cflags': [
- '-EL',
'-Wno-error=array-bounds', # Workaround https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56273
],
- 'ldflags': ['-EL'],
'conditions': [
+ ['v8_target_arch=="mips64el"', {
+ 'cflags': ['-EL'],
+ 'ldflags': ['-EL'],
+ }],
+ ['v8_target_arch=="mips64"', {
+ 'cflags': ['-EB'],
+ 'ldflags': ['-EB'],
+ }],
[ 'v8_use_mips_abi_hardfloat=="true"', {
'cflags': ['-mhard-float'],
'ldflags': ['-mhard-float'],
diff --git a/deps/v8/docs/arm_debugging_with_the_simulator.md b/deps/v8/docs/arm_debugging_with_the_simulator.md
new file mode 100644
index 0000000000..eceb7a58ab
--- /dev/null
+++ b/deps/v8/docs/arm_debugging_with_the_simulator.md
@@ -0,0 +1,205 @@
+# ARM debugging with the simulator
+
+The simulator and debugger can be very helpful when working with v8 code generation.
+
+ * It is convenient as it allows you to test code generation without access to actual hardware.
+ * No cross or native compilation is needed.
+ * The simulator fully supports the debugging of generated code.
+
+Please note that this simulator is designed for v8's purposes: only the features used by v8 are implemented, and you might encounter unimplemented features or instructions. If you do, feel free to implement them and submit the code!
+
+
+## Details on the ARM Debugger
+
+Compile the ARM simulator shell with:
+```
+make arm.debug
+```
+on an x86 host using your regular compiler.
+
+### Starting the Debugger
+There are different ways of starting the debugger:
+
+```
+$ out/arm.debug/d8 --stop_sim_at <n>
+```
+The simulator will start the debugger after executing n instructions.
+
+```
+$ out/arm.debug/d8 --stop_at <function name>
+```
+
+The simulator will stop at the given JavaScript function.
+
+You can also generate `stop` instructions directly in the ARM code. Stops are generated with
+
+```
+Assembler::stop(const char* msg, Condition cond, int32_t code)
+```
+
+When the Simulator hits a stop, it will print msg and start the debugger.
+
+
+### Debugging commands
+
+**Usual commands:**
+
+Enter `help` at the debugger prompt to get details on the available commands. These include the usual gdb-like commands, such as `stepi`, `cont`, `disasm`, etc. If the Simulator is run under gdb, the "gdb" debugger command will give control to gdb. You can then use `cont` from gdb to go back to the debugger.
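+
+For illustration, the handoff might look like the following hypothetical session (`bt` stands in for any gdb command, and the gdb output is elided):
+
+```
+sim> gdb
+(gdb) bt
+...
+(gdb) cont
+sim>
+```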
+
+
+**Debugger specific commands:**
+
+Here's a list of the ARM debugger specific commands, along with examples.
+The JavaScript file "func.js" used below contains:
+
+```
+function test() {
+  print("In function test.");
+}
+test();
+```
+
+ * **printobject** `<`register`>` (alias `po`) describes an object held in a register.
+
+```
+$ out/arm.debug/d8 func.js --stop_at test
+
+Simulator hit stop-at
+ 0xb544d6a8 e92d4902 stmdb sp!, {r1, r8, fp, lr}
+sim> print r0
+r0: 0xb547ec15 -1253577707
+sim> printobject r0
+r0:
+0xb547ec15: [Function]
+ - map = 0x0xb540ff01
+ - initial_map =
+ - shared_info = 0xb547eb2d <SharedFunctionInfo>
+ - name = #test
+ - context = 0xb60083f1 <FixedArray[52]>
+ - code = 0xb544d681 <Code>
+ #arguments: 0xb545a15d <Proxy> (callback)
+ #length: 0xb545a14d <Proxy> (callback)
+ #name: 0xb545a155 <Proxy> (callback)
+ #prototype: 0xb545a145 <Proxy> (callback)
+ #caller: 0xb545a165 <Proxy> (callback)
+```
+
+ * **break** `<`address`>` inserts a breakpoint at the specified address.
+
+ * **del** deletes the current breakpoint.
+
+You can have only one such breakpoint. This is useful if you want to insert a breakpoint at runtime.
+```
+$ out/arm.debug/d8 func.js --stop_at test
+
+Simulator hit stop-at
+ 0xb53a1ee8 e92d4902 stmdb sp!, {r1, r8, fp, lr}
+sim> disasm 5
+ 0xb53a1ee8 e92d4902 stmdb sp!, {r1, r8, fp, lr}
+ 0xb53a1eec e28db008 add fp, sp, #8
+ 0xb53a1ef0 e59a200c ldr r2, [r10, #+12]
+ 0xb53a1ef4 e28fe004 add lr, pc, #4
+ 0xb53a1ef8 e15d0002 cmp sp, r2
+sim> break 0xb53a1ef8
+sim> cont
+ 0xb53a1ef8 e15d0002 cmp sp, r2
+sim> disasm 5
+ 0xb53a1ef8 e15d0002 cmp sp, r2
+ 0xb53a1efc 359ff034 ldrcc pc, [pc, #+52]
+ 0xb53a1f00 e5980017 ldr r0, [r8, #+23]
+ 0xb53a1f04 e59f1030 ldr r1, [pc, #+48]
+ 0xb53a1f08 e52d0004 str r0, [sp, #-4]!
+sim> break 0xb53a1f08
+setting breakpoint failed
+sim> del
+sim> break 0xb53a1f08
+sim> cont
+ 0xb53a1f08 e52d0004 str r0, [sp, #-4]!
+sim> del
+sim> cont
+In function test.
+```
+
+ * Generated `stop` instructions work as breakpoints, with a few additional features.
+
+The first argument is a help message, the second is the condition, and the third is the stop code. If a code is specified and is less than 256, the stop is said to be "watched" and can be disabled/enabled; a counter also keeps track of how many times the Simulator hits this code.
+
+Suppose we are working on this v8 C++ code, which is reached when running our JavaScript file:
+
+```
+__ stop("My stop.", al, 123);
+__ mov(r0, r0);
+__ mov(r0, r0);
+__ mov(r0, r0);
+__ mov(r0, r0);
+__ mov(r0, r0);
+__ stop("My second stop.", al, 0x1);
+__ mov(r1, r1);
+__ mov(r1, r1);
+__ mov(r1, r1);
+__ mov(r1, r1);
+__ mov(r1, r1);
+```
+
+Here's a sample debugging session:
+
+We hit the first stop.
+
+```
+Simulator hit My stop.
+ 0xb53559e8 e1a00000 mov r0, r0
+```
+
+We can see the following stop using disasm. The address of the message string is inlined in the code after the svc stop instruction.
+
+```
+sim> disasm
+ 0xb53559e8 e1a00000 mov r0, r0
+ 0xb53559ec e1a00000 mov r0, r0
+ 0xb53559f0 e1a00000 mov r0, r0
+ 0xb53559f4 e1a00000 mov r0, r0
+ 0xb53559f8 e1a00000 mov r0, r0
+ 0xb53559fc ef800001 stop 1 - 0x1
+ 0xb5355a00 08338a97 stop message: My second stop
+ 0xb5355a04 e1a00000 mov r1, r1
+ 0xb5355a08 e1a00000 mov r1, r1
+ 0xb5355a0c e1a00000 mov r1, r1
+```
+
+Information can be printed for all (watched) stops which were hit at least once.
+
+```
+sim> stop info all
+Stop information:
+stop 123 - 0x7b: Enabled, counter = 1, My stop.
+sim> cont
+Simulator hit My second stop
+ 0xb5355a04 e1a00000 mov r1, r1
+sim> stop info all
+Stop information:
+stop 1 - 0x1: Enabled, counter = 1, My second stop
+stop 123 - 0x7b: Enabled, counter = 1, My stop.
+```
+
+Stops can be disabled or enabled. (Only available for watched stops.)
+
+```
+sim> stop disable 1
+sim> cont
+Simulator hit My stop.
+ 0xb5356808 e1a00000 mov r0, r0
+sim> cont
+Simulator hit My stop.
+ 0xb5356c28 e1a00000 mov r0, r0
+sim> stop info all
+Stop information:
+stop 1 - 0x1: Disabled, counter = 2, My second stop
+stop 123 - 0x7b: Enabled, counter = 3, My stop.
+sim> stop enable 1
+sim> cont
+Simulator hit My second stop
+ 0xb5356c44 e1a00000 mov r1, r1
+sim> stop disable all
+sim> con
+In function test.
+```
\ No newline at end of file
diff --git a/deps/v8/docs/becoming_v8_committer.md b/deps/v8/docs/becoming_v8_committer.md
new file mode 100644
index 0000000000..16e8491b06
--- /dev/null
+++ b/deps/v8/docs/becoming_v8_committer.md
@@ -0,0 +1,38 @@
+## What is a committer?
+
+Technically, a committer is someone who has write access to the V8 SVN repository. A committer can submit his or her own patches or patches from others.
+
+This privilege is granted with some expectation of responsibility: committers are people who care about the V8 project and want to help meet its goals. A committer is not just someone who can make changes, but someone who has demonstrated his or her ability to collaborate with the team, get the most knowledgeable people to review code, contribute high-quality code, and follow through to fix issues (in code or tests).
+
+A committer is a contributor to the V8 project's success and a citizen helping the project succeed. See V8CommittersResponsibility.
+
+## How do I become a committer?
+
+In a nutshell, contribute 20 non-trivial patches and get at least three different people to review them (you'll need three people to support you). Then ask someone to nominate you. You're demonstrating your:
+
+ * commitment to the project (20 good patches require a lot of your valuable time),
+ * ability to collaborate with the team,
+ * understanding of how the team works (policies, processes for testing and code review, etc.),
+ * understanding of the project's code base and coding style, and
+ * ability to write good code (last but certainly not least)
+
+A current committer nominates you by sending email to v8-committers@googlegroups.com containing:
+
+ * your first and last name
+ * your Google Code email address
+ * an explanation of why you should be a committer,
+ * an embedded list of links to revisions (about your top 10) containing your patches
+
+Two other committers need to second your nomination. If no one objects in 5 working days (U.S.), you're a committer. If anyone objects or wants more information, the committers discuss and usually come to a consensus (within the 5 working days). If issues cannot be resolved, there's a vote among current committers.
+
+Once you get approval from the existing committers, we'll send you instructions for write access to SVN or Git. You'll also be added to v8-committers@googlegroups.com.
+
+In the worst case, this can drag out for two weeks. Keep writing patches! Even in the rare cases where a nomination fails, the objection is usually something easy to address like "more patches" or "not enough people are familiar with this person's work."
+
+## Maintaining committer status
+
+You don't really need to do much to maintain committer status: just keep being awesome and helping the V8 project!
+
+In the unhappy event that a committer continues to disregard good citizenship (or actively disrupts the project), we may need to revoke that person's status. The process is the same as for nominating a new committer: someone suggests the revocation with a good reason, two people second the motion, and a vote may be called if consensus cannot be reached. I hope that's simple enough, and that we never have to test it in practice.
+
+(Source: inspired by http://dev.chromium.org/getting-involved/become-a-committer ) \ No newline at end of file
diff --git a/deps/v8/docs/building_with_gyp.md b/deps/v8/docs/building_with_gyp.md
new file mode 100644
index 0000000000..0183fd2de5
--- /dev/null
+++ b/deps/v8/docs/building_with_gyp.md
@@ -0,0 +1,260 @@
+**Build issues? File a bug at code.google.com/p/v8/issues or ask for help on v8-users@googlegroups.com.**
+
+# Building V8
+
+V8 is built with the help of [GYP](http://code.google.com/p/gyp/). GYP is a meta build system of sorts, as it generates build files for a number of other build systems. How you build therefore depends on what "back-end" build system and compiler you're using.
+The instructions below assume that you already have a [checkout of V8](using_git.md) but haven't yet installed the build dependencies.
+
+If you intend to develop on V8, i.e., send patches and work with changelists, you will need to install the dependencies as described [here](using_git.md).
+
+
+## Prerequisite: Installing GYP
+
+First, you need GYP itself. GYP is fetched together with the other dependencies by running:
+
+```
+gclient sync
+```
+
+## Building
+
+### GCC + make
+
+Requires GNU make 3.81 or later. Should work with any GCC >= 4.8 or any recent clang (3.5 highly recommended).
+
+#### Build instructions
+
+
+The top-level Makefile defines a number of targets for each target architecture (`ia32`, `x64`, `arm`, `arm64`) and mode (`debug`, `optdebug`, or `release`). So your basic command for building is:
+```
+make ia32.release
+```
+
+or analogously for the other architectures and modes. You can build both debug and release binaries with just one command:
+```
+make ia32
+```
+
+To automatically build in release mode for the host architecture:
+```
+make native
+```
+
+You can also build all architectures in a given mode at once:
+```
+make release
+```
+
+Or everything:
+```
+make
+```
+
+#### Optional parameters
+
+ * `-j` specifies the number of parallel build processes. Set it (roughly) to the number of CPU cores your machine has. The GYP/make based V8 build also supports distcc, so you can compile with `-j100` or so, provided you have enough machines around.
+
+ * `OUTDIR=foo` specifies where the compiled binaries go. It defaults to `./out/`. In this directory, a subdirectory will be created for each architecture and mode. You will find the d8 shell's binary in `foo/ia32.release/d8`, for example.
+
+ * `library=shared` or `component=shared_library` (the two are completely equivalent) builds V8 as a shared library (`libv8.so`).
+
+ * `soname_version=1.2.3` is only relevant for shared library builds and configures the SONAME of the library. Both the SONAME and the filename of the library will be `libv8.so.1.2.3` if you specify this. Due to a peculiarity in GYP, if you specify a custom SONAME, the library's path will no longer be encoded in the binaries, so you'll have to run d8 as follows:
+```
+LD_LIBRARY_PATH=out/ia32.release/lib.target out/ia32.release/d8
+```
+
+ * `console=readline` enables readline support for the d8 shell. You need readline development headers for this (`libreadline-dev` on Ubuntu).
+
+ * `disassembler=on` enables the disassembler for release mode binaries (it's always enabled for debug binaries). This is useful if you want to inspect generated machine code.
+
+ * `snapshot=off` disables building with a heap snapshot. Compiling will be a little faster, but V8's start up will be slightly slower.
+
+ * `gdbjit=on` enables GDB JIT support.
+
+ * `liveobjectlist=on` enables the Live Object List feature.
+
+ * `vfp3=off` is only relevant for ARM builds with snapshot and disables the use of VFP3 instructions in the snapshot.
+
+ * `debuggersupport=off` disables the JavaScript debugger.
+
+ * `werror=no` omits the -Werror flag. This is especially useful for C++ compilers that are not officially supported (e.g. newer versions of GCC), so that compile warnings are ignored.
+
+ * `strictaliasing=off` passes the -fno-strict-aliasing flag to GCC. This may help to work around build failures on officially unsupported platforms and/or GCC versions.
+
+ * `regexp=interpreted` chooses the interpreted mode of the irregexp regular expression engine instead of the native code mode.
+
+ * `hardfp=on` creates "hardfp" binaries on ARM.
+
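+For instance, several of these parameters can be combined in a single invocation; the following is just an illustrative sketch using options from the list above:
+```
+make x64.release -j8 OUTDIR=mybuild library=shared disassembler=on
+```
+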
+### Ninja
+
+To build d8:
+```
+export GYP_GENERATORS=ninja
+build/gyp_v8
+ninja -C out/Debug d8
+```
+
+Specify `out/Release` for a release build. I recommend setting up an alias so that you don't need to type out that build directory path.
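+
+For example, a shell alias along these lines (the alias name is arbitrary) saves the typing:
+```
+alias nd8='ninja -C out/Debug d8'
+```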
+
+If you want to build all targets, use `ninja -C out/Debug all`. It's faster to build only the target you're working on, like `d8` or `unittests`.
+
+Note: You need to set `v8_target_arch` if you want a non-native build, i.e. either
+```
+export GYP_DEFINES="v8_target_arch=arm"
+build/gyp_v8 ...
+```
+or
+```
+build/gyp_v8 -Dv8_target_arch=arm ...
+```
+
+
+#### Using goma (Googlers only)
+
+To use goma you need to set the `use_goma` gyp define, either by passing it to `gyp_v8`, i.e.
+```
+build/gyp_v8 -Duse_goma=1
+```
+or by setting the environment variable `$GYP_DEFINES` appropriately:
+```
+export GYP_DEFINES="use_goma=1"
+```
+Note: You may need to also set `gomadir` to point to the directory where you installed goma, if it's not in the default location.
+
+If you are using goma, you'll also want to bump the job limit, i.e.
+```
+ninja -j 100 -C out/Debug d8
+```
+
+
+### Cross-compiling
+
+Similar to building with Clang, you can also use a cross-compiler. Just export your toolchain (`CXX`/`LINK` environment variables should be enough) and compile. For example:
+```
+export CXX=/path/to/cross-compile-g++
+export LINK=/path/to/cross-compile-g++
+make arm.release
+```
+
+
+### Xcode
+
+From the root of your V8 checkout, run either of:
+```
+build/gyp_v8 -Dtarget_arch=ia32
+build/gyp_v8 -Dtarget_arch=x64
+```
+
+This will generate Xcode project files in `build/` that you can then either open with Xcode or compile directly from the command line:
+```
+xcodebuild -project build/all.xcodeproj -configuration Release
+xcodebuild -project build/all.xcodeproj
+```
+
+Note: If you have configured your `GYP_GENERATORS` environment variable, either unset it, or set it to `xcode` for this to work.
+
+
+#### Custom build settings
+
+You can export the `GYP_DEFINES` environment variable in your shell to configure custom build options. The syntax is `GYP_DEFINES="-Dvariable1=value1 -Dvariable2=value2"` and so on for as many variables as you wish. Possibly interesting options include:
+ * `-Dcomponent=shared_library` (see `library=shared` in the [GCC + make](#Optional_parameters.md) section above)
+ * `-Dconsole=readline` (see `console=readline`)
+ * `-Dv8_enable_disassembler=1` (see `disassembler=on`)
+ * `-Dv8_use_snapshot='false'` (see `snapshot=off`)
+ * `-Dv8_enable_gdbjit=1` (see `gdbjit=on`)
+ * `-Dv8_use_liveobjectlist=true` (see `liveobjectlist=on`)
+
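+For example, a hypothetical configuration enabling the disassembler and readline console before generating projects:
+```
+export GYP_DEFINES="-Dv8_enable_disassembler=1 -Dconsole=readline"
+build/gyp_v8 -Dtarget_arch=x64
+```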
+
+### Visual Studio
+
+You need Visual Studio 2013. Older versions might still work at the moment, but this will probably change soon because we intend to use C++11 features.
+
+#### Prerequisites
+
+After you have created a [checkout of V8](using_git.md), all dependencies will already be installed.
+
+If you are getting errors during the build mentioning that 'python' could not be found, add 'python.exe' to your PATH.
+
+If you have Visual Studio 2013 and 2015 installed side-by-side, set the environment variable GYP\_MSVS\_VERSION to '2013'. That way the right project files are created.
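+
+For example, in a Windows command prompt before generating project files:
+```
+set GYP_MSVS_VERSION=2013
+```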
+
+#### Building
+ * If you use the command prompt:
+ 1. Generate project files:
+```
+python build\gyp_v8
+```
+> > > Specify the path to `python.exe` if you don't have it in your PATH.
+> > > Append `-Dtarget_arch=x64` if you want to build 64bit binaries. If you switch between ia32 and x64 targets, you may have to manually delete the generated .vcproj/.sln files before regenerating them.
+> > > Example:
+```
+third_party/python_26/python.exe build\gyp_v8 -Dtarget_arch=x64
+```
+ 1. Build:
+> > > Either open `build\All.sln` in Visual Studio, or compile on the command line as follows (adapt the path as necessary, or simply put `devenv.com` in your PATH):
+```
+"c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\IDE\devenv.com" /build Release build\All.sln
+```
+> > > Replace `Release` with `Debug` to build in Debug mode.
+> > > The built binaries will be in build\Release\ or build\Debug\.
+
+ * If you use cygwin, the workflow is the same, but the syntax is slightly different:
+ 1. Generate project files:
+```
+build/gyp_v8
+```
+> > > This will spit out a bunch of warnings about missing input files, but it seems to be OK to ignore them. (If you have time to figure this out, we'd happily accept a patch that makes the warnings go away!)
+ 1. Build:
+```
+/cygdrive/c/Program\ Files\ (x86)/Microsoft\ Visual\ Studio\ 9.0/Common7/IDE/devenv.com /build Release build/all.sln
+```
+
+
+#### Custom build settings
+
+See the "custom build settings" section for [Xcode](#Xcode) above.
+
+
+#### Running tests
+
+You can abuse the test driver's --buildbot flag to make it find the executables where MSVC puts them:
+```
+python tools/run-tests.py --buildbot --outdir build --arch ia32 --mode Release
+```
+
+
+### MinGW
+
+Building on MinGW is not officially supported, but it is possible. You even have two options:
+
+#### Option 1: With Cygwin Installed
+
+Requirements:
+ * MinGW
+ * Cygwin, including Python
+ * Python from www.python.org _(yes, you need two Python installations!)_
+
+Building:
+ 1. Open a MinGW shell
+ 1. `export PATH=$PATH:/c/cygwin/bin` _(or wherever you installed Cygwin)_
+ 1. `make ia32.release -j8`
+
+Running tests:
+ 1. Open a MinGW shell
+ 1. `export PATH=/c/Python27:$PATH` _(or wherever you installed Python)_
+ 1. `make ia32.release.check -j8`
+
+#### Option 2: Without Cygwin, just MinGW
+
+Requirements:
+ * MinGW
+ * Python from www.python.org
+
+Building and testing:
+ 1. Open a MinGW shell
+ 1. `tools/mingw-generate-makefiles.sh` _(re-run this any time a `*.gyp*` file changed, such as after updating your checkout)_
+ 1. `make ia32.release` _(unfortunately -jX doesn't seem to work here)_
+ 1. `make ia32.release.check -j8`
+
+
+# Final Note
+<font color='darkred'><b>If you have problems or questions, please file bugs at code.google.com/p/v8/issues or send mail to v8-users@googlegroups.com. Comments on this page are likely to go unnoticed and unanswered.</b></font> \ No newline at end of file
diff --git a/deps/v8/docs/contributing.md b/deps/v8/docs/contributing.md
new file mode 100644
index 0000000000..aa8e665976
--- /dev/null
+++ b/deps/v8/docs/contributing.md
@@ -0,0 +1,32 @@
+Here you will find information that you'll need to be able to contribute to V8. Be sure to read the whole thing before sending us a contribution, including the small print at the end.
+
+## Before you contribute
+
+Before you start working on a larger contribution to V8 you should get in touch with us first through the V8 [contributor mailing list](http://groups.google.com/group/v8-dev) so we can help out and possibly guide you; coordinating up front makes it much easier to avoid frustration later on.
+
+## Getting the code
+
+See [UsingGit](using_git.md).
+
+## Submitting code
+
+The source code of V8 follows the [Google C++ Style Guide](http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml), so you should familiarize yourself with those guidelines. Before submitting code you must pass all our [tests](http://code.google.com/p/v8-wiki/wiki/Testing) and successfully run the presubmit checks:
+
+> `tools/presubmit.py`
+
+The presubmit script uses a linter from Google, `cpplint.py`. External contributors can get this from [here](http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py) and place it in their path.
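+
+A minimal sketch for fetching it (the URL is the one given above; the install location is up to you):
+```
+wget http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
+chmod +x cpplint.py
+export PATH=$PWD:$PATH
+```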
+
+All submissions, including submissions by project members, require review. We use the same code-review tools and process as the chromium project. In order to submit a patch, you need to get the [depot\_tools](http://dev.chromium.org/developers/how-tos/install-depot-tools) and follow these instructions on [requesting a review](http://dev.chromium.org/developers/contributing-code) (using your V8 workspace instead of a chromium workspace).
+
+### Look out for breakage or regressions
+
+Before submitting your code please check the [buildbot console](http://build.chromium.org/p/client.v8/console) to see that the columns are mostly green before checking in your changes. Otherwise you will not know if your changes break the build or not. When your change is committed watch the [buildbot console](http://build.chromium.org/p/client.v8/console) until the bots turn green after your change.
+
+
+## The small print
+
+Before we can use your code you have to sign the [Google Individual Contributor License Agreement](http://code.google.com/legal/individual-cla-v1.0.html), which you can do online. This is mainly because you own the copyright to your changes, even after your contribution becomes part of our codebase, so we need your permission to use and distribute your code. We also need to be sure of various other things, for instance that you'll tell us if you know that your code infringes on other people's patents. You don't have to do this until after you've submitted your code for review and a member has approved it, but you will have to do it before we can put your code into our codebase.
+
+Contributions made by corporations are covered by a different agreement than the one above, the [Software Grant and Corporate Contributor License Agreement](http://code.google.com/legal/corporate-cla-v1.0.html).
+
+Sign them online [here](https://cla.developers.google.com/) \ No newline at end of file
diff --git a/deps/v8/docs/cross_compiling_for_arm.md b/deps/v8/docs/cross_compiling_for_arm.md
new file mode 100644
index 0000000000..68464eff1f
--- /dev/null
+++ b/deps/v8/docs/cross_compiling_for_arm.md
@@ -0,0 +1,151 @@
+<font color='darkred'><b><h2>Building V8 with SCons is no longer supported. See <a href='https://code.google.com/p/v8-wiki/wiki/BuildingWithGYP'>BuildingWithGYP</a>.</h2></b></font>
+
+---
+
+
+# Using Sourcery G++ Lite
+
+The Sourcery G++ Lite cross compiler suite is a free version of Sourcery G++ from [CodeSourcery](http://www.codesourcery.com). There is a page for the [GNU Toolchain for ARM Processors](http://www.codesourcery.com/sgpp/lite/arm). Determine the version you need for your host/target combination.
+
+The following instructions use [2009q1-203 for ARM GNU/Linux](http://www.codesourcery.com/sgpp/lite/arm/portal/release858); if you use a different version, change the URLs and `TOOL_PREFIX` below accordingly.
+
+## Installing on host and target
+
+The simplest way of setting this up is to install the full Sourcery G++ Lite package on both the host and target at the same location. This will ensure that all the libraries required are available on both sides. If you want to use the default libraries on the host, there is no need to install anything on the target.
+
+The following script will install in `/opt/codesourcery`:
+
+```
+#!/bin/sh
+
+sudo mkdir /opt/codesourcery
+cd /opt/codesourcery
+sudo chown $USERNAME .
+chmod g+ws .
+umask 2
+wget http://www.codesourcery.com/sgpp/lite/arm/portal/package4571/public/arm-none-linux-gnueabi/arm-2009q1-203-arm-none-linux-gnueabi-i686-pc-linux-gnu.tar.bz2
+tar -xvf arm-2009q1-203-arm-none-linux-gnueabi-i686-pc-linux-gnu.tar.bz2
+```
+
+
+## Building using scons without snapshot
+
+The simplest way to build is without snapshot, as that does not involve using the simulator to generate the snapshot. The following script will build the sample shell without snapshot for ARM v7.
+
+```
+#!/bin/sh
+
+export TOOL_PREFIX=/opt/codesourcery/arm-2009q1/bin/arm-none-linux-gnueabi
+export CXX=$TOOL_PREFIX-g++
+export AR=$TOOL_PREFIX-ar
+export RANLIB=$TOOL_PREFIX-ranlib
+export CC=$TOOL_PREFIX-gcc
+export LD=$TOOL_PREFIX-ld
+
+export CCFLAGS="-march=armv7-a -mtune=cortex-a8 -mfpu=vfp"
+export ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
+
+scons wordsize=32 snapshot=off arch=arm sample=shell
+```
+
+If the processor is not Cortex A8 or does not have VFP enabled the `-mtune=cortex-a8` and `-mfpu=vfp` part of `CCFLAGS` needs to be changed accordingly. By default the V8 SCons build adds `-mfloat-abi=softfp`.
+
+If you use the default libraries on the target, just leave out the setting of `ARM_TARGET_LIB`; if the target libraries are in a different location, `ARM_TARGET_LIB` needs to be adjusted accordingly.
+
+The default for Sourcery G++ Lite is ARM v5te with software floating point emulation, so if testing building for ARM v5te the setting of `CCFLAGS` and `ARM_TARGET_LIB` should be changed to:
+
+```
+CCFLAGS=""
+ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
+
+scons armeabi=soft ...
+```
+
+Relying on defaults in the tool chain might lead to surprises, so for ARM v5te with software floating point emulation the following is more explicit:
+
+```
+CCFLAGS="-march=armv5te"
+ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
+
+scons armeabi=soft ...
+```
+
+If the target has a VFP unit use the following:
+
+```
+CCFLAGS="-mfpu=vfpv3"
+ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc
+```
+
+To allow G++ to use Thumb2 instructions and the VFP unit when compiling the C/C++ code use:
+
+```
+CCFLAGS="-mthumb -mfpu=vfpv3"
+ARM_TARGET_LIB=/opt/codesourcery/arm-2009q1/arm-none-linux-gnueabi/libc/thumb2
+```
+
+_Note:_ V8 will not use Thumb2 instructions in its generated code; it always uses the full ARM instruction set.
+
+For other ARM versions please check the Sourcery G++ Lite documentation.
+
+As mentioned above, the default for the Sourcery G++ Lite used here is ARM v5te with software floating point emulation. However, beware that this default might change between versions and that there are no unique defaults for ARM tool chains in general, so always passing `-march` and possibly `-mfpu` is recommended. Passing `-mfloat-abi` is not required as this is controlled by the SCons option `armeabi`.
+
+## Building using scons with snapshot
+
+When building with snapshot, the simulator is used to generate the snapshot on the host, and the build for the target then uses that snapshot. The following script will accomplish that (using both Thumb2 and VFP instructions):
+
+```
+#!/bin/sh
+
+V8DIR=..
+
+cd host
+
+scons -Y$V8DIR simulator=arm snapshot=on
+mv obj/release/snapshot.cc $V8DIR/src/snapshot.cc
+
+cd ..
+
+export TOOL_PREFIX=/opt/codesourcery/arm-2010.09-103/bin/arm-none-linux-gnueabi
+export CXX=$TOOL_PREFIX-g++
+export AR=$TOOL_PREFIX-ar
+export RANLIB=$TOOL_PREFIX-ranlib
+export CC=$TOOL_PREFIX-gcc
+export LD=$TOOL_PREFIX-ld
+
+export CCFLAGS="-mthumb -march=armv7-a -mfpu=vfpv3"
+export ARM_TARGET_LIB=/opt/codesourcery/arm-2010.09-103/arm-none-linux-gnueabi/libc/thumb2
+
+cd target
+
+scons -Y$V8DIR wordsize=32 snapshot=nobuild arch=arm sample=shell
+rm $V8DIR/src/snapshot.cc
+
+cd ..
+```
+
+This script requires the two subdirectories `host` and `target`. V8 is first built for the host with the ARM simulator, which supports running ARM code on the host. This is used to build a snapshot file, which is then used for the actual cross compilation of V8.
+
+## Building for target which supports unaligned access
+
+The default when building V8 for an ARM target (either cross compiling or compiling on an ARM machine) is to disable unaligned memory access. However in some situations (most noticeably handling of regular expressions) performance will be better if unaligned memory access is used on processors which support it. To enable unaligned memory access set `unalignedaccesses` to `on` when building:
+
+```
+scons unalignedaccesses=on ...
+```
+
+When running in the simulator the default is to enable unaligned memory access, so to test in the simulator with unaligned memory access disabled set `unalignedaccesses` to `off` when building:
+
+```
+scons unalignedaccesses=off simulator=arm ...
+```
+
+## Using V8 with hardfp calling convention
+
+By default V8 uses the softfp calling convention when calling C functions from generated code. However it is possible to use hardfp as well. To enable this set `armeabi` to `hardfp` when building:
+
+```
+scons armeabi=hardfp ...
+```
+
+Passing `armeabi=hardfp` to SCons will automatically set the compiler flag `-mfloat-abi=hardfp`. If using snapshots remember to pass `armeabi=hardfp` when building V8 on the host for generating the snapshot as well. \ No newline at end of file
diff --git a/deps/v8/docs/d8_on_android.md b/deps/v8/docs/d8_on_android.md
new file mode 100644
index 0000000000..eda6419345
--- /dev/null
+++ b/deps/v8/docs/d8_on_android.md
@@ -0,0 +1,101 @@
+# Prerequisites
+ * a Linux/Mac workstation
+ * v8 r12178 (on Google Code) or later
+ * an Android emulator or device with matching USB cable
+ * make sure [building with GYP](http://code.google.com/p/v8-wiki/wiki/BuildingWithGYP) works
+
+
+# Get the code
+
+ * Use the instructions from https://code.google.com/p/v8-wiki/wiki/UsingGit to get the code
+ * Then you need to add the Android dependencies:
+```
+v8$ echo "target_os = ['android']" >> ../.gclient && gclient sync --nohooks
+```
+ * The sync will take a while the first time as it downloads the Android NDK to v8/third\_party
+ * If you want to use a different NDK, you need to set the gyp variable android\_ndk\_root
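+
+A hypothetical way to set that variable via `GYP_DEFINES` (the NDK path is a placeholder):
+```
+export GYP_DEFINES="${GYP_DEFINES} android_ndk_root=/opt/android-ndk"
+```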
+
+
+# Get the Android SDK
+ * tested version: `r15`
+ * download the SDK from http://developer.android.com/sdk/index.html
+ * extract it
+ * install the "Platform tools" using the SDK manager that you can start by running `tools/android`
+ * now you have a `platform_tools/adb` binary which will be used later; put it in your `PATH` or remember where it is
+
+
+# Set up your device
+ * Enable USB debugging (Gingerbread: Settings > Applications > Development > USB debugging; Ice Cream Sandwich: Settings > Developer Options > USB debugging)
+ * connect your device to your workstation
+ * make sure `adb devices` shows it; you may have to edit `udev` rules to give yourself proper permissions
+ * run `adb shell` to get an ssh-like shell on the device. In that shell, do:
+```
+cd /data/local/tmp
+mkdir v8
+cd v8
+```
+
+
+# Push stuff onto the device
+ * make sure your device is connected
+ * from your workstation's shell:
+```
+adb push /file/you/want/to/push /data/local/tmp/v8/
+```
+
+
+# Compile V8 for Android
+Currently two architectures (`android_arm` and `android_ia32`) are supported, each in `debug` or `release` mode. The following steps work equally well for both ARM and ia32, on either the emulator or real devices.
+ * compile:
+```
+make android_arm.release -j16
+```
+ * push the resulting binary to the device:
+```
+adb push out/android_arm.release/d8 /data/local/tmp/v8/d8
+```
+ * the most comfortable way to run it is from your workstation's shell as a one-off command (rather than starting an interactive shell session on the device); that way you can use pipes or whatever to process the output as necessary:
+```
+adb shell /data/local/tmp/v8/d8 <parameters>
+```
+ * warning: when you cancel such an "adb shell whatever" command using Ctrl+C, the process on the phone will sometimes keep running.
+ * Alternatively, use the `.check` suffix to automatically push test binaries and test cases onto the device and run them.
+```
+make android_arm.release.check
+```
+
+
+# Profile
+ * compile a binary, push it to the device, keep a copy of it on the host
+```
+make android_arm.release -j16
+adb push out/android_arm.release/d8 /data/local/tmp/v8/d8-version.under.test
+cp out/android_arm.release/d8 ./d8-version.under.test
+```
+ * get a profiling log and copy it to the host:
+```
+adb shell /data/local/tmp/v8/d8-version.under.test benchmark.js --prof
+adb pull /data/local/tmp/v8/v8.log ./
+```
+ * open `v8.log` in your favorite editor and edit the first line to match the full path of the `d8-version.under.test` binary on your workstation (instead of the `/data/local/tmp/v8/` path it had on the device); a scripted alternative is sketched after this list
+ * run the tick processor with the host's `d8` and an appropriate `nm` binary:
+```
+cp out/ia32.release/d8 ./d8 # only required once
+tools/linux-tick-processor --nm=$ANDROID_NDK_ROOT/toolchain/bin/arm-linux-androideabi-nm
+```
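+
+For the log-editing step above, a hypothetical one-liner instead of a manual edit (assumes the binary was copied to the current directory):
+```
+sed -i 's|/data/local/tmp/v8/d8-version.under.test|'"$PWD"'/d8-version.under.test|' v8.log
+```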
+
+# Compile SpiderMonkey for Lollipop
+```
+cd firefox/js/src
+autoconf2.13
+./configure \
+ --target=arm-linux-androideabi \
+ --with-android-ndk=$ANDROID_NDK_ROOT \
+ --with-android-version=21 \
+ --without-intl-api \
+ --disable-tests \
+ --enable-android-libstdcxx \
+ --enable-pie
+make
+adb push -p js/src/shell/js /data/local/tmp/js
+``` \ No newline at end of file
diff --git a/deps/v8/docs/debugger_protocol.md b/deps/v8/docs/debugger_protocol.md
new file mode 100644
index 0000000000..6b6b448a0f
--- /dev/null
+++ b/deps/v8/docs/debugger_protocol.md
@@ -0,0 +1,934 @@
+# Introduction
+
+V8 has support for debugging the JavaScript code running in it. There are two APIs for this: a function based API using JavaScript objects and a message based API using a JSON based protocol. The function based API can be used by an in-process debugger agent, whereas the message based API can be used out of process as well.
+**Note: the message based API is no longer maintained. Please ask in v8-users@googlegroups.com if you want to attach a debugger to the run-time.**
+
+The debugger protocol is based on [JSON](http://www.json.org/). Each protocol packet is defined in terms of JSON and is transmitted as a string value. All packets have two basic elements `seq` and `type`.
+
+```
+{ "seq" : <number>,
+ "type" : <type>,
+ ...
+}
+```
+
+The element `seq` holds the sequence number of the packet, and the element `type` is the type of the packet. The type is a string value with one of the following values: `"request"`, `"response"` or `"event"`.
+
+A `"request"` packet has the following structure:
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : <command>
+ "arguments" : ...
+}
+```
+
+A `"response"` packet has the following structure. If `success` is true `body` will contain the response data. If `success` is false `message` will contain an error message.
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : <command>
+ "body" : ...
+ "running" : <is the VM running after sending this response>
+ "success" : <boolean indicating success>
+ "message" : <if command failed this property contains an error message>
+}
+```
+
+An `"event"` packet has the following structure:
+
+```
+{ "seq" : <number>,
+ "type" : "event",
+ "event" : <event name>
+ body : ...
+}
+```
+
+# Request/response pairs
+
+## Request `continue`
+
+The request `continue` is a request from the debugger to start the VM running again. As part of the `continue` request the debugger can specify if it wants the VM to perform a single step action.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "continue",
+ "arguments" : { "stepaction" : <"in", "next" or "out">,
+ "stepcount" : <number of steps (default 1)>
+ }
+}
+```
+
+In the response the property `running` will always be true as the VM will be running after executing the `continue` command. If a single step action is requested the VM will respond with a `break` event after running the step.
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "continue",
+ "running" : true
+ "success" : true
+}
+```
+
+
+Here are a couple of examples.
+
+```
+{"seq":117,"type":"request","command":"continue"}
+{"seq":118,"type":"request","command":"continue","arguments":{"stepaction":"out"}}
+{"seq":119,"type":"request","command":"continue","arguments":{"stepaction":"next","stepcount":5}}
+```
+
+## Request `evaluate`
+
+The request `evaluate` is used to evaluate an expression. The body of the result is as described in response object serialization below.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "evaluate",
+ "arguments" : { "expression" : <expression to evaluate>,
+ "frame" : <number>,
+ "global" : <boolean>,
+ "disable_break" : <boolean>,
+ "additional_context" : [
+ { "name" : <name1>, "handle" : <handle1> },
+ { "name" : <name2>, "handle" : <handle2> },
+ ...
+ ]
+ }
+}
+```
+
+Optional argument `additional_context` specifies handles that will be visible from the expression under corresponding names (see example below).
+
+Response:
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "evaluate",
+ "body" : ...
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+Here are a couple of examples.
+
+```
+{"seq":117,"type":"request","command":"evaluate","arguments":{"expression":"1+2"}}
+{"seq":118,"type":"request","command":"evaluate","arguments":{"expression":"a()","frame":3,"disable_break":false}}
+{"seq":119,"type":"request","command":"evaluate","arguments":{"expression":"[o.a,o.b,o.c]","global":true,"disable_break":true}}
+{"seq":120,"type":"request","command":"evaluate","arguments":{"expression":"obj.toString()", "additional_context": [{ "name":"obj","handle":25 }] }}
+```
+
+## Request `lookup`
+
+The request `lookup` is used to look up objects based on their handle. The individual array elements of the body of the result are as described in response object serialization below.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "lookup",
+ "arguments" : { "handles" : <array of handles>,
+ "includeSource" : <boolean indicating whether the source will be included when script objects are returned>,
+ }
+}
+```
+
+Response:
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "lookup",
+ "body" : <array of serialized objects indexed using their handle>
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+Here are a couple of examples.
+
+```
+{"seq":117,"type":"request","command":"lookup","arguments":{"handles":"[1]"}}
+{"seq":118,"type":"request","command":"lookup","arguments":{"handles":"[7,12]"}}
+```
+
+## Request `backtrace`
+
+The request `backtrace` returns a backtrace (or stacktrace) from the current execution state. When issuing a request a range of frames can be supplied. The top frame is frame number 0. If no frame range is supplied data for 10 frames will be returned.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "backtrace",
+ "arguments" : { "fromFrame" : <number>
+ "toFrame" : <number>
+ "bottom" : <boolean, set to true if the bottom of the stack is requested>
+ }
+}
+```
+
+The response contains the frame data together with the actual frames returned and the total frame count.
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "backtrace",
+ "body" : { "fromFrame" : <number>
+ "toFrame" : <number>
+ "totalFrames" : <number>
+ "frames" : <array of frames - see frame request for details>
+ }
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+If there are no stack frames the result body only contains `totalFrames` with a value of `0`. When an exception event is generated due to compilation failures it is possible that there are no stack frames.
+
+Here are a couple of examples.
+
+```
+{"seq":117,"type":"request","command":"backtrace"}
+{"seq":118,"type":"request","command":"backtrace","arguments":{"toFrame":2}}
+{"seq":119,"type":"request","command":"backtrace","arguments":{"fromFrame":0,"toFrame":9}}
+```
+
+## Request `frame`
+
+The request `frame` selects a new selected frame and returns information for that frame. If no frame number is specified the selected frame is returned.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "frame",
+ "arguments" : { "number" : <frame number>
+ }
+}
+```
+
+Response:
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "frame",
+ "body" : { "index" : <frame number>,
+ "receiver" : <frame receiver>,
+ "func" : <function invoked>,
+ "script" : <script for the function>,
+ "constructCall" : <boolean indicating whether the function was called as constructor>,
+ "debuggerFrame" : <boolean indicating whether this is an internal debugger frame>,
+                 "arguments" : [ { name: <name of the argument - missing for anonymous arguments>,
+ value: <value of the argument>
+ },
+ ... <the array contains all the arguments>
+ ],
+ "locals" : [ { name: <name of the local variable>,
+ value: <value of the local variable>
+ },
+ ... <the array contains all the locals>
+ ],
+ "position" : <source position>,
+ "line" : <source line>,
+ "column" : <source column within the line>,
+ "sourceLineText" : <text for current source line>,
+ "scopes" : [ <array of scopes, see scope request below for format> ],
+
+ }
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+Here are a couple of examples.
+
+```
+{"seq":117,"type":"request","command":"frame"}
+{"seq":118,"type":"request","command":"frame","arguments":{"number":1}}
+```
+
+## Request `scope`
+
+The request `scope` returns information on a given scope for a given frame. If no frame number is specified the selected frame is used.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "scope",
+ "arguments" : { "number" : <scope number>
+ "frameNumber" : <frame number, optional uses selected frame if missing>
+ }
+}
+```
+
+Response:
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "scope",
+ "body" : { "index" : <index of this scope in the scope chain. Index 0 is the top scope
+ and the global scope will always have the highest index for a
+ frame>,
+ "frameIndex" : <index of the frame>,
+ "type" : <type of the scope:
+ 0: Global
+ 1: Local
+ 2: With
+ 3: Closure
+ 4: Catch >,
+                   "object" : <the scope object defining the content of the scope.
+                               For local and closure scopes these are transient objects,
+                               which have negative handle values>
+ }
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+Here are a couple of examples.
+
+```
+{"seq":117,"type":"request","command":"scope"}
+{"seq":118,"type":"request","command":"scope","arguments":{"frameNumber":1,"number":1}}
+```
+
+## Request `scopes`
+
+The request `scopes` returns all the scopes for a given frame. If no frame number is specified the scopes of the selected frame are returned.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "scopes",
+ "arguments" : { "frameNumber" : <frame number, optional uses selected frame if missing>
+ }
+}
+```
+
+Response:
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "scopes",
+ "body" : { "fromScope" : <number of first scope in response>,
+ "toScope" : <number of last scope in response>,
+ "totalScopes" : <total number of scopes for this frame>,
+ "scopes" : [ <array of scopes, see scope request above for format> ],
+ }
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+Here are a couple of examples.
+
+```
+{"seq":117,"type":"request","command":"scopes"}
+{"seq":118,"type":"request","command":"scopes","arguments":{"frameNumber":1}}
+```
+
+## Request `scripts`
+
+The request `scripts` retrieves active scripts from the VM. An active script is source code from which there are still live objects in the VM. This request will always force a full garbage collection in the VM.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "scripts",
+ "arguments" : { "types" : <types of scripts to retrieve
+ set bit 0 for native scripts
+ set bit 1 for extension scripts
+ set bit 2 for normal scripts
+ (default is 4 for normal scripts)>
+                   "ids" : <array of ids of scripts to return. If this is not specified all scripts are returned>
+ "includeSource" : <boolean indicating whether the source code should be included for the scripts returned>
+ "filter" : <string or number: filter string or script id.
+ If a number is specified, then only the script with the same number as its script id will be retrieved.
+ If a string is specified, then only scripts whose names contain the filter string will be retrieved.>
+ }
+}
+```
+
+The response contains an array of the scripts in the VM. This information includes the relative location of the script within the containing resource.
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "scripts",
+ "body" : [ { "name" : <name of the script>,
+ "id" : <id of the script>
+ "lineOffset" : <line offset within the containing resource>
+ "columnOffset" : <column offset within the containing resource>
+ "lineCount" : <number of lines in the script>
+ "data" : <optional data object added through the API>
+ "source" : <source of the script if includeSource was specified in the request>
+ "sourceStart" : <first 80 characters of the script if includeSource was not specified in the request>
+ "sourceLength" : <total length of the script in characters>
+ "scriptType" : <script type (see request for values)>
+ "compilationType" : < How was this script compiled:
+ 0 if script was compiled through the API
+ 1 if script was compiled through eval
+ >
+ "evalFromScript" : <if "compilationType" is 1 this is the script from where eval was called>
+             "evalFromLocation" : { "line" : <if "compilationType" is 1 this is the line in the script from where eval was called>,
+                                    "column" : <if "compilationType" is 1 this is the column in the script from where eval was called> }
+           } ]
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+Here are a couple of examples.
+
+```
+{"seq":117,"type":"request","command":"scripts"}
+{"seq":118,"type":"request","command":"scripts","arguments":{"types":7}}
+```
+
+## Request `source`
+
+The request `source` retrieves source code for a frame. It returns a number of source lines running from the `fromLine` to but not including the `toLine`, that is the interval is open on the "to" end. For example, requesting source from line 2 to 4 returns two lines (2 and 3). Also note that the line numbers are 0 based: the first line is line 0.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "source",
+ "arguments" : { "frame" : <frame number (default selected frame)>
+ "fromLine" : <from line within the source default is line 0>
+ "toLine" : <to line within the source this line is not included in
+ the result default is the number of lines in the script>
+ }
+}
+```
+
+Response:
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "source",
+ "body" : { "source" : <the source code>
+ "fromLine" : <actual from line within the script>
+ "toLine" : <actual to line within the script this line is not included in the source>
+ "fromPosition" : <actual start position within the script>
+ "toPosition" : <actual end position within the script>
+ "totalLines" : <total lines in the script>
+ }
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+Here are a couple of examples.
+
+```
+{"seq":117,"type":"request","command":"source","arguments":{"fromLine":10,"toLine":20}}
+{"seq":118,"type":"request","command":"source","arguments":{"frame":2,"fromLine":10,"toLine":20}}
+```
+
+## Request `setbreakpoint`
+
+The request `setbreakpoint` creates a new break point. This request can be used to set both function and script break points. A function break point sets a break point in an existing function whereas a script break point sets a break point in a named script. A script break point can be set even if the named script is not found.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "setbreakpoint",
+ "arguments" : { "type" : <"function" or "script" or "scriptId" or "scriptRegExp">
+ "target" : <function expression or script identification>
+ "line" : <line in script or function>
+ "column" : <character position within the line>
+ "enabled" : <initial enabled state. True or false, default is true>
+ "condition" : <string with break point condition>
+ "ignoreCount" : <number specifying the number of break point hits to ignore, default value is 0>
+ }
+}
+```
+
+The result of the `setbreakpoint` request is a response with the number of the newly created break point. This break point number is used in the `changebreakpoint` and `clearbreakpoint` requests.
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "setbreakpoint",
+ "body" : { "type" : <"function" or "script">
+ "breakpoint" : <break point number of the new break point>
+ }
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+Here are a couple of examples.
+
+```
+{"seq":117,"type":"request","command":"setbreakpoint","arguments":{"type":"function,"target":"f"}}
+{"seq":118,"type":"request","command":"setbreakpoint","arguments":{type:"script","target":"test.js","line":100}}
+{"seq":119,"type":"request","command":"setbreakpoint","arguments":{"type":"function,"target":"f","condition":"i > 7"}}
+```
+
+
+## Request `changebreakpoint`
+
+The request `changebreakpoint` changes the status of a break point.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "changebreakpoint",
+ "arguments" : { "breakpoint" : <number of the break point to clear>
+ "enabled" : <initial enabled state. True or false, default is true>
+ "condition" : <string with break point condition>
+                  "ignoreCount" : <number specifying the number of break point hits to ignore, default value is 0>
+                }
+}
+```
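+
+Here is an illustrative example (it assumes break point number 1 exists):
+
+```
+{"seq":117,"type":"request","command":"changebreakpoint","arguments":{"breakpoint":1,"enabled":false}}
+```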
+
+## Request `clearbreakpoint`
+
+The request `clearbreakpoint` clears a break point.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "clearbreakpoint",
+ "arguments" : { "breakpoint" : <number of the break point to clear>
+ }
+}
+```
+
+Response:
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "clearbreakpoint",
+ "body" : { "type" : <"function" or "script">
+ "breakpoint" : <number of the break point cleared>
+ }
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+Here are a couple of examples.
+
+```
+{"seq":117,"type":"request","command":"clearbreakpoint","arguments":{"type":"function,"breakpoint":1}}
+{"seq":118,"type":"request","command":"clearbreakpoint","arguments":{"type":"script","breakpoint":2}}
+```
+
+## Request `setexceptionbreak`
+
+The request `setexceptionbreak` is a request to enable/disable breaks on all / uncaught exceptions. If the "enabled" argument is not specified, the debuggee will toggle the state of the specified break type.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "setexceptionbreak",
+ "arguments" : { "type" : <string: "all", or "uncaught">,
+ "enabled" : <optional bool: enables the break type if true>
+ }
+}
+```
+
+In response, the break on exception property of the debuggee will be set accordingly, and the following response message will be dispatched to the debugger.
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "setexceptionbreak",
+  "body" : { "type" : <string: "all" or "uncaught" corresponding to the request.>,
+ "enabled" : <bool: true if the break type is currently enabled as a result of the request>
+ }
+ "running" : true
+ "success" : true
+}
+```
+
+Here are a few examples.
+
+```
+{"seq":117,"type":"request","command":"setexceptionbreak","arguments":{"type":"all"}}
+{"seq":118,"type":"request","command":" setexceptionbreak","arguments":{"type":"all",ā€enabledā€:false}}
+{"seq":119,"type":"request","command":" setexceptionbreak","arguments":{"type":"uncaught","enabled":true}}
+```
+
+## Request `v8flags`
+The request `v8flags` is a request to apply the specified v8 flags (analogous to how they are specified on the command line).
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "v8flags",
+ "arguments" : { "flags" : <string: a sequence of v8 flags just like those used on the command line>
+ }
+}
+```
+
+In response, the specified flags will be applied in the debuggee if they are legal flags. Their effects vary depending on the implementation of the flag.
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "v8flags",
+ "running" : true
+ "success" : true
+}
+```
+
+Here are a few examples.
+
+```
+{"seq":117,"type":"request","command":"v8flags","arguments":{"flags":"--trace_gc ā€”always_compact"}}
+{"seq":118,"type":"request","command":" v8flags","arguments":{"flags":"--notrace_gc"}}
+```
+
+## Request `version`
+
+The request `version` reports the version of the running V8.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "version",
+}
+```
+
+Response:
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+  "command" : "version",
+ "body" : { "V8Version": <string, version of V8>
+ }
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+Here is an example.
+
+```
+{"seq":1,"type":"request","command":"version"}
+{"seq":134,"request_seq":1,"type":"response","command":"version","success":true,"body":{"V8Version":"1.3.19 (candidate)"},"refs":[],"running":false}
+```
+
+## Request `disconnect`
+
+The request `disconnect` is used to detach the remote debugger from the debuggee. This will trigger the debuggee to disable all active breakpoints and resume execution if the debuggee was previously stopped at a break.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "disconnect",
+}
+```
+
+The only response for the `disconnect` request is the response to a connect request if the debugger is still able to get a response before the debuggee successfully disconnects.
+
+Here is an example:
+
+```
+{"seq":117,"type":"request","command":"disconnect"}
+```
+
+## Request `gc`
+The request `gc` is a request to run the garbage collector in the debuggee.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "gc",
+ "arguments" : { "type" : <string: "all">,
+ }
+}
+```
+
+In response, the debuggee will run the specified GC type and send the following response message:
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "gc",
+  "body" : { "before" : <int: total heap usage in bytes before the GC>,
+ "after" : <int: total heap usage in bytes after the GC>
+ }
+ "running" : true
+ "success" : true
+}
+```
+
+Here is an example.
+
+```
+{"seq":117,"type":"request","command":"gc","arguments":{"type":"all"}}
+```
+
+## Request `listbreakpoints`
+
+The request `listbreakpoints` is used to get information on breakpoints that may have been set by the debugger.
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "listbreakpoints",
+}
+```
+
+Response:
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+ "command" : "listbreakpoints",
+ "body" : { "breakpoints": [ { "type" : <string: "scriptId" or "scriptName".>,
+ "script_id" : <int: script id. Only defined if type is scriptId.>,
+ "script_name" : <string: script name. Only defined if type is scriptName.>,
+ "number" : <int: breakpoint number. Starts from 1.>,
+ "line" : <int: line number of this breakpoint. Starts from 0.>,
+ "column" : <int: column number of this breakpoint. Starts from 0.>,
+ "groupId" : <int: group id of this breakpoint.>,
+ "hit_count" : <int: number of times this breakpoint has been hit. Starts from 0.>,
+ "active" : <bool: true if this breakpoint is enabled.>,
+ "ignoreCount" : <int: remaining number of times to ignore breakpoint. Starts from 0.>,
+ "actual_locations" : <actual locations of the breakpoint.>,
+ }
+ ],
+ "breakOnExceptions" : <true if break on all exceptions is enabled>,
+ "breakOnUncaughtExceptions" : <true if break on uncaught exceptions is enabled>
+ }
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+Here is an example:
+
+```
+{"seq":117,"type":"request","command":"listbreakpoints"}
+```
+
+
+## Request `setvariablevalue`
+This request sets the value of a variable from the specified scope.
+
+Request:
+
+```
+{ "seq" : <number>,
+ "type" : "request",
+ "command" : "setvariablevalue",
+  "arguments" : { "name" : <string: variable name>,
+ "scope" : { "number" : <scope number>
+ "frameNumber" : <frame number, optional uses selected frame if missing>
+ }
+ }
+}
+```
+
+Response:
+
+```
+{ "seq" : <number>,
+ "type" : "response",
+ "request_seq" : <number>,
+  "command" : "setvariablevalue",
+ "body" : { "newValue": <object: mirror object of the new value> }
+ "running" : <is the VM running after sending this response>
+ "success" : true
+}
+```
+
+# Events
+
+## Event `break`
+
+The event `break` indicates that the execution in the VM has stopped due to a break condition. This can be caused by an unconditional break request, by a break point previously set, by a stepping action having completed or by executing the `debugger` statement in JavaScript.
+
+```
+{ "seq" : <number>,
+ "type" : "event",
+
+ "event" : "break",
+ "body" : { "invocationText" : <text representation of the stack frame>,
+ "sourceLine" : <source line where execution is stopped>,
+ "sourceColumn" : <column within the source line where execution is stopped>,
+ "sourceLineText" : <text for the source line where execution is stopped>,
+             "script" : { "name" : <resource name of the origin of the script>,
+                          "lineOffset" : <line offset within the origin of the script>,
+                          "columnOffset" : <column offset within the origin of the script>,
+                          "lineCount" : <number of lines in the script> },
+             "breakpoints" : <array of break point numbers hit if any>
+           }
+}
+```
+
+Here are a couple of examples.
+
+```
+{"seq":117,"type":"event","event":"break","body":{"functionName":"f","sourceLine":1,"sourceColumn":14}}
+{"seq":117,"type":"event","event":"break","body":{"functionName":"g","scriptData":"test.js","sourceLine":12,"sourceColumn":22,"breakpoints":[1]}}
+{"seq":117,"type":"event","event":"break","body":{"functionName":"h","sourceLine":100,"sourceColumn":12,"breakpoints":[3,5,7]}}
+```
+
+## Event `exception`
+
+The event `exception` indicates that the execution in the VM has stopped due to an exception.
+
+```
+{ "seq" : <number>,
+ "type" : "event",
+ "event" : "exception",
+ "body" : { "uncaught" : <boolean>,
+ "exception" : ...
+ "sourceLine" : <source line where the exception was thrown>,
+ "sourceColumn" : <column within the source line from where the exception was thrown>,
+ "sourceLineText" : <text for the source line from where the exception was thrown>,
+ "script" : { "name" : <name of script>
+ "lineOffset" : <number>
+ "columnOffset" : <number>
+ "lineCount" : <number>
+ }
+
+ }
+}
+```
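+
+Here is an illustrative example (all values are made up, following the structure above):
+
+```
+{"seq":117,"type":"event","event":"exception","body":{"uncaught":true,"exception":{"ref":1},"sourceLine":5,"sourceColumn":10,"sourceLineText":"  throw new Error('boom');","script":{"name":"test.js","lineOffset":0,"columnOffset":0,"lineCount":13}}}
+```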
+
+# Response object serialization
+
+Some responses contain objects as part of the body, e.g. the response to the evaluate request contains the result of the expression evaluated.
+
+All objects exposed through the debugger are assigned an ID called a handle. This handle is serialized and can be used to identify objects. A handle has a certain lifetime after which it will no longer refer to the same object. Currently the lifetime of handles matches the processing of a debug event. For each debug event handles are recycled.
+
+An object can be serialized either as a reference to a given handle or as a value representation containing the object content.
+
+An object serialized as a reference follows this format, where `<handle>` is an integer.
+
+```
+{"ref":<handle>}
+```
+
+Objects serialized as values all contain the handle and the type of the object.
+
+```
+{ "handle" : <handle>,
+ "type" : <"undefined", "null", "boolean", "number", "string", "object", "function" or "frame">
+}
+```
+
+In some situations special transient objects are created by the debugger. These objects are not really visible from JavaScript, but are created to materialize something inside the VM as an object visible to the debugger. One example of this is the local scope object returned from the `scope` and `scopes` request. Transient objects are identified by having a negative handle. A transient object can never be retrieved using the `lookup` request, so all transient objects referenced will be in the `refs` part of the response. The lifetime of transient objects is basically the request they are involved in.
+
+For the primitive JavaScript types `undefined` and `null` the type describes the value fully.
+
+```
+{"handle":<handle>,"type":"undefined"}
+```
+
+```
+{"handle":<handle>,"type":"null"}
+```
+
+For the rest of the primitive types `boolean`, `number` and `string` the value is part of the result.
+
+```
+{ "handle":<handle>,
+ "type" : <"boolean", "number" or "string">
+ "value" : <JSON encoded value>
+}
+```
+
+Boolean value.
+
+```
+{"handle":7,"type":"boolean","value":true}
+```
+
+Number value.
+
+```
+{"handle":8,"type":"number","value":42}
+```
+
+String value.
+
+```
+{"handle":9,"type":"string","value":"a string"}
+```
+
+An object is encoded with additional information.
+
+```
+{ "handle" : <handle>,
+ "type" : "object",
+ "className" : <Class name, ECMA-262 property [[Class]]>,
+ "constructorFunction" : {"ref":<handle>},
+ "protoObject" : {"ref":<handle>},
+ "prototypeObject" : {"ref":<handle>},
+ "properties" : [ {"name" : <name>,
+ "ref" : <handle>
+ },
+ ...
+ ]
+}
+```
+
+The difference between the `protoObject` and the `prototypeObject` is that the `protoObject` contains a reference to the actual prototype object (for which accessibility is not defined in ECMA-262, but in V8 it is accessible using the `__proto__` property) whereas the `prototypeObject` is the value of the `prototype` property.
+
+Here is an example.
+
+```
+{"handle":3,"type":"object","className":"Object","constructorFunction":{"ref":4},"protoObject":{"ref":5},"prototypeObject":{"ref":6},"properties":[{"name":"a","ref:7},{"name":"b","ref":8}]}
+```
+
+A function is encoded as an object but with additional information in the properties `name`, `inferredName`, `source` and `script`.
+
+```
+{ "handle" : <handle>,
+ "type" : "function",
+ "className" : "Function",
+ "constructorFunction" : {"ref":<handle>},
+ "protoObject" : {"ref":<handle>},
+ "prototypeObject" : {"ref":<handle>},
+ "name" : <function name>,
+ "inferredName" : <inferred function name for anonymous functions>
+ "source" : <function source>,
+ "script" : <reference to function script>,
+ "scriptId" : <id of function script>,
+ "position" : <function begin position in script>,
+ "line" : <function begin source line in script>,
+ "column" : <function begin source column in script>,
+ "properties" : [ {"name" : <name>,
+ "ref" : <handle>
+ },
+ ...
+ ]
+}
+``` \ No newline at end of file
diff --git a/deps/v8/docs/gdb_jit_interface.md b/deps/v8/docs/gdb_jit_interface.md
new file mode 100644
index 0000000000..753626cf6f
--- /dev/null
+++ b/deps/v8/docs/gdb_jit_interface.md
@@ -0,0 +1,63 @@
+# Prerequisites
+
+ * V8 3.0.9 or newer
+ * GDB 7.0 or newer
+ * Linux OS
+ * CPU with Intel-compatible architecture (ia32 or x64)
+
+# Introduction
+
+GDB JIT interface integration allows V8 to provide GDB with symbol and debugging information for native code emitted at runtime.
+
+When the GDB JIT interface is disabled, a typical backtrace in GDB will contain frames marked with ??. These frames correspond to dynamically generated code:
+
+```
+#8 0x08281674 in v8::internal::Runtime_SetProperty (args=...) at src/runtime.cc:3758
+#9 0xf5cae28e in ?? ()
+#10 0xf5cc3a0a in ?? ()
+#11 0xf5cc38f4 in ?? ()
+#12 0xf5cbef19 in ?? ()
+#13 0xf5cb09a2 in ?? ()
+#14 0x0809e0a5 in v8::internal::Invoke (construct=false, func=..., receiver=..., argc=0, args=0x0,
+ has_pending_exception=0xffffd46f) at src/execution.cc:97
+```
+
+Enabling GDB JIT integration, however, allows GDB to produce a more informative stack trace:
+
+```
+#6 0x082857fc in v8::internal::Runtime_SetProperty (args=...) at src/runtime.cc:3758
+#7 0xf5cae28e in ?? ()
+#8 0xf5cc3a0a in loop () at test.js:6
+#9 0xf5cc38f4 in test.js () at test.js:13
+#10 0xf5cbef19 in ?? ()
+#11 0xf5cb09a2 in ?? ()
+#12 0x0809e1f9 in v8::internal::Invoke (construct=false, func=..., receiver=..., argc=0, args=0x0,
+ has_pending_exception=0xffffd44f) at src/execution.cc:97
+```
+
+Frames still unknown to GDB correspond to native code without source information. See the Known Limitations section below for more details.
+
+GDB JIT interface is specified in the GDB documentation: http://sourceware.org/gdb/current/onlinedocs/gdb/JIT-Interface.html
+
+# Enabling GDB JIT integration
+
+GDB JIT integration is currently excluded from compilation and disabled at runtime by default. To enable it:
+
+ 1. Build V8 library with `ENABLE_GDB_JIT_INTERFACE` defined. If you are using scons to build V8 run it with `gdbjit=on`.
+ 1. Pass `--gdbjit` flag when starting V8.
+
+To check that you have enabled GDB JIT integration correctly, try setting a breakpoint on `__jit_debug_register_code`. This function is invoked to notify GDB about new code objects.
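+
+For example, in a GDB session (commands only, output omitted):
+
+```
+(gdb) break __jit_debug_register_code
+(gdb) run
+```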
+
+# Known Limitations
+
+ * The GDB side of the JIT interface currently (as of GDB 7.2) does not handle registration of code objects very effectively. Each subsequent registration takes more time: with 500 registered objects each registration takes more than 50 ms; with 1000 registered code objects, more than 300 ms. This problem was reported to the GDB developers (http://sourceware.org/ml/gdb/2011-01/msg00002.html) but currently there is no solution available. To reduce pressure on GDB, the current implementation of GDB JIT integration operates in two modes: _default_ and _full_ (enabled by the `--gdbjit-full` flag). In _default_ mode V8 notifies GDB only about code objects that have source information attached (this usually includes all user scripts). In _full_ mode it notifies GDB about all generated code objects (stubs, ICs, trampolines).
+
+ * On x64 GDB is unable to properly unwind the stack without the `.eh_frame` section (Issue 1053 (on Google Code))
+
+ * GDB is not notified about code deserialized from the snapshot (Issue 1054 (on Google Code))
+
+ * Only Linux OS on Intel-compatible CPUs is supported. For different OSes either a different ELF-header should be generated or a completely different object format should be used.
+
+ * Enabling GDB JIT interface disables compacting GC. This is done to reduce pressure on GDB as unregistering and registering each moved code object will incur considerable overhead.
+
+ * GDB JIT integration provides only _approximate_ source information. It does not provide any information about local variables, function arguments, stack layout etc. It does not enable stepping through JavaScript code or setting a breakpoint on a given line. However, one can set a breakpoint on a function by its name. \ No newline at end of file
diff --git a/deps/v8/docs/handling_of_ports.md b/deps/v8/docs/handling_of_ports.md
new file mode 100644
index 0000000000..9706546e23
--- /dev/null
+++ b/deps/v8/docs/handling_of_ports.md
@@ -0,0 +1,24 @@
+# General
+This article describes how ports should be handled.
+
+# MIPS
+## Straight-forward MIPS ports
+ 1. Do them yourself.
+
+## More complicated MIPS ports
+ 1. CC the MIPS team in the CL. Use the mailing list v8-mips-ports.at.googlegroups.com for that purpose.
+ 1. The MIPS team will provide you with a patch which you need to merge into your CL.
+ 1. Then land the CL.
+
+# PPC (not officially supported)
+ 1. Contact/CC the PPC team in the CL if needed. Use the mailing list v8-ppc-ports.at.googlegroups.com for that purpose.
+
+# x87 (not officially supported)
+ 1. Contact/CC the x87 team in the CL if needed. Use the mailing list v8-x87-ports.at.googlegroups.com for that purpose.
+
+# ARM
+## Straight-forward ARM ports
+ 1. Do them yourself.
+
+## When you are lost
+ 1. CC the ARM team in the CL. Use the mailing list v8-arm-ports.at.googlegroups.com for that purpose. \ No newline at end of file
diff --git a/deps/v8/docs/i18n_support.md b/deps/v8/docs/i18n_support.md
new file mode 100644
index 0000000000..a1eb1c8f0a
--- /dev/null
+++ b/deps/v8/docs/i18n_support.md
@@ -0,0 +1,44 @@
+# ECMAScript 402
+
+V8 optionally implements the [ECMAScript 402](http://www.ecma-international.org/ecma-402/1.0/) API. The API is enabled by default, but can be turned off at compile time.
+
+
+## Prerequisites
+
+The i18n implementation adds a dependency on ICU. If you run
+
+```
+make dependencies
+```
+
+a suitable version of ICU is checked out into `third_party/icu`.
+
+
+### Alternative ICU checkout
+
+You can check out the ICU sources at a different location and define the gyp variable `icu_gyp_path` to point at the `icu.gyp` file.
+
+
+### System ICU
+
+Last but not least, you can compile V8 against a version of ICU installed in your system. To do so, specify the gyp variable `use_system_icu=1`. If you also have `want_separate_host_toolset` enabled, the bundled ICU will still be compiled to generate the V8 snapshot. The system ICU will only be used for the target architecture.
+
+
+## Embedding V8
+
+If you embed V8 in your application, but your application itself doesn't use ICU, you will need to initialize ICU before calling into V8 by executing:
+
+```
+v8::V8::InitializeICU();
+```
+
+It is safe to invoke this method even if ICU support was not compiled in; in that case it does nothing.
+
+
+## Compiling without i18n support
+
+To build V8 without i18n support use
+
+```
+make i18nsupport=off native
+``` \ No newline at end of file
diff --git a/deps/v8/docs/javascript.md b/deps/v8/docs/javascript.md
new file mode 100644
index 0000000000..f3a501b985
--- /dev/null
+++ b/deps/v8/docs/javascript.md
@@ -0,0 +1,6 @@
+# Introduction
+
+JavaScript is a dynamically typed scripting language universally used to
+script web content in browsers.
+
+Its specification by ECMA can be found [here](http://www.ecma-international.org/publications/standards/Ecma-262.htm). \ No newline at end of file
diff --git a/deps/v8/docs/javascript_stack_trace_api.md b/deps/v8/docs/javascript_stack_trace_api.md
new file mode 100644
index 0000000000..4a0d104c05
--- /dev/null
+++ b/deps/v8/docs/javascript_stack_trace_api.md
@@ -0,0 +1,161 @@
+All internal errors thrown in V8 capture a stack trace when they are created that can be accessed from JavaScript through the error.stack property. V8 also has various hooks for controlling how stack traces are collected and formatted, and for allowing custom errors to also collect stack traces. This document outlines V8's JavaScript stack trace API.
+
+### Basic stack traces
+
+By default, almost all errors thrown by V8 have a `stack` property that holds the topmost 10 stack frames, formatted as a string. Here's an example of a fully formatted stack trace:
+
+```
+ReferenceError: FAIL is not defined
+ at Constraint.execute (deltablue.js:525:2)
+ at Constraint.recalculate (deltablue.js:424:21)
+ at Planner.addPropagate (deltablue.js:701:6)
+ at Constraint.satisfy (deltablue.js:184:15)
+ at Planner.incrementalAdd (deltablue.js:591:21)
+ at Constraint.addConstraint (deltablue.js:162:10)
+ at Constraint.BinaryConstraint (deltablue.js:346:7)
+ at Constraint.EqualityConstraint (deltablue.js:515:38)
+ at chainTest (deltablue.js:807:6)
+ at deltaBlue (deltablue.js:879:2)
+```
+
+The stack trace is collected when the error is created and is the same regardless of where or how many times the error is thrown. We collect 10 frames because it is usually enough to be useful but not so many that it has a noticeable performance impact. You can control how many stack frames are collected by setting the variable
+
+```
+Error.stackTraceLimit
+```
+
+Setting it to 0 will disable stack trace collection. Any finite integer value will be used as the maximum number of frames to collect. Setting it to `Infinity` means that all frames will be collected. This variable only affects the current context; it has to be set explicitly for each context that needs a different value. (Note that what is known as a "context" in V8 terminology corresponds to a page or iframe in Google Chrome.) To set a different default value that affects all contexts use the
+
+```
+--stack-trace-limit <value>
+```
+
+command-line flag to V8. To pass this flag to V8 when running Google Chrome use
+
+```
+--js-flags="--stack-trace-limit <value>"
+```
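+
+For example (an illustrative snippet):
+
+```
+Error.stackTraceLimit = 20;  // applies to errors created in this context from now on
+var err = new Error('example');
+// err.stack now holds the message line plus up to 20 formatted frames.
+```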
+
+### Stack trace collection for custom exceptions
+The stack trace mechanism used for built-in errors is implemented using a general stack trace collection API that is also available to user scripts. The function
+
+```
+Error.captureStackTrace(error, constructorOpt)
+```
+
+adds a stack property to the given `error` object that will yield the stack trace at the time captureStackTrace was called. The reason for not just returning the formatted stack trace directly is that this way we can postpone the formatting of the stack trace until the stack property is accessed and avoid formatting completely if it never is.
+
+The optional `constructorOpt` parameter allows you to pass in a function value. When collecting the stack trace all frames above the topmost call to this function, including that call, will be left out of the stack trace. This can be useful to hide implementation details that won't be useful to the user. The usual way of defining a custom error that captures a stack trace would be:
+
+```
+function MyError() {
+ Error.captureStackTrace(this, MyError);
+ // any other initialization
+}
+```
+
+Passing in MyError as a second argument means that the constructor call to MyError won't show up in the stack trace.
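+
+For example (an illustrative continuation of the snippet above):
+
+```
+function throwing() { throw new MyError(); }
+try {
+  throwing();
+} catch (e) {
+  // e.stack begins at the throwing() frame; the call into the MyError
+  // constructor itself is left out.
+}
+```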
+
+### Customizing stack traces
+Unlike Java where the stack trace of an exception is a structured value that allows inspection of the stack state, the stack property in V8 just holds a flat string containing the formatted stack trace. This is for no other reason than compatibility with other browsers. However, this is not hardcoded but only the default behavior and can be overridden by user scripts.
+
+For efficiency stack traces are not formatted when they are captured but on demand, the first time the stack property is accessed. A stack trace is formatted by calling
+
+```
+Error.prepareStackTrace(error, structuredStackTrace)
+```
+
+and using whatever this call returns as the value of the `stack` property. If you assign a different function value to `Error.prepareStackTrace` that function will be used to format stack traces. It will be passed the error object that it is preparing a stack trace for and a structured representation of the stack. User stack trace formatters are free to format the stack trace however they want and even return non-string values. It is safe to retain references to the structured stack trace object after a call to prepareStackTrace completes so that it is also a valid return value. Note that the custom prepareStackTrace function is immediately called at the point when the error object is created (e.g. with `new Error()`).
+
+The structured stack trace is an Array of CallSite objects, each of which represents a stack frame. A CallSite object defines the following methods
+
+ * **getThis**: returns the value of this
+ * **getTypeName**: returns the type of this as a string. This is the name of the function stored in the constructor field of this, if available, otherwise the object's `[[Class]]` internal property.
+ * **getFunction**: returns the current function
+ * **getFunctionName**: returns the name of the current function, typically its name property. If a name property is not available an attempt will be made to infer a name from the function's context.
+ * **getMethodName**: returns the name of the property of this or one of its prototypes that holds the current function
+ * **getFileName**: if this function was defined in a script returns the name of the script
+ * **getLineNumber**: if this function was defined in a script returns the current line number
+ * **getColumnNumber**: if this function was defined in a script returns the current column number
+ * **getEvalOrigin**: if this function was created using a call to eval returns a CallSite object representing the location where eval was called
+ * **isToplevel**: is this a toplevel invocation, that is, is this the global object?
+ * **isEval**: does this call take place in code defined by a call to eval?
+ * **isNative**: is this call in native V8 code?
+ * **isConstructor**: is this a constructor call?
+
+The default stack trace is created using the CallSite API so any information that is available there is also available through this API.
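+
+As a sketch of how this fits together (the formatter below is only an example, not the default one):
+
+```
+Error.prepareStackTrace = function(error, structuredStackTrace) {
+  // Return an array of "functionName (fileName:lineNumber)" strings
+  // instead of the default flat string.
+  return structuredStackTrace.map(function(callSite) {
+    return callSite.getFunctionName() + ' (' +
+           callSite.getFileName() + ':' + callSite.getLineNumber() + ')';
+  });
+};
+
+var err = new Error('boom');
+// err.stack is now the array returned by the custom formatter above.
+```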
+
+To maintain restrictions imposed on strict mode functions, frames that have a strict mode function and all frames below it (its caller etc.) are not allowed to access their receiver and function objects. For those frames, `getFunction()` and `getThis()` will return `undefined`.
+
+### Compatibility
+The API described here is specific to V8 and is not supported by any other JavaScript implementations. Most implementations do provide an `error.stack` property but the format of the stack trace is likely to be different from the format described here. The recommended use of this API is
+
+ * Only rely on the layout of the formatted stack trace if you know your code is running in V8.
+ * It is safe to set `Error.stackTraceLimit` and `Error.prepareStackTrace` regardless of which implementation is running your code, but be aware that they will only have an effect if your code is running in V8; a feature-detection sketch follows below.
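+
+A minimal feature-detection sketch, assuming nothing beyond the API described above:
+
+```
+if (typeof Error.captureStackTrace === 'function') {
+  // Running on V8: the stack trace API is available.
+  Error.stackTraceLimit = 20;
+}
+```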
+
+### Appendix: Stack trace format
+The default stack trace format used by V8 can for each stack frame give the following information:
+
+ * Whether the call is a construct call.
+ * The type of the this value (Type).
+ * The name of the function called (functionName).
+ * The name of the property of this or one of its prototypes that holds the function (methodName).
+ * The current location within the source (location)
+
+Any of these may be unavailable and different formats for stack frames are used depending on how much of this information is available. If all the above information is available a formatted stack frame will look like this:
+
+```
+at Type.functionName [as methodName] (location)
+```
+
+or, in the case of a construct call
+
+```
+at new functionName (location)
+```
+
+If only one of functionName and methodName is available, or if they are both available but the same, the format will be:
+
+```
+at Type.name (location)
+```
+
+If neither is available `<anonymous>` will be used as the name.
+
+The Type value is the name of the function stored in the constructor field of this. In V8 all constructor calls set this property to the constructor function, so unless this field has been actively changed after the object was created, it will hold the name of the function it was created by. If it is unavailable the `[[Class]]` property of the object will be used.
+
+One special case is the global object where the Type is not shown. In that case the stack frame will be formatted as
+
+```
+at functionName [as methodName] (location)
+```
+
+The location itself has several possible formats. Most common is the file name, line and column number within the script that defined the current function
+
+```
+fileName:lineNumber:columnNumber
+```
+
+If the current function was created using eval the format will be
+
+```
+eval at position
+```
+
+where position is the full position where the call to eval occurred. Note that this means that positions can be nested if there are nested calls to eval, for instance:
+
+```
+eval at Foo.a (eval at Bar.z (myscript.js:10:3))
+```
+
+If a stack frame is within V8's libraries the location will be
+
+```
+native
+```
+
+and if the location is unavailable it will be
+
+```
+unknown location
+``` \ No newline at end of file
diff --git a/deps/v8/docs/merging_and_patching.md b/deps/v8/docs/merging_and_patching.md
new file mode 100644
index 0000000000..d141f32984
--- /dev/null
+++ b/deps/v8/docs/merging_and_patching.md
@@ -0,0 +1,66 @@
+# Introduction
+
+If you have a patch to the master branch (e.g. an important bug fix) that needs to be merged into one of the production V8 branches, read on.
+
+For the examples, a branched 2.4 version of V8 will be used. Substitute "2.4" with your version number.
+
+**An associated issue is mandatory if a patch is merged. This helps with keeping track of merges.**
+
+# Merge process outlined
+
+The merge process in the Chromium and V8 tracker is driven by labels in the form of
+```
+Merge-[Status]-[Branch]
+```
+The currently important labels for V8 are:
+
+ 1. Merge-Request-## initiates the process => This fix should be merged into M-##
+ 1. Merge-Review-## => The merge is not approved yet for M-##, e.g. because Canary coverage is missing
+ 1. Merge-Approved-## => Simply means that the Chrome TPMs have signed off on the merge
+ 1. Merge-Merged-$BRANCHNUMBER$ => When the merge is done the Merge-Approved label is swapped with this one. $BRANCHNUMBER$ is the name/number of the V8 branch e.g. 4.3 for M-43.
+
+# Instructions for git using the automated script
+
+## How to check if a commit was already merged/reverted
+
+Use mergeinfo.py to get all the commits which are connected to the HASH according to Git.
+
+```
+tools/release/mergeinfo.py HASH
+```
+
+## Step 1: Run the script
+
+Let's assume you're merging revision af3cf11 to branch 2.4 (please specify full git hashes - abbreviations are used here for simplicity).
+
+```
+tools/release/merge_to_branch.py --branch 2.4 af3cf11
+```
+
+Run the script with '-h' to display its help message, which includes more options (e.g. you can specify a file containing your patch, or you can reverse a patch, specify a custom commit message, or resume a merging process you've canceled before). Note that the script will use a temporary checkout of v8 - it won't touch your work space.
+You can also merge more than one revision at once, just list them all.
+
+```
+tools/release/merge_to_branch.py --branch 2.4 af3cf11 cf33f1b sf3cf09
+```
+
+## Step 2: Send a notification letter to hablich@chromium.org
+
+Saying something like this:
+```
+_Subject:_ Regression fix merged into V8 2.4 branch (Chrome 8)
+
+_Body:_ We have merged a fix to the V8 version 2.4 branch (the version used in Chrome 8)
+
+Version 2.4.9.10: Issue xxx: The parser doesn't parse.
+```
+
+# FAQ
+
+## I get an error during merge that is related to tagging. What should I do?
+When two people are merging at the same time a race condition can happen in the merge scripts. If this is the case, contact machenbach@chromium.org and hablich@chromium.org.
+## Is there a TL;DR?
+ 1. Create issue
+ 1. Add Merge-Request-{Branch} to the issue
+ 1. Wait until somebody adds Merge-Approved-{Branch}
+ 1. Merge \ No newline at end of file
diff --git a/deps/v8/docs/profiling_chromium_with_v8.md b/deps/v8/docs/profiling_chromium_with_v8.md
new file mode 100644
index 0000000000..46cdac44ad
--- /dev/null
+++ b/deps/v8/docs/profiling_chromium_with_v8.md
@@ -0,0 +1,34 @@
+# Introduction
+
+V8's CPU & Heap profilers are trivial to use from V8's shells (see V8Profiler), but it may appear confusing how to use them with Chromium. This page should help you with it.
+
+# Instructions
+
+## Why is using V8's profilers with Chromium different from using them with V8 shells?
+
+Chromium is a complex application, unlike V8 shells. Below is the list of Chromium features that affect profiler usage:
+
+ * each renderer is a separate process (OK, not actually each, but let's omit this detail), so they can't share the same log file;
+ * the sandbox built around the renderer process prevents it from writing to disk;
+ * Developer Tools configure profilers for their own purposes;
+ * V8's logging code contains some optimizations to simplify logging state checks.
+
+## So, how to run Chromium to get a CPU profile?
+
+Here is how to run Chromium in order to get a CPU profile from the start of the process:
+```
+./Chromium --no-sandbox --js-flags="--logfile=%t.log --prof"
+```
+
+Please note that you won't see profiles in Developer Tools, because all the data is logged to a file, not to Developer Tools.
+
+### Flags description
+
+ * **--no-sandbox** - turns off the renderer sandbox, which is required here;
+ * **--js-flags** - this is the container for flags passed to V8:
+ * **--logfile=%t.log** - specifies a name pattern for log files; **%t** gets expanded into current time in milliseconds, so each process gets its own log file; you can use prefixes and suffixes if you want, like this: **prefix-%t-suffix.log**;
+ * **--prof** - tells V8 to write statistical profiling information into the log file.
+
+## Notes
+
+Under Windows, be sure to turn on .MAP file creation for **chrome.dll**, but not for **chrome.exe**. \ No newline at end of file
diff --git a/deps/v8/docs/release_process.md b/deps/v8/docs/release_process.md
new file mode 100644
index 0000000000..c6b36ad68e
--- /dev/null
+++ b/deps/v8/docs/release_process.md
@@ -0,0 +1,57 @@
+# Introduction
+
+The V8 release process is tightly connected to [Chrome's](https://www.chromium.org/getting-involved/dev-channel). The V8 team is using all four Chrome release channels to push new versions to the users.
+
+If you want to look up what V8 version is in a Chrome release you can check [OmahaProxy](https://omahaproxy.appspot.com/). For each Chrome release a separate branch is created in the V8 repository to make the trace-back easier e.g. for [Chrome 45.0.2413.0](https://chromium.googlesource.com/v8/v8.git/+/chromium/2413).
+
+# Canary releases
+Every day a new Canary build is pushed to the users via [Chrome's Canary channel](https://www.google.com/chrome/browser/canary.html?platform=win64). Normally the deliverable is the latest, stable enough version from [master](https://chromium.googlesource.com/v8/v8.git/+/roll).
+
+Branches for a Canary normally look like this
+
+```
+remotes/origin/4.5.35
+```
+
+# Dev releases
+Every week a new Dev build is pushed to the users via [Chrome's Dev channel](https://www.google.com/chrome/browser/desktop/index.html?extra=devchannel&platform=win64). Normally the deliverable includes the latest stable enough V8 version on the Canary channel.
+
+Branches for a Dev normally look like this
+
+```
+remotes/origin/4.5.35
+```
+
+# Beta releases
+Roughly every 6 weeks a new major branch is created e.g. [for Chrome 44](https://chromium.googlesource.com/v8/v8.git/+log/branch-heads/4.4). This happens in sync with the creation of [Chrome's Beta channel](https://www.google.com/chrome/browser/beta.html?platform=win64). The Chrome Beta is pinned to the head of V8's branch. After approx. 6 weeks the branch is promoted to Stable.
+
+Changes are only cherry-picked onto the branch in order to stabilize the version.
+
+Branches for a Beta normally look like this
+
+```
+remotes/branch-heads/4.5
+```
+
+They are based on a Canary branch.
+
+# Stable releases
+Roughly every 6 weeks a new major Stable release is done. No special branch is created as the latest Beta branch is simply promoted to Stable. This version is pushed to the users via [Chrome's Stable channel](https://www.google.com/chrome/browser/desktop/index.html?platform=win64).
+
+Branches for a Stable normally look like this
+
+```
+remotes/branch-heads/4.5
+```
+
+They are promoted (reused) Beta branches.
+
+# Which version should I embed in my application?
+
+The tip of the same branch that Chrome's Stable channel uses.
+
+We often backmerge important bug fixes to a stable branch, so if you care about stability, security, and correctness, you should include those updates too -- that's why we recommend "the tip of the branch", as opposed to an exact version.
+
+As soon as a new branch is promoted to Stable, we stop maintaining the previous stable branch. This happens every six weeks, so you should be prepared to update at least this often.
+
+Example: The current stable Chrome release is [44.0.2403.125](https://omahaproxy.appspot.com), with V8 4.4.63.25. So you should embed [branch-heads/4.4](https://chromium.googlesource.com/v8/v8.git/+/branch-heads/4.4). And you should update to branch-heads/4.5 when Chrome 45 is released on the Stable channel. \ No newline at end of file
diff --git a/deps/v8/docs/runtime_functions.md b/deps/v8/docs/runtime_functions.md
new file mode 100644
index 0000000000..4f99fdb2ca
--- /dev/null
+++ b/deps/v8/docs/runtime_functions.md
@@ -0,0 +1,14 @@
+# Introduction
+
+Much of the JavaScript library is implemented in JavaScript code itself,
+using a minimal set of C++ runtime functions callable from JavaScript.
+Some of these are called using names that start with %, together with the
+flag `--allow-natives-syntax`. Others are only called by code generated by
+the code generators, and are not visible in JS, even using the % syntax.
+
diff --git a/deps/v8/docs/source.md b/deps/v8/docs/source.md
new file mode 100644
index 0000000000..fa869b4ec2
--- /dev/null
+++ b/deps/v8/docs/source.md
@@ -0,0 +1,39 @@
+**Quick links:** [browse](http://code.google.com/p/v8/source/browse) | [browse bleeding edge](http://code.google.com/p/v8/source/browse/branches/bleeding_edge) | [changes](https://chromium.googlesource.com/v8/v8.git).
+
+## Command-Line Access
+
+### Git
+See [UsingGit](using_git.md).
+
+### Subversion (deprecated)
+
+Use this command to anonymously check out the up-to-date stable version of the project source code:
+
+> `svn checkout http://v8.googlecode.com/svn/trunk/ v8`
+
+If you plan to contribute to V8 but are not a member, use this command to anonymously check out a read-only version of the development branch:
+
+> `svn checkout http://v8.googlecode.com/svn/branches/bleeding_edge/ v8`
+
+If you're a member of the project, use this command to check out a writable development branch as yourself using HTTPS:
+
+> `svn checkout https://v8.googlecode.com/svn/branches/bleeding_edge/ v8 --username <your username>`
+
+When prompted, enter your generated [googlecode.com](http://code.google.com/hosting/settings) password.
+
+## Source Code Branches
+
+There are several different branches of V8; if you're unsure of which version to get, you most likely want the up-to-date stable version in `trunk/`. Here's an overview of the different branches:
+
+ * The bleeding edge, `branches/bleeding_edge/`, is where active development takes place. If you're considering contributing to V8 this is the branch to get.
+ * Under `trunk/` is the "stable edge", which is updated a few times per week. It is a copy of the bleeding edge that has been successfully tested. Use this if you want to be almost up to date and don't want your code to break whenever we accidentally forget to add a file on the bleeding edge. Some of the trunk revisions are tagged with X.Y.Z.T version labels. When we decide which of X.Y.*.* is the "most stable", it becomes the X.Y branch in subversion.
+ * If you want a well-tested version that doesn't change except for bugfixes, use one of the versioned branches (e.g. `branches/3.16/` at the time of this writing). Note that usually only the last two branches are actively maintained; any older branches could have unfixed security holes. You may want to follow the V8 version that Chrome is shipping on its stable (or beta) channels, see http://omahaproxy.appspot.com.
+
+## V8 public API compatibility
+
+The V8 public API (basically the files under the include/ directory) may change over time. New types/methods may be added without breaking existing functionality. When we decide that we want to drop some existing class/method, we first mark it with the [V8\_DEPRECATED](https://code.google.com/p/chromium/codesearch#search/&q=V8_DEPRECATED&sq=package:chromium&type=cs) macro which will cause compile-time warnings when the deprecated methods are called by the embedder. We keep a deprecated method for one branch and then remove it. E.g. if `v8::CpuProfiler::FindCpuProfile` was not deprecated in the _3.17_ branch and was marked as `V8_DEPRECATED` in _3.18_, it may well be removed in the _3.19_ branch.
+
+
+## GUI and IDE Access
+
+This project's Subversion repository may be accessed using many different client programs and plug-ins. See your client's documentation for more information. \ No newline at end of file
diff --git a/deps/v8/docs/testing.md b/deps/v8/docs/testing.md
new file mode 100644
index 0000000000..a777c0c5a0
--- /dev/null
+++ b/deps/v8/docs/testing.md
@@ -0,0 +1,58 @@
+V8 includes a test framework that allows you to test the engine. The framework lets you run both our own test suites, which are included with the source code, and others, such as the Mozilla and Test262 test suites.
+
+## Running the V8 tests
+
+Before you run the tests, you will have to build V8 with GYP using the instructions [here](http://code.google.com/p/v8-wiki/wiki/BuildingWithGYP).
+
+You can append `.check` to any build target to have tests run for it, e.g.
+```
+make ia32.release.check
+make ia32.check
+make release.check
+make check # builds and tests everything (no dot before "check"!)
+```
+
+Before submitting patches, you should always run the quickcheck target, which builds a fast debug build and runs only the most relevant tests:
+```
+make quickcheck
+```
+
+You can also run tests manually:
+```
+tools/run-tests.py --arch-and-mode=ia32.release [--outdir=foo]
+```
+
+Or you can run individual tests:
+```
+tools/run-tests.py --arch=ia32 cctest/test-heap/SymbolTable mjsunit/delete-in-eval
+```
+
+Run the script with `--help` to find out about its other options; `--outdir` defaults to `out`. Also note that using the `cctest` binary to run multiple tests in one process is not supported.
+
+## Running the Mozilla and Test262 tests
+
+The V8 test framework comes with support for running the Mozilla as well as the Test262 test suite. To download the test suites and then run them for the first time, do the following:
+
+```
+tools/run-tests.py --download-data mozilla
+tools/run-tests.py --download-data test262
+```
+
+To run the tests subsequently, you may omit the flag that downloads the test suite:
+
+```
+tools/run-tests.py mozilla
+tools/run-tests.py test262
+```
+
+Note that V8 fails a number of Mozilla tests because they require Firefox-specific extensions.
+
+## Running the WebKit tests
+
+Sometimes all of the above tests pass but WebKit build bots fail. To make sure WebKit tests pass, run:
+
+```
+tools/run-tests.py --progress=verbose --outdir=out --arch=ia32 --mode=release webkit --timeout=200
+```
+
+Replace --arch and other parameters with values that match your build options. \ No newline at end of file
diff --git a/deps/v8/docs/triaging_issues.md b/deps/v8/docs/triaging_issues.md
new file mode 100644
index 0000000000..075186f697
--- /dev/null
+++ b/deps/v8/docs/triaging_issues.md
@@ -0,0 +1,22 @@
+# How to get an issue triaged
+* *V8 tracker*: Set the state to `Untriaged`
+* *Chromium tracker*: Set the state to `Untriaged` and add the label `Cr-Blink-JavaScript`
+
+# How to assign V8 issues in the Chromium tracker
+Please assign issues to the V8 specialty sheriffs of one of the
+following categories:
+
+ * Stability: jkummerow@c....org, adamk@c....org
+ * Performance: bmeurer@c....org, mvstanton@c....org
+ * Clusterfuzz: Set the bug to the following state:
+ * `label:ClusterFuzz label:Cr-Blink-JavaScript status:Available -has:owner`
+ * Will show up in [this](https://code.google.com/p/chromium/issues/list?can=2&q=label%3AClusterFuzz+label%3ACr-Blink-JavaScript+status%3AAvailable+-has%3Aowner&colspec=ID+Pri+M+Week+ReleaseBlock+Cr+Status+Owner+Summary+OS+Modified&x=m&y=releaseblock&cells=tiles) query.
+ * CC mstarzinger@ and ishell@
+
+Please CC hablich@c....org on all issues.
+
+Assign remaining issues to hablich@c....org.
+
+Use the label Cr-Blink-JavaScript on all issues.
+
+**Please note that this only applies to issues tracked in the Chromium issue tracker.** \ No newline at end of file
diff --git a/deps/v8/docs/using_git.md b/deps/v8/docs/using_git.md
new file mode 100644
index 0000000000..b5e392aedd
--- /dev/null
+++ b/deps/v8/docs/using_git.md
@@ -0,0 +1,147 @@
+# Git repository
+
+V8's git repository is located at https://chromium.googlesource.com/v8/v8.git
+
+V8's master branch also has an official git mirror on GitHub: http://github.com/v8/v8-git-mirror.
+
+**Don't just `git-clone` either of these URLs** if you want to build V8 from your checkout, instead follow the instructions below to get everything set up correctly.
+
+## Prerequisites
+
+ 1. **Git**. To install using `apt-get`:
+```
+apt-get install git
+```
+ 1. **depot\_tools**. See [instructions](http://dev.chromium.org/developers/how-tos/install-depot-tools).
+ 1. For **push access**, you need to set up a .netrc file with your git password:
+ 1. Go to https://chromium.googlesource.com/new-password - login with your committer account (e.g. @chromium.org account, non-chromium.org ones work too). Note: creating a new password doesn't automatically revoke any previously created passwords.
+ 1. Follow the instructions in the "Staying Authenticated" section. It will ask you to copy-paste two lines into your ~/.netrc file.
+ 1. In the end, ~/.netrc should have two lines that look like:
+```
+machine chromium.googlesource.com login git-yourusername.chromium.org password <generated pwd>
+machine chromium-review.googlesource.com login git-yourusername.chromium.org password <generated pwd>
+```
+ 1. Make sure that the ~/.netrc file's permissions are 0600, as many programs refuse to read .netrc files which are readable by anyone other than you.
+
+
+## How to start
+
+Make sure depot\_tools are up-to-date by typing once:
+
+```
+gclient
+```
+
+
+Then get V8, including all branches and dependencies:
+
+```
+fetch v8
+cd v8
+```
+
+After that you're intentionally in a detached head state.
+
+Optionally you can specify how new branches should be tracked:
+
+```
+git config branch.autosetupmerge always
+git config branch.autosetuprebase always
+```
+
+Alternatively, you can create new local branches like this (recommended):
+
+```
+git new-branch mywork
+```
+
+## Staying up-to-date
+
+Update your current branch with `git pull`. Note that if you're not on a branch, `git pull` won't work, and you'll need to use `git fetch` instead.
+
+```
+git pull
+```
+
+Sometimes dependencies of v8 are updated. You can synchronize those by running
+
+```
+gclient sync
+```
+
+## Sending code for reviewing
+
+```
+git cl upload
+```
+
+## Committing
+
+You can use the CQ checkbox on codereview for committing (preferred). See also the [chromium instructions](http://www.chromium.org/developers/testing/commit-queue) for CQ flags and troubleshooting.
+
+If you need more trybots than the default, add the following to your commit message on rietveld (e.g. for adding a nosnap bot):
+
+```
+CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux_nosnap_rel
+```
+
+To land manually, update your branch:
+
+```
+git pull --rebase origin
+```
+
+Then commit using
+
+```
+git cl land
+```
+
+# For project members
+
+
+## Try jobs
+
+### Creating a try job from codereview
+
+ 1. Upload a CL to rietveld.
+```
+git cl upload
+```
+ 1. Try the CL by sending a try job to the try bots like this:
+```
+git cl try
+```
+ 1. Wait for the try bots to build and you will get an e-mail with the result. You can also check the try state at your patch on codereview.
+ 1. If applying the patch fails you either need to rebase your patch or specify the v8 revision to sync to:
+```
+git cl try --revision=1234
+```
+
+### Creating a try job from a local branch
+
+ 1. Commit some changes to a git branch in the local repo.
+ 1. Try the change by sending a try job to the try bots like this:
+```
+git try
+```
+ 1. Wait for the try bots to build and you will get an e-mail with the result. Note: There are issues with some of the slaves at the moment. Sending try jobs from codereview is recommended.
+
+### Useful arguments
+
+The revision argument tells the try bot which revision of the code base your local changes will be applied to. Without the revision, our LKGR revision is used as the base (http://v8-status.appspot.com/lkgr).
+```
+git try --revision=1234
+```
+To avoid running your try job on all bots, use the --bot flag with a comma-separated list of builder names. Example:
+```
+git try --bot=v8_mac_rel
+```
+
+### Viewing the try server
+
+http://build.chromium.org/p/tryserver.v8/waterfall
+
+### Access credentials
+
+If asked for access credentials, use your @chromium.org email address and your generated password from [googlecode.com](http://code.google.com/hosting/settings). \ No newline at end of file
diff --git a/deps/v8/docs/v8_c_plus_plus_styleand_sops.md b/deps/v8/docs/v8_c_plus_plus_styleand_sops.md
new file mode 100644
index 0000000000..5686504543
--- /dev/null
+++ b/deps/v8/docs/v8_c_plus_plus_styleand_sops.md
@@ -0,0 +1,7 @@
+# Introduction
+
+In general, V8 should conform to Google's/Chrome's C++ Style Guide for new code that is written. Your V8 code should conform to it as much as possible. There will always be cases where Google/Chrome Style Guide conformity or Google/Chrome best practices are extremely cumbersome or underspecified for our use cases. We document these exceptions here.
+
+# Details
+
+Coming Soon \ No newline at end of file
diff --git a/deps/v8/docs/v8_committers_responsibility.md b/deps/v8/docs/v8_committers_responsibility.md
new file mode 100644
index 0000000000..ee8d212563
--- /dev/null
+++ b/deps/v8/docs/v8_committers_responsibility.md
@@ -0,0 +1,41 @@
+## Basic commit guidelines
+
+When you're committing to the V8 repositories, ensure that you follow these guidelines:
+
+ 1. Find the right reviewer for your changes and for patches you're asked to review.
+ 1. Be available on IM and/or email before and after you land the change.
+ 1. Watch the [waterfall](http://build.chromium.org/p/client.v8/console) until all bots turn green after your change.
+ 1. When landing a TBR change (To Be Reviewed), make sure to notify the people whose code you're changing. Usually just send the review e-mail.
+
+In short, do the right thing for the project, not the easiest thing to get code committed, and above all: use your best judgement.
+
+**Don't be afraid to ask questions. There is always someone who will immediately read messages sent to the v8-committers mailing list who can help you.**
+
+## Changes with multiple reviewers
+
+There are occasionally changes with a lot of reviewers on them, since sometimes several people might need to be in the loop for a change because of multiple areas of responsibility and expertise.
+
+The problem is that without some guidelines, there's no clear responsibility given in these reviews.
+
+If you're the sole reviewer on a change, you know you have to do a good job. When there are three other people, you sometimes assume that somebody else must have looked carefully at some part of the review. Sometimes all the reviewers think this and the change isn't reviewed properly.
+
+In other cases, some reviewers say "LGTM" for a patch, while others are still expecting changes. The author can get confused as to the status of the review, and some patches have been checked in where at least one reviewer expected further changes before committing.
+
+At the same time, we want to encourage many people to participate in the review process and keep tabs on what's going on.
+
+So, here are some guidelines to help clarify the process:
+ 1. When a patch author requests more than one reviewer, they should make clear in the review request email what they expect the responsibility of each reviewer to be. For example, you could write this in the email:
+```
+
+ a. larry: bitmap changes
+ b. sergey: process hacks
+ c. everybody else: FYI
+
+```
+ 1. In this case, you might be on the review list because you've asked to be in the loop for multiprocess changes, but you wouldn't be the primary reviewer and the author and other reviewers wouldn't be expecting you to review all the diffs in detail.
+ 1. If you get a review that includes many other people, and the author didn't do (1), please ask them what part you're responsible for if you don't want to review the whole thing in detail.
+ 1. The author should wait for approval from everybody on the reviewer list before checking in.
+ 1. People who are on a review without clear review responsibility (i.e. drive-by reviews) should be super responsive and not hold up the review. The patch author should feel free to ping them mercilessly if they do.
+ 1. If you're an "FYI" person on a review and you didn't actually review in detail (or at all), but don't have a problem with the patch, note this. You could say something like "rubber stamp" or "ACK" instead of "LGTM." This way the real reviewers know not to trust that you did their work for them, but the author of the patch knows they don't have to wait for further feedback from you. Hopefully we can still keep everybody in the loop but have clear ownership and detailed reviews. It might even speed up some changes since you can quickly "ACK" changes you don't care about, and the author knows they don't have to wait for feedback from you.
+
+(Adapted from: http://dev.chromium.org/developers/committers-responsibility ) \ No newline at end of file
diff --git a/deps/v8/docs/v8_profiler.md b/deps/v8/docs/v8_profiler.md
new file mode 100644
index 0000000000..670fe11dd7
--- /dev/null
+++ b/deps/v8/docs/v8_profiler.md
@@ -0,0 +1,141 @@
+# Introduction
+
+V8 has built-in sample-based profiling. Profiling is turned off by default, but can be enabled via the `--prof` command line option. The sampler records stacks of both JavaScript and C/C++ code.
+
+# Build
+Build the d8 shell following the instructions at [BuildingWithGYP](building_with_gyp.md).
+
+
+# Command Line
+To start profiling, use the `--prof` option. When profiling, V8 generates a `v8.log` file which contains profiling data.
+
+Windows:
+```
+build\Release\d8 --prof script.js
+```
+
+Other platforms (replace "ia32" with "x64" if you want to profile the x64 build):
+```
+out/ia32.release/d8 --prof script.js
+```
+
+# Process the Generated Output
+
+Log file processing is done using JS scripts run by the d8 shell. For this to work, a `d8` binary (or symlink, or `d8.exe` on Windows) must be in the root of your V8 checkout, or in the path specified by the environment variable `D8_PATH`. Note: this binary is just used to process the log, not for the actual profiling, so it doesn't matter which version it is.
+
+Windows:
+```
+tools\windows-tick-processor.bat v8.log
+```
+
+Linux:
+```
+tools/linux-tick-processor v8.log
+```
+
+Mac OS X:
+```
+tools/mac-tick-processor v8.log
+```
+
+## Snapshot-based VM build and builtins reporting
+
+When a snapshot-based VM build is being used, code objects from a snapshot that don't correspond to functions are reported with generic names like _"A builtin from the snapshot"_, because their real names are not stored in the snapshot. To see the names the following steps must be taken:
+
+ * `--log-snapshot-positions` flag must be passed to VM (along with `--prof`); this way, for deserialized objects the `(memory address, snapshot offset)` pairs are being emitted into profiler log;
+
+ * `--snapshot-log=<log file from mksnapshot>` flag must be passed to the tick processor script; a log file from the `mksnapshot` program (a snapshot log) contains address-offset pairs for serialized objects, and their names; using the snapshot log, names can be mapped onto deserialized objects during profiler log processing; the snapshot log file is called `snapshot.log` and resides alongside with V8's compiled files.
+
+An example of usage:
+```
+out/ia32.release/d8 --prof --log-snapshot-positions script.js
+tools/linux-tick-processor --snapshot-log=out/ia32.release/obj.target/v8_snapshot/geni/snapshot.log v8.log
+```
+
+# Programmatic Control of Profiling
+If you would like to control in your application when profile samples are collected, you can do so.
+
+First you'll probably want to use the `--noprof-auto` command line switch which prevents the profiler from automatically starting to record profile ticks.
+
+Profile ticks will not be recorded until your application specifically invokes these APIs:
+ * `V8::ResumeProfiler()` - start/resume collection of data
+ * `V8::PauseProfiler()` - pause collection of data
+
+# Example Output
+
+```
+Statistical profiling result from benchmarks\v8.log, (4192 ticks, 0 unaccounted, 0 excluded).
+
+ [Shared libraries]:
+ ticks total nonlib name
+ 9 0.2% 0.0% C:\WINDOWS\system32\ntdll.dll
+ 2 0.0% 0.0% C:\WINDOWS\system32\kernel32.dll
+
+ [JavaScript]:
+ ticks total nonlib name
+ 741 17.7% 17.7% LazyCompile: am3 crypto.js:108
+ 113 2.7% 2.7% LazyCompile: Scheduler.schedule richards.js:188
+ 103 2.5% 2.5% LazyCompile: rewrite_nboyer earley-boyer.js:3604
+ 103 2.5% 2.5% LazyCompile: TaskControlBlock.run richards.js:324
+ 96 2.3% 2.3% Builtin: JSConstructCall
+ ...
+
+ [C++]:
+ ticks total nonlib name
+ 94 2.2% 2.2% v8::internal::ScavengeVisitor::VisitPointers
+ 33 0.8% 0.8% v8::internal::SweepSpace
+ 32 0.8% 0.8% v8::internal::Heap::MigrateObject
+ 30 0.7% 0.7% v8::internal::Heap::AllocateArgumentsObject
+ ...
+
+
+ [GC]:
+ ticks total nonlib name
+ 458 10.9%
+
+ [Bottom up (heavy) profile]:
+ Note: percentage shows a share of a particular caller in the total
+ amount of its parent calls.
+ Callers occupying less than 2.0% are not shown.
+
+ ticks parent name
+ 741 17.7% LazyCompile: am3 crypto.js:108
+ 449 60.6% LazyCompile: montReduce crypto.js:583
+ 393 87.5% LazyCompile: montSqrTo crypto.js:603
+ 212 53.9% LazyCompile: bnpExp crypto.js:621
+ 212 100.0% LazyCompile: bnModPowInt crypto.js:634
+ 212 100.0% LazyCompile: RSADoPublic crypto.js:1521
+ 181 46.1% LazyCompile: bnModPow crypto.js:1098
+ 181 100.0% LazyCompile: RSADoPrivate crypto.js:1628
+ ...
+```
+
+# Timeline plot
+The timeline plot visualizes where V8 is spending time. This can be used to find bottlenecks and spot things that are unexpected (for example, too much time spent in the garbage collector). Data for the plot are gathered by both sampling and instrumentation. Linux with gnuplot 4.6 is required.
+
+To create a timeline plot, run V8 as described above, with the option `--log-timer-events` in addition to `--prof`:
+```
+out/ia32.release/d8 --prof --log-timer-events script.js
+```
+
+The output is then passed to a plot script, similar to the tick-processor:
+```
+tools/plot-timer-events v8.log
+```
+
+This creates `timer-events.png` in the working directory, which can be opened with most image viewers.
+
+# Options
+Since recording log output comes with a certain performance overhead, the script attempts to correct this using a distortion factor. If not specified, it tries to determine the factor automatically. You can, however, also specify the distortion factor manually.
+```
+tools/plot-timer-events --distortion=4500 v8.log
+```
+
+You can also manually specify a certain range for which to create the plot or statistical profile, expressed in milliseconds:
+```
+tools/plot-timer-events --distortion=4500 --range=1000,2000 v8.log
+tools/linux-tick-processor --distortion=4500 --range=1000,2000 v8.log
+```
+
+# HTML 5 version
+Both the statistical profile and the timeline plot are available [in the browser](http://v8.googlecode.com/svn/branches/bleeding_edge/tools/profviz/profviz.html). However, the statistical profile lacks C++ symbol resolution and the JavaScript port of gnuplot performs an order of magnitude slower than the native one. \ No newline at end of file
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index 8071fb34b6..0b64fb3882 100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -5,7 +5,7 @@
#ifndef V8_V8_DEBUG_H_
#define V8_V8_DEBUG_H_
-#include "v8.h"
+#include "v8.h" // NOLINT(build/include)
/**
* Debugger support for the V8 JavaScript engine.
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 121e8030a1..e432600290 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -6,7 +6,7 @@
#define V8_V8_PROFILER_H_
#include <vector>
-#include "v8.h"
+#include "v8.h" // NOLINT(build/include)
/**
* Profiler support for the V8 JavaScript engine.
diff --git a/deps/v8/include/v8-testing.h b/deps/v8/include/v8-testing.h
index c827b69401..d18fc72583 100644
--- a/deps/v8/include/v8-testing.h
+++ b/deps/v8/include/v8-testing.h
@@ -5,7 +5,7 @@
#ifndef V8_V8_TEST_H_
#define V8_V8_TEST_H_
-#include "v8.h"
+#include "v8.h" // NOLINT(build/include)
/**
* Testing support for the V8 JavaScript engine.
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index c996c9997d..73ec658f7b 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -5,7 +5,7 @@
#ifndef V8_UTIL_H_
#define V8_UTIL_H_
-#include "v8.h"
+#include "v8.h" // NOLINT(build/include)
#include <map>
#include <vector>
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 7744c089f6..3a3c69864f 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
-#define V8_MINOR_VERSION 6
-#define V8_BUILD_NUMBER 85
-#define V8_PATCH_LEVEL 31
+#define V8_MINOR_VERSION 7
+#define V8_BUILD_NUMBER 80
+#define V8_PATCH_LEVEL 24
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 481cdd9e94..3953543411 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -19,8 +19,8 @@
#include <stdint.h>
#include <stdio.h>
-#include "v8-version.h"
-#include "v8config.h"
+#include "v8-version.h" // NOLINT(build/include)
+#include "v8config.h" // NOLINT(build/include)
// We reserve the V8_* prefix for macros defined in V8 public API and
// assume there are no name conflicts with the embedder's code.
@@ -112,8 +112,8 @@ class MaybeLocal;
template <class T> class Eternal;
template<class T> class NonCopyablePersistentTraits;
template<class T> class PersistentBase;
-template<class T,
- class M = NonCopyablePersistentTraits<T> > class Persistent;
+template <class T, class M = NonCopyablePersistentTraits<T> >
+class Persistent;
template <class T>
class Global;
template<class K, class V, class T> class PersistentValueMap;
@@ -815,7 +815,7 @@ class Global : public PersistentBase<T> {
/**
* Move constructor.
*/
- V8_INLINE Global(Global&& other) : PersistentBase<T>(other.val_) {
+ V8_INLINE Global(Global&& other) : PersistentBase<T>(other.val_) { // NOLINT
other.val_ = nullptr;
}
V8_INLINE ~Global() { this->Reset(); }
@@ -823,7 +823,7 @@ class Global : public PersistentBase<T> {
* Move via assignment.
*/
template <class S>
- V8_INLINE Global& operator=(Global<S>&& rhs) {
+ V8_INLINE Global& operator=(Global<S>&& rhs) { // NOLINT
TYPE_CHECK(T, S);
if (this != &rhs) {
this->Reset();
@@ -835,7 +835,7 @@ class Global : public PersistentBase<T> {
/**
* Pass allows returning uniques from functions, etc.
*/
- Global Pass() { return static_cast<Global&&>(*this); }
+ Global Pass() { return static_cast<Global&&>(*this); } // NOLINT
/*
* For compatibility with Chromium's base::Bind (base::Passed).
@@ -4027,6 +4027,13 @@ class V8_EXPORT Template : public Data {
PropertyAttribute attribute = None,
AccessControl settings = DEFAULT);
+#ifdef V8_JS_ACCESSORS
+ void SetAccessorProperty(Local<Name> name,
+ Local<Function> getter = Local<Function>(),
+ Local<Function> setter = Local<Function>(),
+ PropertyAttribute attribute = None);
+#endif // V8_JS_ACCESSORS
+
/**
* Whenever the property with the given name is accessed on objects
* created from this Template the getter and setter callbacks
@@ -5371,19 +5378,6 @@ class V8_EXPORT Isolate {
static Isolate* GetCurrent();
/**
- * Custom callback used by embedders to help V8 determine if it should abort
- * when it throws and no internal handler is predicted to catch the
- * exception. If --abort-on-uncaught-exception is used on the command line,
- * then V8 will abort if either:
- * - no custom callback is set.
- * - the custom callback set returns true.
- * Otherwise, the custom callback will not be called and V8 will not abort.
- */
- typedef bool (*AbortOnUncaughtExceptionCallback)(Isolate*);
- void SetAbortOnUncaughtExceptionCallback(
- AbortOnUncaughtExceptionCallback callback);
-
- /**
* Methods below this point require holding a lock (using Locker) in
* a multi-threaded environment.
*/
@@ -5925,7 +5919,7 @@ class V8_EXPORT Isolate {
void SetObjectGroupId(internal::Object** object, UniqueId id);
void SetReferenceFromGroup(UniqueId id, internal::Object** object);
void SetReference(internal::Object** parent, internal::Object** child);
- void CollectAllGarbage(const char* gc_reason);
+ void ReportExternalAllocationLimitReached();
};
class V8_EXPORT StartupData {
@@ -6961,7 +6955,7 @@ class Internals {
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
- static const int kContextEmbedderDataIndex = 27;
+ static const int kContextEmbedderDataIndex = 5;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
@@ -6994,7 +6988,7 @@ class Internals {
static const int kNodeIsIndependentShift = 3;
static const int kNodeIsPartiallyDependentShift = 4;
- static const int kJSObjectType = 0xb6;
+ static const int kJSObjectType = 0xb7;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
@@ -8176,7 +8170,7 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
if (change_in_bytes > 0 &&
amount - *amount_of_external_allocated_memory_at_last_global_gc >
I::kExternalAllocationLimit) {
- CollectAllGarbage("external memory allocation limit reached.");
+ ReportExternalAllocationLimitReached();
}
*amount_of_external_allocated_memory = amount;
return *amount_of_external_allocated_memory;
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index 71b1cffa08..c560374353 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -15,7 +15,7 @@ rietveld {
verifiers {
reviewer_lgtm {
- committer_list: "v8"
+ committer_list: "project-v8-committers"
}
tree_status {
diff --git a/deps/v8/snapshot_toolchain.gni b/deps/v8/snapshot_toolchain.gni
new file mode 100644
index 0000000000..f41a5ee0e8
--- /dev/null
+++ b/deps/v8/snapshot_toolchain.gni
@@ -0,0 +1,44 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# The snapshot needs to be compiled for the host, but compiled with
+# a toolchain that matches the bit-width of the target.
+
+# TODO(GYP): For now we only support 32-bit little-endian target builds from an
+# x64 Linux host. Eventually we need to support all of the host/target
+# configurations v8 runs on.
+if (host_cpu == "x64" && host_os == "linux") {
+ if (target_cpu == "arm" || target_cpu == "mipsel" || target_cpu == "x86") {
+ snapshot_toolchain = "//build/toolchain/linux:clang_x86"
+ } else if (target_cpu == "x64") {
+ snapshot_toolchain = "//build/toolchain/linux:clang_x64"
+ } else {
+ assert(false, "Need environment for this arch")
+ }
+} else {
+ snapshot_toolchain = default_toolchain
+}
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 4b95456a17..26b0808740 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -6,6 +6,7 @@ include_rules = [
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
"-src/interpreter",
+ "+src/interpreter/bytecode-array-iterator.h",
"+src/interpreter/bytecodes.h",
"+src/interpreter/interpreter.h",
"-src/libplatform",
@@ -14,9 +15,10 @@ include_rules = [
specific_include_rules = {
".*\.h": [
- # Note that src/v8.h is the top header for some .cc files, it shouldn't be
- # included in any .h files though. In the long run we should make src/v8.h
- # act like any normal header file, instead of a grab-bag include.
+ # Note that src/v8.h by now is a regular header file, it doesn't provide
+ # any special declarations besides the V8 class. There should be no need
+ # for including it in any .h files though. This rule is just a reminder,
+ # and can be removed once the dust has settled.
"-src/v8.h",
],
"d8\.cc": [
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index c5fcca882d..b89917f73e 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -10,7 +10,7 @@
#include "src/execution.h"
#include "src/factory.h"
#include "src/frames-inl.h"
-#include "src/isolate.h"
+#include "src/isolate-inl.h"
#include "src/list-inl.h"
#include "src/messages.h"
#include "src/property-details.h"
@@ -161,14 +161,13 @@ void Accessors::ArgumentsIteratorSetter(
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- Handle<JSObject> object = Utils::OpenHandle(*info.This());
- Handle<Object> value = Utils::OpenHandle(*val);
-
- LookupIterator it(object, Utils::OpenHandle(*name));
- CHECK_EQ(LookupIterator::ACCESSOR, it.state());
- DCHECK(it.HolderIsReceiverOrHiddenPrototype());
+ Handle<JSObject> object_handle = Utils::OpenHandle(*info.This());
+ Handle<Object> value_handle = Utils::OpenHandle(*val);
+ Handle<Name> name_handle = Utils::OpenHandle(*name);
- if (Object::SetDataProperty(&it, value).is_null()) {
+ if (JSObject::DefinePropertyOrElementIgnoreAttributes(
+ object_handle, name_handle, value_handle, NONE)
+ .is_null()) {
isolate->OptionalRescheduleException(false);
}
}
@@ -225,13 +224,13 @@ void Accessors::ArrayLengthSetter(
uint32_t length = 0;
if (!FastAsArrayLength(isolate, length_obj, &length)) {
Handle<Object> uint32_v;
- if (!Execution::ToUint32(isolate, length_obj).ToHandle(&uint32_v)) {
+ if (!Object::ToUint32(isolate, length_obj).ToHandle(&uint32_v)) {
isolate->OptionalRescheduleException(false);
return;
}
Handle<Object> number_v;
- if (!Execution::ToNumber(isolate, length_obj).ToHandle(&number_v)) {
+ if (!Object::ToNumber(length_obj).ToHandle(&number_v)) {
isolate->OptionalRescheduleException(false);
return;
}
@@ -319,7 +318,8 @@ void Accessors::ScriptColumnOffsetGetter(
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
- Object* res = Script::cast(JSValue::cast(object)->value())->column_offset();
+ Object* res = Smi::FromInt(
+ Script::cast(JSValue::cast(object)->value())->column_offset());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
@@ -356,7 +356,7 @@ void Accessors::ScriptIdGetter(
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
- Object* id = Script::cast(JSValue::cast(object)->value())->id();
+ Object* id = Smi::FromInt(Script::cast(JSValue::cast(object)->value())->id());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(id, isolate)));
}
@@ -463,7 +463,8 @@ void Accessors::ScriptLineOffsetGetter(
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
- Object* res = Script::cast(JSValue::cast(object)->value())->line_offset();
+ Object* res =
+ Smi::FromInt(Script::cast(JSValue::cast(object)->value())->line_offset());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
@@ -500,7 +501,8 @@ void Accessors::ScriptTypeGetter(
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.This());
- Object* res = Script::cast(JSValue::cast(object)->value())->type();
+ Object* res =
+ Smi::FromInt(Script::cast(JSValue::cast(object)->value())->type());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
@@ -815,10 +817,10 @@ void Accessors::ScriptEvalFromScriptPositionGetter(
if (script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
Handle<Code> code(SharedFunctionInfo::cast(
script->eval_from_shared())->code());
- result = Handle<Object>(
- Smi::FromInt(code->SourcePosition(code->instruction_start() +
- script->eval_from_instructions_offset()->value())),
- isolate);
+ result = Handle<Object>(Smi::FromInt(code->SourcePosition(
+ code->instruction_start() +
+ script->eval_from_instructions_offset())),
+ isolate);
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
@@ -930,7 +932,7 @@ MUST_USE_RESULT static MaybeHandle<Object> SetFunctionPrototype(
MaybeHandle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
Handle<Object> prototype) {
- DCHECK(function->should_have_prototype());
+ DCHECK(function->IsConstructor());
Isolate* isolate = function->GetIsolate();
return SetFunctionPrototype(isolate, function, prototype);
}
@@ -992,7 +994,7 @@ void Accessors::FunctionLengthGetter(
} else {
// If the function isn't compiled yet, the length is not computed
// correctly yet. Compile it now and return the right length.
- if (Compiler::EnsureCompiled(function, KEEP_EXCEPTION)) {
+ if (Compiler::Compile(function, KEEP_EXCEPTION)) {
length = function->shared()->length();
}
if (isolate->has_pending_exception()) {
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 5993859710..051ea4a17b 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -5,7 +5,7 @@
#include "src/api-natives.h"
#include "src/api.h"
-#include "src/isolate.h"
+#include "src/isolate-inl.h"
#include "src/lookup.h"
#include "src/messages.h"
@@ -37,6 +37,25 @@ MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
}
+MaybeHandle<JSFunction> InstantiateFunctionOrMaybeDont(Isolate* isolate,
+ Handle<Object> data) {
+ DCHECK(data->IsFunctionTemplateInfo() || data->IsJSFunction());
+ if (data->IsFunctionTemplateInfo()) {
+ // A function template needs to be instantiated.
+ return InstantiateFunction(isolate,
+ Handle<FunctionTemplateInfo>::cast(data));
+#ifdef V8_JS_ACCESSORS
+ } else if (data->IsJSFunction()) {
+ // If we already have a proper function, we do not need additional work.
+ // (This should only happen for JavaScript API accessors.)
+ return Handle<JSFunction>::cast(data);
+#endif // V8_JS_ACCESSORS
+ } else {
+ UNREACHABLE();
+ return MaybeHandle<JSFunction>();
+ }
+}
+
MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
Handle<JSObject> object,
Handle<Name> name,
@@ -44,18 +63,14 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
Handle<Object> setter,
PropertyAttributes attributes) {
if (!getter->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, getter,
- InstantiateFunction(isolate,
- Handle<FunctionTemplateInfo>::cast(getter)),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, getter,
+ InstantiateFunctionOrMaybeDont(isolate, getter),
+ Object);
}
if (!setter->IsUndefined()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, setter,
- InstantiateFunction(isolate,
- Handle<FunctionTemplateInfo>::cast(setter)),
- Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, setter,
+ InstantiateFunctionOrMaybeDont(isolate, setter),
+ Object);
}
RETURN_ON_EXCEPTION(isolate, JSObject::DefineAccessor(object, name, getter,
setter, attributes),
@@ -364,10 +379,19 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<TemplateInfo> info,
- Handle<Name> name,
- Handle<FunctionTemplateInfo> getter,
- Handle<FunctionTemplateInfo> setter,
+ Handle<Name> name, Handle<Object> getter,
+ Handle<Object> setter,
PropertyAttributes attributes) {
+#ifdef V8_JS_ACCESSORS
+ DCHECK(getter.is_null() || getter->IsFunctionTemplateInfo() ||
+ getter->IsJSFunction());
+ DCHECK(setter.is_null() || setter->IsFunctionTemplateInfo() ||
+ setter->IsJSFunction());
+#else
+ DCHECK(getter.is_null() || getter->IsFunctionTemplateInfo());
+ DCHECK(setter.is_null() || setter->IsFunctionTemplateInfo());
+#endif // V8_JS_ACCESSORS
+
const int kSize = 4;
PropertyDetails details(attributes, ACCESSOR, 0, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
@@ -491,9 +515,10 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
map->set_has_indexed_interceptor();
}
- // Set instance call-as-function information in the map.
+ // Mark instance as callable in the map.
if (!obj->instance_call_handler()->IsUndefined()) {
- map->set_has_instance_call_handler();
+ map->set_is_callable();
+ map->set_is_constructor(true);
}
// Recursively copy parent instance templates' accessors,
diff --git a/deps/v8/src/api-natives.h b/deps/v8/src/api-natives.h
index c5e398296c..0639677b15 100644
--- a/deps/v8/src/api-natives.h
+++ b/deps/v8/src/api-natives.h
@@ -45,9 +45,8 @@ class ApiNatives {
PropertyAttributes attributes);
static void AddAccessorProperty(Isolate* isolate, Handle<TemplateInfo> info,
- Handle<Name> name,
- Handle<FunctionTemplateInfo> getter,
- Handle<FunctionTemplateInfo> setter,
+ Handle<Name> name, Handle<Object> getter,
+ Handle<Object> setter,
PropertyAttributes attributes);
static void AddNativeDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index a2feebea45..cfc170ee8e 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -27,25 +27,26 @@
#include "src/contexts.h"
#include "src/conversions-inl.h"
#include "src/counters.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/global-handles.h"
-#include "src/heap-profiler.h"
-#include "src/heap-snapshot-generator-inl.h"
#include "src/icu_util.h"
+#include "src/isolate-inl.h"
#include "src/json-parser.h"
#include "src/messages.h"
#include "src/parser.h"
#include "src/pending-compilation-error-handler.h"
-#include "src/profile-generator-inl.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/profiler/heap-profiler.h"
+#include "src/profiler/heap-snapshot-generator-inl.h"
+#include "src/profiler/profile-generator-inl.h"
+#include "src/profiler/sampler.h"
#include "src/property.h"
#include "src/property-details.h"
#include "src/prototype.h"
#include "src/runtime/runtime.h"
#include "src/runtime-profiler.h"
-#include "src/sampler.h"
#include "src/scanner-character-streams.h"
#include "src/simulator.h"
#include "src/snapshot/natives.h"
@@ -194,10 +195,10 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
ScriptOriginOptions options(script->origin_options());
v8::ScriptOrigin origin(
Utils::ToLocal(scriptName),
- v8::Integer::New(v8_isolate, script->line_offset()->value()),
- v8::Integer::New(v8_isolate, script->column_offset()->value()),
+ v8::Integer::New(v8_isolate, script->line_offset()),
+ v8::Integer::New(v8_isolate, script->column_offset()),
v8::Boolean::New(v8_isolate, options.IsSharedCrossOrigin()),
- v8::Integer::New(v8_isolate, script->id()->value()),
+ v8::Integer::New(v8_isolate, script->id()),
v8::Boolean::New(v8_isolate, options.IsEmbedderDebugScript()),
Utils::ToLocal(source_map_url),
v8::Boolean::New(v8_isolate, options.IsOpaque()));
@@ -954,6 +955,25 @@ void Template::SetAccessorProperty(
}
+#ifdef V8_JS_ACCESSORS
+void Template::SetAccessorProperty(v8::Local<v8::Name> name,
+ v8::Local<Function> getter,
+ v8::Local<Function> setter,
+ v8::PropertyAttribute attribute) {
+ auto templ = Utils::OpenHandle(this);
+ auto isolate = templ->GetIsolate();
+ ENTER_V8(isolate);
+ DCHECK(!name.IsEmpty());
+ DCHECK(!getter.IsEmpty() || !setter.IsEmpty());
+ i::HandleScope scope(isolate);
+ i::ApiNatives::AddAccessorProperty(
+ isolate, templ, Utils::OpenHandle(*name),
+ Utils::OpenHandle(*getter, true), Utils::OpenHandle(*setter, true),
+ static_cast<PropertyAttributes>(attribute));
+}
+#endif // V8_JS_ACCESSORS
+
+
// --- F u n c t i o n T e m p l a t e ---
static void InitializeFunctionTemplate(
i::Handle<i::FunctionTemplateInfo> info) {
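
Note: the hunk above adds an overload of Template::SetAccessorProperty taking plain v8::Function getters/setters, compiled only under V8_JS_ACCESSORS. The long-standing template-based form works as sketched below (Getter/Setter are hypothetical v8::FunctionCallback functions, and `templ` is assumed to be a live v8::Local<v8::ObjectTemplate>):

    templ->SetAccessorProperty(
        v8::String::NewFromUtf8(isolate, "prop"),
        v8::FunctionTemplate::New(isolate, Getter),
        v8::FunctionTemplate::New(isolate, Setter),
        v8::ReadOnly);
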
@@ -1369,13 +1389,13 @@ static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
EnsureNotInstantiated(cons, "ObjectTemplateSetNamedPropertyHandler");
auto obj = i::Handle<i::InterceptorInfo>::cast(
isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE));
+ obj->set_flags(0);
if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
- obj->set_flags(0);
obj->set_can_intercept_symbols(
!(static_cast<int>(flags) &
static_cast<int>(PropertyHandlerFlags::kOnlyInterceptStrings)));
@@ -1456,6 +1476,7 @@ void ObjectTemplate::SetHandler(
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetHandler");
auto obj = i::Handle<i::InterceptorInfo>::cast(
isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE));
+ obj->set_flags(0);
if (config.getter != 0) SET_FIELD_WRAPPED(obj, set_getter, config.getter);
if (config.setter != 0) SET_FIELD_WRAPPED(obj, set_setter, config.setter);
@@ -1464,7 +1485,6 @@ void ObjectTemplate::SetHandler(
if (config.enumerator != 0) {
SET_FIELD_WRAPPED(obj, set_enumerator, config.enumerator);
}
- obj->set_flags(0);
obj->set_all_can_read(static_cast<int>(config.flags) &
static_cast<int>(PropertyHandlerFlags::kAllCanRead));
@@ -1610,7 +1630,7 @@ int UnboundScript::GetId() {
i::Handle<i::SharedFunctionInfo> function_info(
i::SharedFunctionInfo::cast(*obj));
i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- return script->id()->value();
+ return script->id();
}
@@ -1983,12 +2003,12 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
script->set_name(*Utils::OpenHandle(*(origin.ResourceName())));
}
if (!origin.ResourceLineOffset().IsEmpty()) {
- script->set_line_offset(i::Smi::FromInt(
- static_cast<int>(origin.ResourceLineOffset()->Value())));
+ script->set_line_offset(
+ static_cast<int>(origin.ResourceLineOffset()->Value()));
}
if (!origin.ResourceColumnOffset().IsEmpty()) {
- script->set_column_offset(i::Smi::FromInt(
- static_cast<int>(origin.ResourceColumnOffset()->Value())));
+ script->set_column_offset(
+ static_cast<int>(origin.ResourceColumnOffset()->Value()));
}
script->set_origin_options(origin.Options());
if (!origin.SourceMapUrl().IsEmpty()) {
@@ -2525,7 +2545,7 @@ Local<NativeWeakMap> NativeWeakMap::New(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
i::Handle<i::JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
- i::Runtime::WeakCollectionInitialize(isolate, weakmap);
+ i::JSWeakCollection::Initialize(weakmap, isolate);
return Utils::NativeWeakMapToLocal(weakmap);
}
@@ -2548,7 +2568,7 @@ void NativeWeakMap::Set(Local<Value> v8_key, Local<Value> v8_value) {
return;
}
int32_t hash = i::Object::GetOrCreateHash(isolate, key)->value();
- i::Runtime::WeakCollectionSet(weak_collection, key, value, hash);
+ i::JSWeakCollection::Set(weak_collection, key, value, hash);
}
@@ -2611,7 +2631,8 @@ bool NativeWeakMap::Delete(Local<Value> v8_key) {
DCHECK(false);
return false;
}
- return i::Runtime::WeakCollectionDelete(weak_collection, key);
+ int32_t hash = i::Object::GetOrCreateHash(isolate, key)->value();
+ return i::JSWeakCollection::Delete(weak_collection, key, hash);
}
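
Note: these NativeWeakMap hunks only reroute the implementation through i::JSWeakCollection (Delete now computes the key hash itself); the public interface is untouched. Basic use, as a sketch assuming `isolate` is entered and `key`/`value` are live handles:

    v8::Local<v8::NativeWeakMap> weak_map = v8::NativeWeakMap::New(isolate);
    weak_map->Set(key, value);             // key is held weakly
    bool removed = weak_map->Delete(key);  // true if the key was present
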
@@ -2853,7 +2874,7 @@ MaybeLocal<String> Value::ToString(Local<Context> context) const {
PREPARE_FOR_EXECUTION(context, "ToString", String);
Local<String> result;
has_pending_exception =
- !ToLocal<String>(i::Execution::ToString(isolate, obj), &result);
+ !ToLocal<String>(i::Object::ToString(isolate, obj), &result);
RETURN_ON_FAILED_EXECUTION(String);
RETURN_ESCAPED(result);
}
@@ -2918,8 +2939,7 @@ MaybeLocal<Number> Value::ToNumber(Local<Context> context) const {
if (obj->IsNumber()) return ToApiHandle<Number>(obj);
PREPARE_FOR_EXECUTION(context, "ToNumber", Number);
Local<Number> result;
- has_pending_exception =
- !ToLocal<Number>(i::Execution::ToNumber(isolate, obj), &result);
+ has_pending_exception = !ToLocal<Number>(i::Object::ToNumber(obj), &result);
RETURN_ON_FAILED_EXECUTION(Number);
RETURN_ESCAPED(result);
}
@@ -2936,7 +2956,7 @@ MaybeLocal<Integer> Value::ToInteger(Local<Context> context) const {
PREPARE_FOR_EXECUTION(context, "ToInteger", Integer);
Local<Integer> result;
has_pending_exception =
- !ToLocal<Integer>(i::Execution::ToInteger(isolate, obj), &result);
+ !ToLocal<Integer>(i::Object::ToInteger(isolate, obj), &result);
RETURN_ON_FAILED_EXECUTION(Integer);
RETURN_ESCAPED(result);
}
@@ -2953,7 +2973,7 @@ MaybeLocal<Int32> Value::ToInt32(Local<Context> context) const {
Local<Int32> result;
PREPARE_FOR_EXECUTION(context, "ToInt32", Int32);
has_pending_exception =
- !ToLocal<Int32>(i::Execution::ToInt32(isolate, obj), &result);
+ !ToLocal<Int32>(i::Object::ToInt32(isolate, obj), &result);
RETURN_ON_FAILED_EXECUTION(Int32);
RETURN_ESCAPED(result);
}
@@ -2968,9 +2988,9 @@ MaybeLocal<Uint32> Value::ToUint32(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsSmi()) return ToApiHandle<Uint32>(obj);
Local<Uint32> result;
- PREPARE_FOR_EXECUTION(context, "ToUInt32", Uint32);
+ PREPARE_FOR_EXECUTION(context, "ToUint32", Uint32);
has_pending_exception =
- !ToLocal<Uint32>(i::Execution::ToUint32(isolate, obj), &result);
+ !ToLocal<Uint32>(i::Object::ToUint32(isolate, obj), &result);
RETURN_ON_FAILED_EXECUTION(Uint32);
RETURN_ESCAPED(result);
}
@@ -3243,7 +3263,7 @@ Maybe<double> Value::NumberValue(Local<Context> context) const {
if (obj->IsNumber()) return Just(obj->Number());
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "NumberValue", double);
i::Handle<i::Object> num;
- has_pending_exception = !i::Execution::ToNumber(isolate, obj).ToHandle(&num);
+ has_pending_exception = !i::Object::ToNumber(obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(double);
return Just(num->Number());
}
@@ -3264,8 +3284,7 @@ Maybe<int64_t> Value::IntegerValue(Local<Context> context) const {
num = obj;
} else {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "IntegerValue", int64_t);
- has_pending_exception =
- !i::Execution::ToInteger(isolate, obj).ToHandle(&num);
+ has_pending_exception = !i::Object::ToInteger(isolate, obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int64_t);
}
return Just(num->IsSmi() ? static_cast<int64_t>(i::Smi::cast(*num)->value())
@@ -3291,7 +3310,7 @@ Maybe<int32_t> Value::Int32Value(Local<Context> context) const {
if (obj->IsNumber()) return Just(NumberToInt32(*obj));
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Int32Value", int32_t);
i::Handle<i::Object> num;
- has_pending_exception = !i::Execution::ToInt32(isolate, obj).ToHandle(&num);
+ has_pending_exception = !i::Object::ToInt32(isolate, obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int32_t);
return Just(num->IsSmi() ? i::Smi::cast(*num)->value()
: static_cast<int32_t>(num->Number()));
@@ -3310,7 +3329,7 @@ Maybe<uint32_t> Value::Uint32Value(Local<Context> context) const {
if (obj->IsNumber()) return Just(NumberToUint32(*obj));
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Uint32Value", uint32_t);
i::Handle<i::Object> num;
- has_pending_exception = !i::Execution::ToUint32(isolate, obj).ToHandle(&num);
+ has_pending_exception = !i::Object::ToUint32(isolate, obj).ToHandle(&num);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(uint32_t);
return Just(num->IsSmi() ? static_cast<uint32_t>(i::Smi::cast(*num)->value())
: static_cast<uint32_t>(num->Number()));
@@ -3333,7 +3352,7 @@ MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
PREPARE_FOR_EXECUTION(context, "ToArrayIndex", Uint32);
i::Handle<i::Object> string_obj;
has_pending_exception =
- !i::Execution::ToString(isolate, self).ToHandle(&string_obj);
+ !i::Object::ToString(isolate, self).ToHandle(&string_obj);
RETURN_ON_FAILED_EXECUTION(Uint32);
i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj);
uint32_t index;
@@ -3364,22 +3383,7 @@ Local<Uint32> Value::ToArrayIndex() const {
Maybe<bool> Value::Equals(Local<Context> context, Local<Value> that) const {
auto self = Utils::OpenHandle(this);
auto other = Utils::OpenHandle(*that);
- if (self->IsSmi() && other->IsSmi()) {
- return Just(self->Number() == other->Number());
- }
- if (self->IsJSObject() && other->IsJSObject()) {
- return Just(*self == *other);
- }
- PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Value::Equals()", bool);
- i::Handle<i::Object> args[] = { other };
- i::Handle<i::JSFunction> fun(i::JSFunction::cast(
- isolate->js_builtins_object()->javascript_builtin(i::Builtins::EQUALS)));
- i::Handle<i::Object> result;
- has_pending_exception =
- !i::Execution::Call(isolate, fun, self, arraysize(args), args)
- .ToHandle(&result);
- RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
- return Just(*result == i::Smi::FromInt(i::EQUAL));
+ return i::Object::Equals(self, other);
}
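
Note: Value::Equals now defers entirely to i::Object::Equals instead of calling the EQUALS JS builtin, but keeps its Maybe-based signature. From the embedder's side (a sketch, assuming a live entered context):

    // Abstract (==) comparison; it can run arbitrary JS via valueOf/toString,
    // hence the Maybe return when an exception is pending.
    bool LooselyEqual(v8::Local<v8::Context> context,
                      v8::Local<v8::Value> a, v8::Local<v8::Value> b) {
      return a->Equals(context, b).FromMaybe(false);
    }
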
@@ -3531,7 +3535,7 @@ static i::MaybeHandle<i::Object> DefineObjectProperty(
i::Handle<i::Name> name;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, name,
- i::Runtime::ToName(isolate, key),
+ i::Object::ToName(isolate, key),
i::MaybeHandle<i::Object>());
return i::JSObject::DefinePropertyOrElementIgnoreAttributes(js_object, name,
@@ -3614,8 +3618,8 @@ Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
auto self = Utils::OpenHandle(this);
auto key_obj = Utils::OpenHandle(*key);
if (!key_obj->IsName()) {
- has_pending_exception = !i::Execution::ToString(
- isolate, key_obj).ToHandle(&key_obj);
+ has_pending_exception =
+ !i::Object::ToString(isolate, key_obj).ToHandle(&key_obj);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
}
auto key_name = i::Handle<i::Name>::cast(key_obj);
@@ -3701,8 +3705,7 @@ Local<Object> v8::Object::FindInstanceInPrototypeChain(
return Local<Object>();
}
}
- return Utils::ToLocal(
- i::handle(i::JSObject::cast(iter.GetCurrent()), isolate));
+ return Utils::ToLocal(i::handle(iter.GetCurrent<i::JSObject>(), isolate));
}
@@ -3855,7 +3858,7 @@ Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
} else {
// Convert the key to a name - possibly by calling back into JavaScript.
i::Handle<i::Name> name;
- if (i::Runtime::ToName(isolate, key_obj).ToHandle(&name)) {
+ if (i::Object::ToName(isolate, key_obj).ToHandle(&name)) {
maybe = i::JSReceiver::HasProperty(self, name);
}
}
@@ -4259,22 +4262,9 @@ MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
auto recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
- i::Handle<i::JSFunction> fun;
- if (self->IsJSFunction()) {
- fun = i::Handle<i::JSFunction>::cast(self);
- } else {
- i::Handle<i::Object> delegate;
- has_pending_exception = !i::Execution::TryGetFunctionDelegate(isolate, self)
- .ToHandle(&delegate);
- RETURN_ON_FAILED_EXECUTION(Value);
- fun = i::Handle<i::JSFunction>::cast(delegate);
- recv_obj = self;
- }
Local<Value> result;
- has_pending_exception =
- !ToLocal<Value>(
- i::Execution::Call(isolate, fun, recv_obj, argc, args, true),
- &result);
+ has_pending_exception = !ToLocal<Value>(
+ i::Execution::Call(isolate, self, recv_obj, argc, args), &result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@@ -4297,29 +4287,11 @@ MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
- if (self->IsJSFunction()) {
- auto fun = i::Handle<i::JSFunction>::cast(self);
- Local<Value> result;
- has_pending_exception =
- !ToLocal<Value>(i::Execution::New(fun, argc, args), &result);
- RETURN_ON_FAILED_EXECUTION(Value);
- RETURN_ESCAPED(result);
- }
- i::Handle<i::Object> delegate;
- has_pending_exception = !i::Execution::TryGetConstructorDelegate(
- isolate, self).ToHandle(&delegate);
+ Local<Value> result;
+ has_pending_exception = !ToLocal<Value>(
+ i::Execution::New(isolate, self, self, argc, args), &result);
RETURN_ON_FAILED_EXECUTION(Value);
- if (!delegate->IsUndefined()) {
- auto fun = i::Handle<i::JSFunction>::cast(delegate);
- Local<Value> result;
- has_pending_exception =
- !ToLocal<Value>(i::Execution::Call(isolate, fun, self, argc, args),
- &result);
- RETURN_ON_FAILED_EXECUTION(Value);
- DCHECK(!delegate->IsUndefined());
- RETURN_ESCAPED(result);
- }
- return MaybeLocal<Value>();
+ RETURN_ESCAPED(result);
}
@@ -4388,10 +4360,8 @@ MaybeLocal<v8::Value> Function::Call(Local<Context> context,
STATIC_ASSERT(sizeof(v8::Local<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
Local<Value> result;
- has_pending_exception =
- !ToLocal<Value>(
- i::Execution::Call(isolate, self, recv_obj, argc, args, true),
- &result);
+ has_pending_exception = !ToLocal<Value>(
+ i::Execution::Call(isolate, self, recv_obj, argc, args), &result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@@ -4485,7 +4455,7 @@ int Function::ScriptId() const {
return v8::UnboundScript::kNoScriptId;
}
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- return script->id()->value();
+ return script->id();
}
@@ -5337,7 +5307,7 @@ void v8::V8::SetEntropySource(EntropySource entropy_source) {
void v8::V8::SetReturnAddressLocationResolver(
ReturnAddressLocationResolver return_address_resolver) {
- i::V8::SetReturnAddressLocationResolver(return_address_resolver);
+ i::StackFrame::SetReturnAddressLocationResolver(return_address_resolver);
}
@@ -5785,7 +5755,7 @@ MaybeLocal<String> v8::String::NewExternalTwoByte(
i::Handle<i::String> string = i_isolate->factory()
->NewExternalStringFromTwoByte(resource)
.ToHandleChecked();
- i_isolate->heap()->external_string_table()->AddString(*string);
+ i_isolate->heap()->RegisterExternalString(*string);
return Utils::ToLocal(string);
}
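
Note: String::NewExternalTwoByte/NewExternalOneByte now register the string through Heap::RegisterExternalString() rather than poking the external string table directly; callers are unaffected. For reference, a minimal resource implementation (a sketch; the resource must outlive the string):

    class StaticTwoByteResource : public v8::String::ExternalStringResource {
     public:
      StaticTwoByteResource(const uint16_t* data, size_t length)
          : data_(data), length_(length) {}
      const uint16_t* data() const override { return data_; }
      size_t length() const override { return length_; }
     private:
      const uint16_t* data_;
      size_t length_;
    };
    // v8::MaybeLocal<v8::String> s =
    //     v8::String::NewExternalTwoByte(isolate, &resource);
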
@@ -5809,7 +5779,7 @@ MaybeLocal<String> v8::String::NewExternalOneByte(
i::Handle<i::String> string = i_isolate->factory()
->NewExternalStringFromOneByte(resource)
.ToHandleChecked();
- i_isolate->heap()->external_string_table()->AddString(*string);
+ i_isolate->heap()->RegisterExternalString(*string);
return Utils::ToLocal(string);
}
@@ -5837,7 +5807,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
DCHECK(!CanMakeExternal() || result);
if (result) {
DCHECK(obj->IsExternalString());
- isolate->heap()->external_string_table()->AddString(*obj);
+ isolate->heap()->RegisterExternalString(*obj);
}
return result;
}
@@ -5861,7 +5831,7 @@ bool v8::String::MakeExternal(
DCHECK(!CanMakeExternal() || result);
if (result) {
DCHECK(obj->IsExternalString());
- isolate->heap()->external_string_table()->AddString(*obj);
+ isolate->heap()->RegisterExternalString(*obj);
}
return result;
}
@@ -6153,7 +6123,7 @@ void Map::Clear() {
i::Isolate* isolate = self->GetIsolate();
LOG_API(isolate, "Map::Clear");
ENTER_V8(isolate);
- i::Runtime::JSMapClear(isolate, self);
+ i::JSMap::Clear(self);
}
@@ -6164,7 +6134,7 @@ MaybeLocal<Value> Map::Get(Local<Context> context, Local<Value> key) {
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
has_pending_exception =
!ToLocal<Value>(i::Execution::Call(isolate, isolate->map_get(), self,
- arraysize(argv), argv, false),
+ arraysize(argv), argv),
&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
@@ -6178,9 +6148,9 @@ MaybeLocal<Map> Map::Set(Local<Context> context, Local<Value> key,
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key),
Utils::OpenHandle(*value)};
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->map_set(), self, arraysize(argv),
- argv, false).ToHandle(&result);
+ has_pending_exception = !i::Execution::Call(isolate, isolate->map_set(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Map);
RETURN_ESCAPED(Local<Map>::Cast(Utils::ToLocal(result)));
}
@@ -6191,9 +6161,9 @@ Maybe<bool> Map::Has(Local<Context> context, Local<Value> key) {
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->map_has(), self, arraysize(argv),
- argv, false).ToHandle(&result);
+ has_pending_exception = !i::Execution::Call(isolate, isolate->map_has(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(result->IsTrue());
}
@@ -6204,9 +6174,9 @@ Maybe<bool> Map::Delete(Local<Context> context, Local<Value> key) {
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->map_delete(), self, arraysize(argv),
- argv, false).ToHandle(&result);
+ has_pending_exception = !i::Execution::Call(isolate, isolate->map_delete(),
+ self, arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(result->IsTrue());
}
@@ -6243,7 +6213,8 @@ MaybeLocal<Map> Map::FromArray(Local<Context> context, Local<Array> array) {
has_pending_exception =
!i::Execution::Call(isolate, isolate->map_from_array(),
isolate->factory()->undefined_value(),
- arraysize(argv), argv, false).ToHandle(&result);
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Map);
RETURN_ESCAPED(Local<Map>::Cast(Utils::ToLocal(result)));
}
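
Note: the Map::Get/Set/Has/Delete/FromArray hunks in this run only drop the trailing bool from Execution::Call; observable behavior of the v8::Map wrapper is unchanged. A sketch, assuming `map` is a live v8::Local<v8::Map>, `context` is entered, and `key`/`value` are live handles:

    map = map->Set(context, key, value).ToLocalChecked();  // returns the map
    bool present = map->Has(context, key).FromJust();
    v8::Local<v8::Value> got = map->Get(context, key).ToLocalChecked();
    bool removed = map->Delete(context, key).FromJust();
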
@@ -6269,7 +6240,7 @@ void Set::Clear() {
i::Isolate* isolate = self->GetIsolate();
LOG_API(isolate, "Set::Clear");
ENTER_V8(isolate);
- i::Runtime::JSSetClear(isolate, self);
+ i::JSSet::Clear(self);
}
@@ -6278,9 +6249,9 @@ MaybeLocal<Set> Set::Add(Local<Context> context, Local<Value> key) {
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->set_add(), self, arraysize(argv),
- argv, false).ToHandle(&result);
+ has_pending_exception = !i::Execution::Call(isolate, isolate->set_add(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Set);
RETURN_ESCAPED(Local<Set>::Cast(Utils::ToLocal(result)));
}
@@ -6291,9 +6262,9 @@ Maybe<bool> Set::Has(Local<Context> context, Local<Value> key) {
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->set_has(), self, arraysize(argv),
- argv, false).ToHandle(&result);
+ has_pending_exception = !i::Execution::Call(isolate, isolate->set_has(), self,
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(result->IsTrue());
}
@@ -6304,9 +6275,9 @@ Maybe<bool> Set::Delete(Local<Context> context, Local<Value> key) {
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->set_delete(), self, arraysize(argv),
- argv, false).ToHandle(&result);
+ has_pending_exception = !i::Execution::Call(isolate, isolate->set_delete(),
+ self, arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(result->IsTrue());
}
@@ -6340,7 +6311,8 @@ MaybeLocal<Set> Set::FromArray(Local<Context> context, Local<Array> array) {
has_pending_exception =
!i::Execution::Call(isolate, isolate->set_from_array(),
isolate->factory()->undefined_value(),
- arraysize(argv), argv, false).ToHandle(&result);
+ arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Set);
RETURN_ESCAPED(Local<Set>::Cast(Utils::ToLocal(result)));
}
@@ -6355,12 +6327,10 @@ bool Value::IsPromise() const {
MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, "Promise::Resolver::New", Resolver);
i::Handle<i::Object> result;
- has_pending_exception = !i::Execution::Call(
- isolate,
- isolate->promise_create(),
- isolate->factory()->undefined_value(),
- 0, NULL,
- false).ToHandle(&result);
+ has_pending_exception =
+ !i::Execution::Call(isolate, isolate->promise_create(),
+ isolate->factory()->undefined_value(), 0, NULL)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Promise::Resolver);
RETURN_ESCAPED(Local<Promise::Resolver>::Cast(Utils::ToLocal(result)));
}
@@ -6383,12 +6353,11 @@ Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Promise::Resolver::Resolve", bool);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
- has_pending_exception = i::Execution::Call(
- isolate,
- isolate->promise_resolve(),
- isolate->factory()->undefined_value(),
- arraysize(argv), argv,
- false).is_null();
+ has_pending_exception =
+ i::Execution::Call(isolate, isolate->promise_resolve(),
+ isolate->factory()->undefined_value(), arraysize(argv),
+ argv)
+ .is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@@ -6405,12 +6374,11 @@ Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Promise::Resolver::Resolve", bool);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
- has_pending_exception = i::Execution::Call(
- isolate,
- isolate->promise_reject(),
- isolate->factory()->undefined_value(),
- arraysize(argv), argv,
- false).is_null();
+ has_pending_exception =
+ i::Execution::Call(isolate, isolate->promise_reject(),
+ isolate->factory()->undefined_value(), arraysize(argv),
+ argv)
+ .is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@@ -6428,9 +6396,9 @@ MaybeLocal<Promise> Promise::Chain(Local<Context> context,
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*handler)};
i::Handle<i::Object> result;
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->promise_chain(), self,
- arraysize(argv), argv, false).ToHandle(&result);
+ has_pending_exception = !i::Execution::Call(isolate, isolate->promise_chain(),
+ self, arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Promise);
RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
@@ -6448,9 +6416,9 @@ MaybeLocal<Promise> Promise::Catch(Local<Context> context,
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
i::Handle<i::Object> result;
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->promise_catch(), self,
- arraysize(argv), argv, false).ToHandle(&result);
+ has_pending_exception = !i::Execution::Call(isolate, isolate->promise_catch(),
+ self, arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Promise);
RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
@@ -6468,9 +6436,9 @@ MaybeLocal<Promise> Promise::Then(Local<Context> context,
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
i::Handle<i::Object> result;
- has_pending_exception =
- !i::Execution::Call(isolate, isolate->promise_then(), self,
- arraysize(argv), argv, false).ToHandle(&result);
+ has_pending_exception = !i::Execution::Call(isolate, isolate->promise_then(),
+ self, arraysize(argv), argv)
+ .ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(Promise);
RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
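
Note: likewise for the Promise plumbing, only the Execution::Call signature changes. End to end, the resolver API these calls implement looks like this (a sketch, assuming an entered context and a live `value`):

    v8::Local<v8::Promise::Resolver> resolver =
        v8::Promise::Resolver::New(context).ToLocalChecked();
    v8::Local<v8::Promise> promise = resolver->GetPromise();
    resolver->Resolve(context, value).FromJust();  // invokes promise_resolve
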
@@ -6508,8 +6476,7 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
Utils::ApiCheck(!self->is_external(), "v8::ArrayBuffer::Externalize",
"ArrayBuffer already externalized");
self->set_is_external(true);
- isolate->heap()->UnregisterArrayBuffer(isolate->heap()->InNewSpace(*self),
- self->backing_store());
+ isolate->heap()->UnregisterArrayBuffer(*self);
return GetContents();
}
@@ -6535,7 +6502,7 @@ void v8::ArrayBuffer::Neuter() {
"Only neuterable ArrayBuffers can be neutered");
LOG_API(obj->GetIsolate(), "v8::ArrayBuffer::Neuter()");
ENTER_V8(isolate);
- i::Runtime::NeuterArrayBuffer(obj);
+ obj->Neuter();
}
@@ -6551,7 +6518,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
- i::Runtime::SetupArrayBufferAllocatingData(i_isolate, obj, byte_length);
+ i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length);
return Utils::ToLocal(obj);
}
@@ -6566,9 +6533,9 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
- i::Runtime::SetupArrayBuffer(i_isolate, obj,
- mode == ArrayBufferCreationMode::kExternalized,
- data, byte_length);
+ i::JSArrayBuffer::Setup(obj, i_isolate,
+ mode == ArrayBufferCreationMode::kExternalized, data,
+ byte_length);
return Utils::ToLocal(obj);
}
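
Note: JSArrayBuffer setup moved from i::Runtime helpers to static methods on the object itself. The externalized-creation path it serves, as a sketch (with kExternalized the embedder retains ownership of `backing` and must release it itself):

    static char backing[4096];
    v8::Local<v8::ArrayBuffer> buf = v8::ArrayBuffer::New(
        isolate, backing, sizeof(backing),
        v8::ArrayBufferCreationMode::kExternalized);
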
@@ -6718,8 +6685,7 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
Utils::ApiCheck(!self->is_external(), "v8::SharedArrayBuffer::Externalize",
"SharedArrayBuffer already externalized");
self->set_is_external(true);
- isolate->heap()->UnregisterArrayBuffer(isolate->heap()->InNewSpace(*self),
- self->backing_store());
+ isolate->heap()->UnregisterArrayBuffer(*self);
return GetContents();
}
@@ -6748,8 +6714,8 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
- i::Runtime::SetupArrayBufferAllocatingData(i_isolate, obj, byte_length, true,
- i::SharedFlag::kShared);
+ i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length, true,
+ i::SharedFlag::kShared);
return Utils::ToLocalShared(obj);
}
@@ -6765,9 +6731,9 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
- i::Runtime::SetupArrayBuffer(i_isolate, obj,
- mode == ArrayBufferCreationMode::kExternalized,
- data, byte_length, i::SharedFlag::kShared);
+ i::JSArrayBuffer::Setup(obj, i_isolate,
+ mode == ArrayBufferCreationMode::kExternalized, data,
+ byte_length, i::SharedFlag::kShared);
return Utils::ToLocalShared(obj);
}
@@ -6871,32 +6837,11 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
}
-void Isolate::CollectAllGarbage(const char* gc_reason) {
+void Isolate::ReportExternalAllocationLimitReached() {
i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap();
- DCHECK_EQ(heap->gc_state(), i::Heap::NOT_IN_GC);
- if (heap->incremental_marking()->IsStopped()) {
- if (heap->incremental_marking()->CanBeActivated()) {
- heap->StartIncrementalMarking(
- i::Heap::kNoGCFlags,
- kGCCallbackFlagSynchronousPhantomCallbackProcessing, gc_reason);
- } else {
- heap->CollectAllGarbage(
- i::Heap::kNoGCFlags, gc_reason,
- kGCCallbackFlagSynchronousPhantomCallbackProcessing);
- }
- } else {
-    // Incremental marking is turned on and has already been started.
-
- // TODO(mlippautz): Compute the time slice for incremental marking based on
- // memory pressure.
- double deadline = heap->MonotonicallyIncreasingTimeInMs() +
- i::FLAG_external_allocation_limit_incremental_time;
- heap->AdvanceIncrementalMarking(
- 0, deadline, i::IncrementalMarking::StepActions(
- i::IncrementalMarking::GC_VIA_STACK_GUARD,
- i::IncrementalMarking::FORCE_MARKING,
- i::IncrementalMarking::FORCE_COMPLETION));
- }
+ if (heap->gc_state() != i::Heap::NOT_IN_GC) return;
+ heap->ReportExternalMemoryPressure(
+ "external memory allocation limit reached.");
}
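
Note: the effect of this rewrite is that external memory pressure is now reported to the heap, which decides between starting, advancing, or finalizing incremental marking; the embedder-visible trigger (see the AdjustAmountOfExternalAllocatedMemory sketch earlier) is unchanged.
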
@@ -7160,13 +7105,6 @@ void Isolate::Exit() {
}
-void Isolate::SetAbortOnUncaughtExceptionCallback(
- AbortOnUncaughtExceptionCallback callback) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- isolate->SetAbortOnUncaughtExceptionCallback(callback);
-}
-
-
Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
Isolate* isolate,
Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
@@ -7258,22 +7196,25 @@ bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
size_t Isolate::NumberOfTrackedHeapObjectTypes() {
- return i::Heap::OBJECT_STATS_COUNT;
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Heap* heap = isolate->heap();
+ return heap->NumberOfTrackedHeapObjectTypes();
}
bool Isolate::GetHeapObjectStatisticsAtLastGC(
HeapObjectStatistics* object_statistics, size_t type_index) {
if (!object_statistics) return false;
- if (type_index >= i::Heap::OBJECT_STATS_COUNT) return false;
if (!i::FLAG_track_gc_object_stats) return false;
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::Heap* heap = isolate->heap();
+ if (type_index >= heap->NumberOfTrackedHeapObjectTypes()) return false;
+
const char* object_type;
const char* object_sub_type;
- size_t object_count = heap->object_count_last_gc(type_index);
- size_t object_size = heap->object_size_last_gc(type_index);
+ size_t object_count = heap->ObjectCountAtLastGC(type_index);
+ size_t object_size = heap->ObjectSizeAtLastGC(type_index);
if (!heap->GetObjectTypeName(type_index, &object_type, &object_sub_type)) {
// There should be no objects counted when the type is unknown.
DCHECK_EQ(object_count, 0U);
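
Note: with the bounds check now asking the heap for NumberOfTrackedHeapObjectTypes(), the enumeration idiom on the embedder side stays the same. A sketch, assuming --track_gc_object_stats is enabled and at least one GC has run:

    for (size_t i = 0; i < isolate->NumberOfTrackedHeapObjectTypes(); ++i) {
      v8::HeapObjectStatistics stats;
      if (!isolate->GetHeapObjectStatisticsAtLastGC(&stats, i)) continue;
      printf("%s/%s: %zu objects, %zu bytes\n",
             stats.object_type(), stats.object_sub_type(),
             stats.object_count(), stats.object_size());
    }
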
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 523000ec3a..8f8956c9e1 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -622,7 +622,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // CpuFeatures::FlushICache(pc, sizeof(target));
+ // Assembler::FlushICacheWithoutIsolate(pc, sizeof(target));
// However, on ARM, no instruction is actually patched in the case
// of embedded constants of the form:
// ldr ip, [pp, #...]
@@ -640,7 +640,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc, 2 * kInstrSize);
+ Assembler::FlushICacheWithoutIsolate(pc, 2 * kInstrSize);
}
} else {
// This is an mov / orr immediate load. Patch the immediate embedded in
@@ -660,7 +660,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc, 4 * kInstrSize);
+ Assembler::FlushICacheWithoutIsolate(pc, 4 * kInstrSize);
}
}
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 633b5d12c0..50c707d2a0 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -34,6 +34,8 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
+#include "src/arm/assembler-arm.h"
+
#if V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h"
@@ -300,6 +302,13 @@ MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
rm_ = no_reg;
offset_ = offset;
am_ = am;
+
+ // Accesses below the stack pointer are not safe, and are prohibited by the
+ // ABI. We can check obvious violations here.
+ if (rn.is(sp)) {
+ if (am == Offset) DCHECK_LE(0, offset);
+ if (am == NegOffset) DCHECK_GE(0, offset);
+ }
}
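
Note: the new constructor check encodes the AAPCS rule that data below the stack pointer must not be touched. Restated standalone (illustrative names, not V8's):

    #include <cassert>
    enum AddrMode { Offset, NegOffset };
    void CheckSpRelativeAccess(AddrMode am, int offset) {
      // [sp, #offset] must not reach below sp, so a positive-offset mode
      // needs offset >= 0 and a negative-offset mode needs offset <= 0.
      if (am == Offset) assert(offset >= 0);
      if (am == NegOffset) assert(offset <= 0);
    }
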
@@ -448,6 +457,8 @@ const Instr kLdrStrInstrTypeMask = 0xffff0000;
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
+ pending_32_bit_constants_(&pending_32_bit_constants_buffer_[0]),
+ pending_64_bit_constants_(&pending_64_bit_constants_buffer_[0]),
constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
@@ -465,6 +476,12 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
Assembler::~Assembler() {
DCHECK(const_pool_blocked_nesting_ == 0);
+ if (pending_32_bit_constants_ != &pending_32_bit_constants_buffer_[0]) {
+ delete[] pending_32_bit_constants_;
+ }
+ if (pending_64_bit_constants_ != &pending_64_bit_constants_buffer_[0]) {
+ delete[] pending_64_bit_constants_;
+ }
}
@@ -3664,6 +3681,15 @@ ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
DCHECK(num_pending_32_bit_constants_ < kMaxNumPending32Constants);
if (num_pending_32_bit_constants_ == 0) {
first_const_pool_32_use_ = position;
+ } else if (num_pending_32_bit_constants_ == kMinNumPendingConstants &&
+ pending_32_bit_constants_ ==
+ &pending_32_bit_constants_buffer_[0]) {
+ // Inline buffer is full, switch to dynamically allocated buffer.
+ pending_32_bit_constants_ =
+ new ConstantPoolEntry[kMaxNumPending32Constants];
+ std::copy(&pending_32_bit_constants_buffer_[0],
+ &pending_32_bit_constants_buffer_[kMinNumPendingConstants],
+ &pending_32_bit_constants_[0]);
}
ConstantPoolEntry entry(position, value, sharing_ok);
pending_32_bit_constants_[num_pending_32_bit_constants_++] = entry;
@@ -3684,6 +3710,15 @@ ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
DCHECK(num_pending_64_bit_constants_ < kMaxNumPending64Constants);
if (num_pending_64_bit_constants_ == 0) {
first_const_pool_64_use_ = position;
+ } else if (num_pending_64_bit_constants_ == kMinNumPendingConstants &&
+ pending_64_bit_constants_ ==
+ &pending_64_bit_constants_buffer_[0]) {
+ // Inline buffer is full, switch to dynamically allocated buffer.
+ pending_64_bit_constants_ =
+ new ConstantPoolEntry[kMaxNumPending64Constants];
+ std::copy(&pending_64_bit_constants_buffer_[0],
+ &pending_64_bit_constants_buffer_[kMinNumPendingConstants],
+ &pending_64_bit_constants_[0]);
}
ConstantPoolEntry entry(position, value);
pending_64_bit_constants_[num_pending_64_bit_constants_++] = entry;
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index d0fcac206e..1d1cc485d5 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -1302,6 +1302,14 @@ class Assembler : public AssemblerBase {
add(sp, sp, Operand(kPointerSize));
}
+ void vpush(DwVfpRegister src, Condition cond = al) {
+ vstm(db_w, sp, src, src, cond);
+ }
+
+ void vpop(DwVfpRegister dst, Condition cond = al) {
+ vldm(ia_w, sp, dst, dst, cond);
+ }
+
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
@@ -1465,6 +1473,7 @@ class Assembler : public AssemblerBase {
static const int kMaxDistToIntPool = 4*KB;
static const int kMaxDistToFPPool = 1*KB;
// All relocations could be integer, it therefore acts as the limit.
+ static const int kMinNumPendingConstants = 4;
static const int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize;
static const int kMaxNumPending64Constants = kMaxDistToFPPool / kInstrSize;
@@ -1598,8 +1607,10 @@ class Assembler : public AssemblerBase {
// pending relocation entry per instruction.
// The buffers of pending constant pool entries.
- ConstantPoolEntry pending_32_bit_constants_[kMaxNumPending32Constants];
- ConstantPoolEntry pending_64_bit_constants_[kMaxNumPending64Constants];
+ ConstantPoolEntry pending_32_bit_constants_buffer_[kMinNumPendingConstants];
+ ConstantPoolEntry pending_64_bit_constants_buffer_[kMinNumPendingConstants];
+ ConstantPoolEntry* pending_32_bit_constants_;
+ ConstantPoolEntry* pending_64_bit_constants_;
// Number of pending constant pool entries in the 32 bits buffer.
int num_pending_32_bit_constants_;
// Number of pending constant pool entries in the 64 bits buffer.
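
Note: taken together, the assembler-arm.cc and assembler-arm.h hunks replace two large fixed arrays with a small-buffer optimization: four entries stored inline, spilling to a heap allocation on demand and freed in the destructor. The shape of the pattern, as a self-contained sketch (names hypothetical):

    #include <algorithm>
    #include <cassert>

    class SmallBuffer {
     public:
      static const int kInlineCapacity = 4;   // cf. kMinNumPendingConstants
      static const int kMaxCapacity = 1024;   // cf. kMaxNumPending32Constants
      SmallBuffer() : data_(&inline_buffer_[0]), size_(0) {}
      ~SmallBuffer() {
        if (data_ != &inline_buffer_[0]) delete[] data_;
      }
      void Add(int value) {
        assert(size_ < kMaxCapacity);
        if (size_ == kInlineCapacity && data_ == &inline_buffer_[0]) {
          // Inline buffer is full; switch to a dynamically allocated one.
          data_ = new int[kMaxCapacity];
          std::copy(&inline_buffer_[0], &inline_buffer_[kInlineCapacity],
                    &data_[0]);
        }
        data_[size_++] = value;
      }
     private:
      int inline_buffer_[kInlineCapacity];
      int* data_;
      int size_;
    };
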
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index cf91753e1a..ea2c92e640 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -24,12 +24,19 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// -- r0 : number of arguments excluding receiver
// -- r1 : called function (only guaranteed when
// extra_args requires it)
- // -- cp : context
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument (argc == r0)
// -- sp[4 * argc] : receiver
// -----------------------------------
+ __ AssertFunction(r1);
+
+ // Make sure we operate in the context of the called function (for example
+ // ConstructStubs implemented in C++ will be run in the context of the caller
+ // instead of the callee, due to the way that [[Construct]] is defined for
+ // ordinary functions).
+ // TODO(bmeurer): Can we make this more robust?
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
@@ -136,7 +143,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
@@ -144,121 +152,128 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1, r2, r3);
- Register function = r1;
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r2);
- __ cmp(function, Operand(r2));
- __ Assert(eq, kUnexpectedStringFunction);
+ // 1. Load the first argument into r0 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ sub(r0, r0, Operand(1), SetCC);
+ __ b(lo, &no_arguments);
+ __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
+ __ Drop(2);
}
- // Load the first arguments in r0 and get rid of the rest.
- Label no_arguments;
- __ cmp(r0, Operand::Zero());
- __ b(eq, &no_arguments);
- // First args = sp[(argc - 1) * 4].
- __ sub(r0, r0, Operand(1));
- __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
- // sp now point to args[0], drop args[0] + receiver.
- __ Drop(2);
-
- Register argument = r2;
- Label not_cached, argument_is_string;
- __ LookupNumberStringCache(r0, // Input.
- argument, // Result.
- r3, // Scratch.
- r4, // Scratch.
- r5, // Scratch.
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
- __ bind(&argument_is_string);
+ // 2a. At least one argument, return r0 if it's a string, otherwise
+ // dispatch to appropriate conversion.
+ Label to_string, symbol_descriptive_string;
+ {
+ __ JumpIfSmi(r0, &to_string);
+ STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
+ __ b(hi, &to_string);
+ __ b(eq, &symbol_descriptive_string);
+ __ Ret();
+ }
- // ----------- S t a t e -------------
- // -- r2 : argument converted to string
- // -- r1 : constructor function
- // -- lr : return address
- // -----------------------------------
+ // 2b. No arguments, return the empty string (and pop the receiver).
+ __ bind(&no_arguments);
+ {
+ __ LoadRoot(r0, Heap::kempty_stringRootIndex);
+ __ Ret(1);
+ }
- Label gc_required;
- __ Allocate(JSValue::kSize,
- r0, // Result.
- r3, // Scratch.
- r4, // Scratch.
- &gc_required,
- TAG_OBJECT);
-
- // Initialising the String Object.
- Register map = r3;
- __ LoadGlobalFunctionInitialMap(function, map, r4);
- if (FLAG_debug_code) {
- __ ldrb(r4, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
- __ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ cmp(r4, Operand::Zero());
- __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
+ // 3a. Convert r0 to a string.
+ __ bind(&to_string);
+ {
+ ToStringStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
- __ str(map, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
+ // 3b. Convert symbol in r0 to a string.
+ __ bind(&symbol_descriptive_string);
+ {
+ __ Push(r0);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ }
+}
- __ str(argument, FieldMemOperand(r0, JSValue::kValueOffset));
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- r1 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
- __ Ret();
+ // 1. Load the first argument into r0 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ sub(r0, r0, Operand(1), SetCC);
+ __ b(lo, &no_arguments);
+ __ ldr(r0, MemOperand(sp, r0, LSL, kPointerSizeLog2, PreIndex));
+ __ Drop(2);
+ __ b(&done);
+ __ bind(&no_arguments);
+ __ LoadRoot(r0, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ bind(&done);
+ }
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- __ JumpIfSmi(r0, &convert_argument);
-
- // Is it a String?
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ tst(r3, Operand(kIsNotStringMask));
- __ b(ne, &convert_argument);
- __ mov(argument, r0);
- __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
- __ b(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into r2.
- __ bind(&convert_argument);
- __ push(function); // Preserve the function.
- __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
+ // 2. Make sure r0 is a string.
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ push(r0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ Label convert, done_convert;
+ __ JumpIfSmi(r0, &convert);
+ __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
+ __ b(lo, &done_convert);
+ __ bind(&convert);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ ToStringStub stub(masm->isolate());
+ __ Push(r1);
+ __ CallStub(&stub);
+ __ Pop(r1);
+ }
+ __ bind(&done_convert);
}
- __ pop(function);
- __ mov(argument, r0);
- __ b(&argument_is_string);
- // Load the empty string into r2, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kempty_stringRootIndex);
- __ Drop(1);
- __ b(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
+ // 3. Allocate a JSValue wrapper for the string.
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ // ----------- S t a t e -------------
+ // -- r0 : the first argument
+ // -- r1 : constructor function
+ // -- lr : return address
+ // -----------------------------------
+
+ Label allocate, done_allocate;
+ __ Move(r2, r0);
+ __ Allocate(JSValue::kSize, r0, r3, r4, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Initialize the JSValue in r0.
+ __ LoadGlobalFunctionInitialMap(r1, r3, r4);
+ __ str(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+ __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ str(r2, FieldMemOperand(r0, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fall back to the runtime to allocate in new space.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Move(r3, Smi::FromInt(JSValue::kSize));
+ __ Push(r1, r2, r3);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ Pop(r1, r2);
+ }
+ __ b(&done_allocate);
}
- __ Ret();
}
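Step 3 writes exactly four pointer-size fields, which the STATIC_ASSERT pins down. A hypothetical struct view of the JSValue wrapper being initialized (field names mirror the offsets used above, not actual V8 declarations):

  #include <cstdint>

  // Hypothetical view of the JSValue wrapper the stub allocates: four
  // pointer-size slots, written by the four str instructions above.
  struct JSValueLayout {
    uintptr_t map;         // HeapObject::kMapOffset      <- ctor's initial map
    uintptr_t properties;  // JSObject::kPropertiesOffset <- empty fixed array
    uintptr_t elements;    // JSObject::kElementsOffset   <- empty fixed array
    uintptr_t value;       // JSValue::kValueOffset       <- the wrapped string
  };
  static_assert(sizeof(JSValueLayout) == 4 * sizeof(uintptr_t),
                "mirrors STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize)");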
@@ -310,8 +325,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_memento) {
+ bool is_api_function) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
@@ -321,9 +335,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- sp[...]: constructor arguments
// -----------------------------------
- // Should never create mementos for api functions.
- DCHECK(!is_api_function || !create_memento);
-
Isolate* isolate = masm->isolate();
// Enter a construct frame.
@@ -398,9 +409,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r2: initial map
Label rt_call_reload_new_target;
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
- if (create_memento) {
- __ add(r3, r3, Operand(AllocationMemento::kSize / kPointerSize));
- }
__ Allocate(r3, r4, r5, r6, &rt_call_reload_new_target, SIZE_IN_WORDS);
@@ -408,7 +416,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map and properties and elements are set to empty fixed array.
// r1: constructor function
// r2: initial map
- // r3: object size (including memento if create_memento)
+ // r3: object size
// r4: JSObject (not tagged)
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
@@ -422,7 +430,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with the appropriate filler.
// r1: constructor function
// r2: initial map
- // r3: object size (in words, including memento if create_memento)
+ // r3: object size
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
@@ -461,25 +469,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&no_inobject_slack_tracking);
}
- if (create_memento) {
- __ sub(ip, r3, Operand(AllocationMemento::kSize / kPointerSize));
- __ add(r0, r4, Operand(ip, LSL, kPointerSizeLog2)); // End of object.
- __ InitializeFieldsWithFiller(r5, r0, r6);
-
- // Fill in memento fields.
- // r5: points to the allocated but uninitialized memento.
- __ LoadRoot(r6, Heap::kAllocationMementoMapRootIndex);
- DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- // Load the AllocationSite
- __ ldr(r6, MemOperand(sp, 3 * kPointerSize));
- __ AssertUndefinedOrAllocationSite(r6, r0);
- DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
- __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
- } else {
- __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
- __ InitializeFieldsWithFiller(r5, r0, r6);
- }
+ __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
+ __ InitializeFieldsWithFiller(r5, r0, r6);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on.
@@ -498,47 +489,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r3: original constructor
__ bind(&rt_call);
- if (create_memento) {
- // Get the cell or allocation site.
- __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
- __ push(r2); // argument 1: allocation site
- }
__ push(r1); // argument 2/1: constructor function
__ push(r3); // argument 3/2: original constructor
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
+ __ CallRuntime(Runtime::kNewObject, 2);
__ mov(r4, r0);
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- Label count_incremented;
- if (create_memento) {
- __ jmp(&count_incremented);
- }
-
// Receiver for constructor call allocated.
// r4: JSObject
__ bind(&allocated);
- if (create_memento) {
- __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ cmp(r2, r5);
- __ b(eq, &count_incremented);
- // r2 is an AllocationSite. We are creating a memento from it, so we
- // need to increment the memento create count.
- __ ldr(r3, FieldMemOperand(r2,
- AllocationSite::kPretenureCreateCountOffset));
- __ add(r3, r3, Operand(Smi::FromInt(1)));
- __ str(r3, FieldMemOperand(r2,
- AllocationSite::kPretenureCreateCountOffset));
- __ bind(&count_incremented);
- }
-
// Restore the parameters.
__ pop(r3);
__ pop(r1);
@@ -642,12 +602,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true);
}
@@ -742,8 +702,7 @@ enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
// Clobbers r2; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm,
- const int calleeOffset, Register argc,
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
IsTagged argc_is_tagged) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -763,12 +722,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ b(gt, &okay); // Signed comparison.
// Out of stack space.
- __ ldr(r1, MemOperand(fp, calleeOffset));
- if (argc_is_tagged == kArgcIsUntaggedInt) {
- __ SmiTag(argc);
- }
- __ Push(r1, argc);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&okay);
}
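The rewritten check no longer loads the callee or tags argc for a builtin call; it compares the headroom below sp against the space the arguments need and throws via Runtime::kThrowStackOverflow. A sketch of the comparison, with hypothetical names and 4-byte pointers:

  #include <cstdint>

  // Sketch of the overflow test: the stack must have room for argc pointer
  // slots above the "real" limit, which keeps slack for stub frames below.
  bool EnoughStackFor(uintptr_t sp, uintptr_t real_stack_limit,
                      uint32_t argc, uintptr_t pointer_size = 4) {
    uintptr_t headroom = sp - real_stack_limit;
    return headroom > argc * pointer_size;  // b gt, &okay (signed in the stub)
  }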
@@ -777,7 +731,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
- // r0: code entry
+ // r0: new.target
// r1: function
// r2: receiver
// r3: argc
@@ -792,22 +746,23 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Set up the context from the function argument.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ // Set up the context (we need to use the caller context from the isolate).
+ ExternalReference context_address(Isolate::kContextAddress,
+ masm->isolate());
+ __ mov(cp, Operand(context_address));
+ __ ldr(cp, MemOperand(cp));
__ InitializeRootRegister();
// Push the function and the receiver onto the stack.
- __ push(r1);
- __ push(r2);
+ __ Push(r1, r2);
// Check if we have enough stack space to push all arguments.
- // The function is the first thing that was pushed above after entering
- // the internal frame.
- const int kFunctionOffset =
- InternalFrameConstants::kCodeOffset - kPointerSize;
// Clobbers r2.
- Generate_CheckStackOverflow(masm, kFunctionOffset, r3, kArgcIsUntaggedInt);
+ Generate_CheckStackOverflow(masm, r3, kArgcIsUntaggedInt);
+
+ // Remember new.target.
+ __ mov(r5, r0);
// Copy arguments to the stack in a loop.
// r1: function
@@ -825,6 +780,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ cmp(r4, r2);
__ b(ne, &loop);
+ // Set up new.target and argc.
+ __ mov(r0, Operand(r3));
+ __ mov(r3, Operand(r5));
+
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
@@ -837,17 +796,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(r9, Operand(r4));
}
- // Invoke the code and pass argc as r0.
- __ mov(r0, Operand(r3));
- if (is_construct) {
- // No type feedback cell is available
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(r0);
- __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
- }
+ // Invoke the code.
+ Handle<Code> builtin = is_construct
+ ? masm->isolate()->builtins()->Construct()
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
+
// Exit the JS frame and remove the parameters (except function), and
// return.
// Respect ABI stack constraint.
@@ -920,7 +874,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
__ cmp(r9, Operand(r2));
__ b(hs, &ok);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -1003,8 +957,11 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- // Drop receiver + arguments.
- __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+
+ // Drop receiver + arguments and return.
+ __ ldr(ip, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kParameterSizeOffset));
+ __ add(sp, sp, ip, LeaveCC);
__ Jump(lr);
}
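Rather than the old hard-coded Drop(1), the exit trampoline now pops the byte count recorded in the BytecodeArray's parameter-size field, assumed here to already cover the receiver slot. In effect (hypothetical helper):

  #include <cstdint>

  // Sketch: on interpreter exit, pop receiver + parameters by the byte size
  // recorded in the BytecodeArray (assumed to include the receiver slot),
  // instead of a fixed one-slot drop.
  uintptr_t DropParameters(uintptr_t sp, uint32_t parameter_size_in_bytes) {
    return sp + parameter_size_in_bytes;  // add sp, sp, ip
  }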
@@ -1253,127 +1210,30 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
}
+// static
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
- { Label done;
+ {
+ Label done;
__ cmp(r0, Operand::Zero());
__ b(ne, &done);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ push(r2);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
__ add(r0, r0, Operand(1));
__ bind(&done);
}
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
+ // 2. Get the callable to call (passed as receiver) from the stack.
// r0: actual number of arguments
- Label slow, non_function;
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ JumpIfSmi(r1, &non_function);
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // 3a. Patch the first argument if necessary when calling a function.
- // r0: actual number of arguments
- // r1: function
- Label shift_arguments;
- __ mov(r4, Operand::Zero()); // indicate regular JS_FUNCTION
- { Label convert_to_object, use_global_proxy, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, &shift_arguments);
-
- // Do not transform the receiver for native (Compilerhints already in r3).
- __ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &shift_arguments);
- // Compute the receiver in sloppy mode.
- __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ ldr(r2, MemOperand(r2, -kPointerSize));
- // r0: actual number of arguments
- // r1: function
- // r2: first argument
- __ JumpIfSmi(r2, &convert_to_object);
-
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- __ cmp(r2, r3);
- __ b(eq, &use_global_proxy);
- __ LoadRoot(r3, Heap::kNullValueRootIndex);
- __ cmp(r2, r3);
- __ b(eq, &use_global_proxy);
-
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &shift_arguments);
-
- __ bind(&convert_to_object);
-
- {
- // Enter an internal frame in order to preserve argument count.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r0);
- __ push(r0);
-
- __ mov(r0, r2);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(r2, r0);
-
- __ pop(r0);
- __ SmiUntag(r0);
-
- // Exit the internal frame.
- }
-
- // Restore the function to r1, and the flag to r4.
- __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ mov(r4, Operand::Zero());
- __ jmp(&patch_receiver);
-
- __ bind(&use_global_proxy);
- __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
-
- __ bind(&patch_receiver);
- __ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ str(r2, MemOperand(r3, -kPointerSize));
-
- __ jmp(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ mov(r4, Operand(1, RelocInfo::NONE32)); // indicate function proxy
- __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(eq, &shift_arguments);
- __ bind(&non_function);
- __ mov(r4, Operand(2, RelocInfo::NONE32)); // indicate non-function
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- // r0: actual number of arguments
- // r1: function
- // r4: call type (0: JS function, 1: function proxy, 2: non-function)
- __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
- __ str(r1, MemOperand(r2, -kPointerSize));
-
- // 4. Shift arguments and return address one slot down on the stack
+ // 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
// r0: actual number of arguments
- // r1: function
- // r4: call type (0: JS function, 1: function proxy, 2: non-function)
- __ bind(&shift_arguments);
- { Label loop;
+ // r1: callable
+ {
+ Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
@@ -1389,49 +1249,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ pop();
}
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- // r0: actual number of arguments
- // r1: function
- // r4: call type (0: JS function, 1: function proxy, 2: non-function)
- { Label function, non_proxy;
- __ tst(r4, r4);
- __ b(eq, &function);
- // Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ mov(r2, Operand::Zero());
- __ cmp(r4, Operand(1));
- __ b(ne, &non_proxy);
-
- __ push(r1); // re-add proxy object as additional argument
- __ add(r0, r0, Operand(1));
- __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- // r0: actual number of arguments
- // r1: function
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2,
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ SmiUntag(r2);
- __ cmp(r2, r0); // Check formal and actual parameter counts.
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET,
- ne);
-
- __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- ParameterCount expected(0);
- __ InvokeCode(r3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
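Steps 1-4 are Function.prototype.call in miniature: ensure at least one argument, treat the receiver slot as the callable, slide the remaining arguments down one slot over it, and dispatch through the generic Call builtin. A plain-array sketch of the shift in step 3 (hypothetical, ignoring the return-address slot the stub also moves):

  #include <cstddef>

  // Sketch of step 3: slide each argument one slot toward the receiver so
  // the original first argument becomes the new receiver, then shrink the
  // count by one (the stub also advances sp by one slot via pop()).
  void ShiftArgumentsDown(void** slots, size_t& argc) {
    // slots[0..argc-1] are the arguments, slots[argc] holds the callable.
    for (size_t i = argc; i > 0; --i) {
      slots[i] = slots[i - 1];  // overwrite toward the receiver slot
    }
    --argc;
  }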
@@ -1500,105 +1319,32 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ Push(r1);
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r0);
- __ ldr(r0, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ push(r0);
+ __ ldr(r1, MemOperand(fp, kArgumentsOffset)); // get the args array
+ __ Push(r0, r1);
if (targetIsArgument) {
- __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
}
- Generate_CheckStackOverflow(masm, kFunctionOffset, r0, kArgcIsSmiTagged);
+ Generate_CheckStackOverflow(masm, r0, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
- __ push(r0); // limit
- __ mov(r1, Operand::Zero()); // initial index
- __ push(r1);
-
- // Get the receiver.
- __ ldr(r0, MemOperand(fp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in r1.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_proxy;
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Do not transform the receiver for strict mode functions.
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ b(ne, &push_receiver);
-
- // Compute the receiver in sloppy mode.
- __ JumpIfSmi(r0, &call_to_object);
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_proxy);
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r1);
- __ b(eq, &use_global_proxy);
-
- // Check if the receiver is already a JavaScript object.
- // r0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &push_receiver);
-
- // Convert the receiver to a regular object.
- // r0: receiver
- __ bind(&call_to_object);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ b(&push_receiver);
-
- __ bind(&use_global_proxy);
- __ ldr(r0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalProxyOffset));
-
- // Push the receiver.
- // r0: receiver
- __ bind(&push_receiver);
- __ push(r0);
+ __ mov(r1, Operand::Zero());
+ __ ldr(r2, MemOperand(fp, kReceiverOffset));
+ __ Push(r0, r1, r2); // limit, initial index and receiver.
// Copy all arguments from the array to the stack.
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
- // Call the function.
- Label call_proxy;
- ParameterCount actual(r0);
+ // Call the callable.
+ // TODO(bmeurer): This should be a tail call according to ES6.
__ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &call_proxy);
- __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
-
- frame_scope.GenerateLeaveFrame();
- __ add(sp, sp, Operand(kStackSize * kPointerSize));
- __ Jump(lr);
-
- // Call the function proxy.
- __ bind(&call_proxy);
- __ push(r1); // add function proxy as last argument
- __ add(r0, r0, Operand(1));
- __ mov(r2, Operand::Zero());
- __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
// Tear down the internal frame and remove function, receiver and args.
}
@@ -1640,9 +1386,10 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
__ push(r0);
__ ldr(r0, MemOperand(fp, kNewTargetOffset)); // get the new.target
__ push(r0);
- __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
- Generate_CheckStackOverflow(masm, kFunctionOffset, r0, kArgcIsSmiTagged);
+ Generate_CheckStackOverflow(masm, r0, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
@@ -1735,6 +1482,249 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+
+ Label convert, convert_global_proxy, convert_to_object, done_convert;
+ __ AssertFunction(r1);
+ // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
+ // slot is "classConstructor".
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+ SharedFunctionInfo::kStrictModeByteOffset);
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ __ ldrb(r3, FieldMemOperand(r2, SharedFunctionInfo::kNativeByteOffset));
+ __ tst(r3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+ (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ b(ne, &done_convert);
+ {
+ __ ldr(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the function to call (checked to be a JSFunction)
+ // -- r2 : the shared function info.
+ // -- r3 : the receiver
+ // -- cp : the function context.
+ // -----------------------------------
+
+ Label convert_receiver;
+ __ JumpIfSmi(r3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
+ __ b(hs, &done_convert);
+ __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
+ __ JumpIfNotRoot(r3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(r3);
+ }
+ __ b(&convert_receiver);
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r0);
+ __ Push(r0, r1);
+ __ mov(r0, r3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(r3, r0);
+ __ Pop(r0, r1);
+ __ SmiUntag(r0);
+ }
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ __ str(r3, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the function to call (checked to be a JSFunction)
+ // -- r2 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ __ ldr(r2,
+ FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(r2);
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ ParameterCount actual(r0);
+ ParameterCount expected(r2);
+ __ InvokeCode(r3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+}
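The convert block mirrors ES6 OrdinaryCallBindThis for sloppy-mode, non-native callees: JS receivers pass through untouched, undefined and null become the global proxy, and anything else goes through ToObject. A decision-tree sketch with a hypothetical tag classification:

  // Hypothetical tag classification standing in for V8's tagged values.
  enum class Kind { kSmi, kUndefined, kNull, kJSReceiver, kOtherHeapObject };

  // Sketch of the receiver coercion in Generate_CallFunction for sloppy,
  // non-native callees (strict/native callees skip this entirely).
  const char* CoerceReceiver(Kind receiver) {
    switch (receiver) {
      case Kind::kJSReceiver:
        return "keep as-is";            // b hs, &done_convert
      case Kind::kUndefined:
      case Kind::kNull:
        return "use the global proxy";  // convert_global_proxy
      case Kind::kSmi:
      case Kind::kOtherHeapObject:
        return "wrap via ToObject";     // convert_to_object (ToObjectStub)
    }
    return "";
  }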
+
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ Label non_callable, non_function, non_smi;
+ __ JumpIfSmi(r1, &non_callable);
+ __ bind(&non_smi);
+ __ CompareObjectType(r1, r4, r5, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
+ eq);
+ __ cmp(r5, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(ne, &non_function);
+
+ // 1. Call to function proxy.
+ // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
+ __ ldr(r1, FieldMemOperand(r1, JSFunctionProxy::kCallTrapOffset));
+ __ AssertNotSmi(r1);
+ __ b(&non_smi);
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not, we raise an exception).
+ __ bind(&non_function);
+ // Check if target has a [[Call]] internal method.
+ __ ldrb(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ tst(r4, Operand(1 << Map::kIsCallable));
+ __ b(eq, &non_callable);
+ // Overwrite the original receiver with the (original) target.
+ __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
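For targets that are neither functions nor proxies, callability comes down to one bit in the map's bit field, as tested above. A sketch with an illustrative bit position:

  // Sketch of the callable check: a heap object can be called iff the
  // IsCallable bit is set in its map's bit field. Bit position illustrative.
  constexpr unsigned kIsCallableBit = 1u << 4;

  bool IsCallable(unsigned map_bit_field) {
    return (map_bit_field & kIsCallableBit) != 0;  // tst; b eq, &non_callable
  }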
+
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the constructor to call (checked to be a JSFunction)
+ // -- r3 : the original constructor (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(r1);
+ __ AssertFunction(r3);
+
+ // The calling convention for function-specific ConstructStubs requires
+ // r2 to contain either an AllocationSite or undefined.
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
+ __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
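The closing add-to-pc is a tail call computed from a tagged Code pointer: drop the heap-object tag, skip the Code header, and land on the first instruction. Arithmetically (constants illustrative, not V8's real values):

  #include <cstdint>

  // Sketch of "add pc, r4, #(Code::kHeaderSize - kHeapObjectTag)": a tagged
  // Code pointer becomes its entry address by removing the tag bit and
  // skipping the object header in one addition.
  uintptr_t CodeEntry(uintptr_t tagged_code_pointer,
                      uintptr_t code_header_size = 32,
                      uintptr_t heap_object_tag = 1) {
    return tagged_code_pointer + (code_header_size - heap_object_tag);
  }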
+
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the constructor to call (checked to be a JSFunctionProxy)
+ // -- r3 : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
+ __ ldr(r1, FieldMemOperand(r1, JSFunctionProxy::kConstructTrapOffset));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r1 : the constructor to call (can be any Object)
+ // -- r3 : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // Check if target has a [[Construct]] internal method.
+ Label non_constructor;
+ __ JumpIfSmi(r1, &non_constructor);
+ __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ tst(r2, Operand(1 << Map::kIsConstructor));
+ __ b(eq, &non_constructor);
+
+ // Dispatch based on instance type.
+ __ CompareInstanceType(r4, r5, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET, eq);
+ __ cmp(r5, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+ eq);
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
+
+
+// static
+void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- r1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ add(r3, r0, Operand(1)); // Add one for receiver.
+ __ mov(r3, Operand(r3, LSL, kPointerSizeLog2));
+ __ sub(r3, r2, r3);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ b(al, &loop_check);
+ __ bind(&loop_header);
+ __ ldr(r4, MemOperand(r2, -kPointerSize, PostIndex));
+ __ push(r4);
+ __ bind(&loop_check);
+ __ cmp(r2, r3);
+ __ b(gt, &loop_header);
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
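The interpreter hands this stub the address of the first argument plus a count; the stub derives the end address (one extra slot for the receiver) and pushes while walking toward lower addresses. A sketch of the loop with hypothetical types:

  #include <cstdint>
  #include <vector>

  // Sketch of the copy in Generate_PushArgsAndCall: push argc + 1 slots
  // (receiver included) starting at the first argument, stepping downward
  // exactly like the PostIndex loads.
  std::vector<uintptr_t> PushArgs(const uintptr_t* first_arg, uint32_t argc) {
    const uintptr_t* last = first_arg - (argc + 1);      // r3 = r2 - (argc+1)*4
    std::vector<uintptr_t> stack;
    for (const uintptr_t* p = first_arg; p > last; --p)  // cmp r2, r3; b gt
      stack.push_back(*p);                               // ldr r4, [r2], #-4
    return stack;
  }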
+
+
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : actual number of arguments
@@ -1757,7 +1747,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- // Calculate copy start address into r0 and copy end address into r2.
+ // Calculate copy start address into r0 and copy end address into r4.
// r0: actual number of arguments as a smi
// r1: function
// r2: expected number of arguments
@@ -1765,19 +1755,20 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ add(r0, fp, Operand::PointerOffsetFromSmiKey(r0));
// adjust for return address and receiver
__ add(r0, r0, Operand(2 * kPointerSize));
- __ sub(r2, r0, Operand(r2, LSL, kPointerSizeLog2));
+ __ sub(r4, r0, Operand(r2, LSL, kPointerSizeLog2));
// Copy the arguments (including the receiver) to the new stack frame.
// r0: copy start address
// r1: function
- // r2: copy end address
+ // r2: expected number of arguments
// r3: code entry to call
+ // r4: copy end address
Label copy;
__ bind(&copy);
__ ldr(ip, MemOperand(r0, 0));
__ push(ip);
- __ cmp(r0, r2); // Compare before moving to next argument.
+ __ cmp(r0, r4); // Compare before moving to next argument.
__ sub(r0, r0, Operand(kPointerSize));
__ b(ne, &copy);
@@ -1835,20 +1826,23 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r2: expected number of arguments
// r3: code entry to call
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
+ __ sub(r4, fp, Operand(r2, LSL, kPointerSizeLog2));
// Adjust for frame.
- __ sub(r2, r2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ __ sub(r4, r4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
2 * kPointerSize));
Label fill;
__ bind(&fill);
__ push(ip);
- __ cmp(sp, r2);
+ __ cmp(sp, r4);
__ b(ne, &fill);
}
// Call the entry point.
__ bind(&invoke);
+ __ mov(r0, r2);
+ // r0 : expected number of arguments
+ // r1 : function (passed through to callee)
__ Call(r3);
// Store offset of return address for deoptimizer.
@@ -1869,7 +1863,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bkpt(0);
}
}
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 8193816c84..a8a4b5f5ac 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -16,6 +16,8 @@
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
+#include "src/arm/code-stubs-arm.h"
+
namespace v8 {
namespace internal {
@@ -679,29 +681,25 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- if (cc == eq && strict()) {
- __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
+ if (cc == eq) {
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
+ 1);
} else {
- Builtins::JavaScript native;
- if (cc == eq) {
- native = Builtins::EQUALS;
+ int ncr; // NaN compare result
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc == lt || cc == le) {
- ncr = GREATER;
- } else {
- DCHECK(cc == gt || cc == ge); // remaining cases
- ncr = LESS;
- }
- __ mov(r0, Operand(Smi::FromInt(ncr)));
- __ push(r0);
+ DCHECK(cc == gt || cc == ge); // remaining cases
+ ncr = LESS;
}
+ __ mov(r0, Operand(Smi::FromInt(ncr)));
+ __ push(r0);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ __ TailCallRuntime(
+ is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
+ 1);
}
__ bind(&miss);
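The ncr ("NaN compare result") pushed for the runtime call is chosen so that any unordered comparison evaluates to false: report GREATER for < and <=, LESS for > and >=. A sketch (enum values illustrative):

  // Sketch of the ncr choice: the runtime compare returns -1/0/1, and
  // unordered (NaN) operands yield ncr, picked so the relational test
  // fails either way. Values are illustrative, not V8's constants.
  enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };
  enum Condition { lt, le, gt, ge };

  CompareResult NanCompareResult(Condition cc) {
    // lt/le: NaN must not compare "less", so report GREATER; gt/ge: LESS.
    return (cc == lt || cc == le) ? GREATER : LESS;
  }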
@@ -1287,209 +1285,108 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
-// Uses registers r0 to r4.
-// Expected input (depending on whether args are in registers or on the stack):
-// * object: r0 or at sp + 1 * kPointerSize.
-// * function: r1 or at sp.
-//
-// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline sites to patch are passed in r5 and r6.
-// (See LCodeGen::DoInstanceOfKnownGlobal)
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub:
- const Register object = r0; // Object (lhs).
- Register map = r3; // Map of the object.
- const Register function = r1; // Function (rhs).
- const Register prototype = r4; // Prototype of the function.
- const Register scratch = r2;
-
- Label slow, loop, is_instance, is_not_instance, not_js_object;
-
- if (!HasArgsInRegisters()) {
- __ ldr(object, MemOperand(sp, 1 * kPointerSize));
- __ ldr(function, MemOperand(sp, 0));
- }
-
- // Check that the left hand is a JS object and load map.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
- Label miss;
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ b(ne, &miss);
- __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
- __ b(ne, &miss);
- __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+ Register const object = r1; // Object (lhs).
+ Register const function = r0; // Function (rhs).
+ Register const object_map = r2; // Map of {object}.
+ Register const function_map = r3; // Map of {function}.
+ Register const function_prototype = r4; // Prototype of {function}.
+ Register const scratch = r5;
+
+ DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+ // Check if {object} is a smi.
+ Label object_is_smi;
+ __ JumpIfSmi(object, &object_is_smi);
+
+ // Lookup the {function} and the {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Label fast_case, slow_case;
+ __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ b(ne, &fast_case);
+ __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+ __ b(ne, &fast_case);
+ __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+ // If {object} is a smi we can safely return false if {function} is a JS
+ // function, otherwise we have to miss to the runtime and throw an exception.
+ __ bind(&object_is_smi);
+ __ JumpIfSmi(function, &slow_case);
+ __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
+ __ b(ne, &slow_case);
+ __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+ __ Ret();
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- } else {
- DCHECK(HasArgsInRegisters());
- // Patch the (relocated) inlined map check.
-
- // The map_load_offset was stored in r5
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
- const Register map_load_offset = r5;
- __ sub(r9, lr, map_load_offset);
- // Get the map location in r5 and patch it.
- __ GetRelocatedValueLocation(r9, map_load_offset, scratch);
- __ ldr(map_load_offset, MemOperand(map_load_offset));
- __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
-
- __ mov(scratch, map);
- // |map_load_offset| points at the beginning of the cell. Calculate the
- // field containing the map.
- __ add(function, map_load_offset, Operand(Cell::kValueOffset - 1));
- __ RecordWriteField(map_load_offset, Cell::kValueOffset, scratch, function,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- }
+ // Fast-case: The {function} must be a valid JSFunction.
+ __ bind(&fast_case);
+ __ JumpIfSmi(function, &slow_case);
+ __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
+ __ b(ne, &slow_case);
- // Register mapping: r3 is object map and r4 is function prototype.
- // Get prototype of object into r2.
- __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+ // Ensure that {function} has an instance prototype.
+ __ ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ __ b(ne, &slow_case);
- // We don't need map any more. Use it as a scratch register.
- Register scratch2 = map;
- map = no_reg;
+ // Ensure that {function} is not bound.
+ Register const shared_info = scratch;
+ __ ldr(shared_info,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(scratch, FieldMemOperand(shared_info,
+ SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(scratch,
+ Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
+ __ b(ne, &slow_case);
- // Loop through the prototype chain looking for the function prototype.
- __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
+ // Get the "prototype" (or initial map) of the {function}.
+ __ ldr(function_prototype,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ AssertNotSmi(function_prototype);
+
+ // Resolve the prototype if the {function} has an initial map. Afterwards the
+ // {function_prototype} will be either the JSReceiver prototype object or the
+ // hole value, which means that no instances of the {function} were created so
+ // far and hence we should return false.
+ Label function_prototype_valid;
+ __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
+ __ b(ne, &function_prototype_valid);
+ __ ldr(function_prototype,
+ FieldMemOperand(function_prototype, Map::kPrototypeOffset));
+ __ bind(&function_prototype_valid);
+ __ AssertNotSmi(function_prototype);
+
+ // Update the global instanceof cache with the current {object} map and
+ // {function}. The cached answer will be set when it is known below.
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+
+ // Loop through the prototype chain looking for the {function} prototype.
+ // Assume true, and change to false if not found.
+ Register const object_prototype = object_map;
+ Register const null = scratch;
+ Label done, loop;
+ __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+ __ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ cmp(scratch, Operand(prototype));
- __ b(eq, &is_instance);
- __ cmp(scratch, scratch2);
- __ b(eq, &is_not_instance);
- __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
- __ jmp(&loop);
- Factory* factory = isolate()->factory();
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ Move(r0, factory->true_value());
- }
- } else {
- // Patch the call site to return true.
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- // The bool_load_offset was stored in r6
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
- const Register bool_load_offset = r6;
- __ sub(r9, lr, bool_load_offset);
- // Get the boolean result location in scratch and patch it.
- __ GetRelocatedValueLocation(r9, scratch, scratch2);
- __ str(r0, MemOperand(scratch));
-
- if (!ReturnTrueFalseObject()) {
- __ mov(r0, Operand(Smi::FromInt(0)));
- }
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ mov(r0, Operand(Smi::FromInt(1)));
- __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ Move(r0, factory->false_value());
- }
- } else {
- // Patch the call site to return false.
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- // The bool_load_offset was stored in r6
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
- const Register bool_load_offset = r6;
- __ sub(r9, lr, bool_load_offset);
- ;
- // Get the boolean result location in scratch and patch it.
- __ GetRelocatedValueLocation(r9, scratch, scratch2);
- __ str(r0, MemOperand(scratch));
-
- if (!ReturnTrueFalseObject()) {
- __ mov(r0, Operand(Smi::FromInt(1)));
- }
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow);
- __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // Null is not instance of anything.
- __ cmp(object, Operand(isolate()->factory()->null_value()));
- __ b(ne, &object_not_null);
- if (ReturnTrueFalseObject()) {
- __ Move(r0, factory->false_value());
- } else {
- __ mov(r0, Operand(Smi::FromInt(1)));
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
- if (ReturnTrueFalseObject()) {
- __ Move(r0, factory->false_value());
- } else {
- __ mov(r0, Operand(Smi::FromInt(1)));
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch, &slow);
- if (ReturnTrueFalseObject()) {
- __ Move(r0, factory->false_value());
- } else {
- __ mov(r0, Operand(Smi::FromInt(1)));
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
+ __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object_prototype, function_prototype);
+ __ b(eq, &done);
+ __ cmp(object_prototype, null);
+ __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ b(ne, &loop);
+ __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
- // Slow-case. Tail call builtin.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- if (HasArgsInRegisters()) {
- __ Push(r0, r1);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r0, r1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- __ cmp(r0, Operand::Zero());
- __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
- __ Ret(HasArgsInRegisters() ? 0 : 2);
- }
+ // Slow-case: Call the runtime function.
+ __ bind(&slow_case);
+ __ Push(object, function);
+ __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
}
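With the call-site patching paths gone, the stub reduces to a cached prototype-chain walk: start at the receiver's prototype, succeed on hitting the function's prototype, fail on reaching null. A sketch over a hypothetical object model:

  // Hypothetical object model for the prototype walk at the heart of the
  // rewritten InstanceOfStub.
  struct Obj { const Obj* prototype; };

  // Sketch of the loop: true if function_prototype occurs on object's
  // prototype chain, false once the chain ends at null.
  bool OrdinaryHasInstance(const Obj* object, const Obj* function_prototype) {
    for (const Obj* p = object->prototype; p != nullptr; p = p->prototype) {
      if (p == function_prototype) return true;  // cmp; b eq, &done
    }
    return false;  // reached null: LoadRoot(r0, kFalseValueRootIndex)
  }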
@@ -1596,65 +1493,68 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
+ // r1 : function
+ // r2 : number of parameters (tagged)
+ // r3 : parameters pointer
+
+ DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(r2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(r3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
- __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
+ __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &runtime);
// Patch the arguments.length and the parameters pointer in the current frame.
- __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ str(r2, MemOperand(sp, 0 * kPointerSize));
- __ add(r3, r3, Operand(r2, LSL, 1));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
+ __ ldr(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ add(r4, r4, Operand(r2, LSL, 1));
+ __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
__ bind(&runtime);
+ __ Push(r1, r3, r2);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
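The Operand(r2, LSL, 1) above is the standard smi-scaling trick: a smi stores n as n << 1, so one more left shift yields n * 4, the byte size of n pointer slots, without ever untagging. Numerically (hypothetical helper):

  #include <cstdint>

  // Sketch of smi-scaled addressing: a 32-bit smi holds n as (n << 1), so
  // shifting the tagged value left once more gives n * 4, the byte offset
  // of n 4-byte argument slots past the caller frame.
  uintptr_t ParametersPointer(uintptr_t caller_fp, int32_t length_smi,
                              uintptr_t caller_sp_offset) {
    uintptr_t byte_offset = static_cast<uintptr_t>(length_smi) << 1;  // LSL #1
    return caller_fp + byte_offset + caller_sp_offset;
  }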
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // Stack layout:
- // sp[0] : number of parameters (tagged)
- // sp[4] : address of receiver argument
- // sp[8] : function
+ // r1 : function
+ // r2 : number of parameters (tagged)
+ // r3 : parameters pointer
// Registers used over whole function:
- // r6 : allocated object (tagged)
- // r9 : mapped parameter count (tagged)
+ // r5 : arguments count (tagged)
+ // r6 : mapped parameter count (tagged)
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
- // r1 = parameter count (tagged)
+ DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(r2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(r3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
- __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Label adaptor_frame, try_allocate, runtime;
+ __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
+ __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adaptor_frame);
// No adaptor, parameter count = argument count.
- __ mov(r2, r1);
+ __ mov(r5, r2);
+ __ mov(r6, r2);
__ b(&try_allocate);
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
- __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ add(r3, r3, Operand(r2, LSL, 1));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
+ __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ add(r4, r4, Operand(r5, LSL, 1));
+ __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
- // r1 = parameter count (tagged)
- // r2 = argument count (tagged)
- // Compute the mapped parameter count = min(r1, r2) in r1.
- __ cmp(r1, Operand(r2));
- __ mov(r1, Operand(r2), LeaveCC, gt);
+ // r5 = argument count (tagged)
+ // r6 = parameter count (tagged)
+ // Compute the mapped parameter count = min(r6, r5) in r6.
+ __ mov(r6, r2);
+ __ cmp(r6, Operand(r5));
+ __ mov(r6, Operand(r5), LeaveCC, gt);
__ bind(&try_allocate);
@@ -1663,20 +1563,20 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
- __ cmp(r1, Operand(Smi::FromInt(0)));
+ __ cmp(r6, Operand(Smi::FromInt(0)));
__ mov(r9, Operand::Zero(), LeaveCC, eq);
- __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
+ __ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
__ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
// 2. Backing store.
- __ add(r9, r9, Operand(r2, LSL, 1));
+ __ add(r9, r9, Operand(r5, LSL, 1));
__ add(r9, r9, Operand(FixedArray::kHeaderSize));
// 3. Arguments object.
__ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
+ __ Allocate(r9, r0, r4, r9, &runtime, TAG_OBJECT);
// r0 = address of new object(s) (tagged)
// r2 = argument count (smi-tagged)
@@ -1688,33 +1588,32 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
- __ cmp(r1, Operand::Zero());
+ __ cmp(r6, Operand::Zero());
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
__ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
// r0 = address of new object (tagged)
- // r1 = mapped parameter count (tagged)
// r2 = argument count (smi-tagged)
// r4 = address of arguments map (tagged)
+ // r6 = mapped parameter count (tagged)
__ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
+ __ str(r9, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r9, FieldMemOperand(r0, JSObject::kElementsOffset));
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- __ AssertNotSmi(r3);
+ __ AssertNotSmi(r1);
const int kCalleeOffset = JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize;
- __ str(r3, FieldMemOperand(r0, kCalleeOffset));
+ __ str(r1, FieldMemOperand(r0, kCalleeOffset));
// Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(r2);
+ __ AssertSmi(r5);
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
const int kLengthOffset = JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize;
- __ str(r2, FieldMemOperand(r0, kLengthOffset));
+ __ str(r5, FieldMemOperand(r0, kLengthOffset));
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, r4 will point there, otherwise
@@ -1723,25 +1622,25 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
// r0 = address of new object (tagged)
- // r1 = mapped parameter count (tagged)
// r2 = argument count (tagged)
// r4 = address of parameter map or backing store (tagged)
+ // r6 = mapped parameter count (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ cmp(r1, Operand(Smi::FromInt(0)));
- // Move backing store address to r3, because it is
+ __ cmp(r6, Operand(Smi::FromInt(0)));
+ // Move backing store address to r1, because it is
// expected there when filling in the unmapped arguments.
- __ mov(r3, r4, LeaveCC, eq);
+ __ mov(r1, r4, LeaveCC, eq);
__ b(eq, &skip_parameter_map);
- __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
- __ add(r6, r1, Operand(Smi::FromInt(2)));
- __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ LoadRoot(r5, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
+ __ add(r5, r6, Operand(Smi::FromInt(2)));
+ __ str(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ add(r6, r4, Operand(r1, LSL, 1));
- __ add(r6, r6, Operand(kParameterMapHeaderSize));
- __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
+ __ add(r5, r4, Operand(r6, LSL, 1));
+ __ add(r5, r5, Operand(kParameterMapHeaderSize));
+ __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_parameter_count slots. They index the context,
@@ -1752,74 +1651,71 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
// We loop from right to left.
Label parameters_loop, parameters_test;
- __ mov(r6, r1);
- __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
- __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ sub(r9, r9, Operand(r1));
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
- __ add(r3, r4, Operand(r6, LSL, 1));
- __ add(r3, r3, Operand(kParameterMapHeaderSize));
-
- // r6 = loop variable (tagged)
- // r1 = mapping index (tagged)
- // r3 = address of backing store (tagged)
+ __ mov(r5, r6);
+ __ add(r9, r2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ sub(r9, r9, Operand(r6));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ add(r1, r4, Operand(r5, LSL, 1));
+ __ add(r1, r1, Operand(kParameterMapHeaderSize));
+
+ // r1 = address of backing store (tagged)
// r4 = address of parameter map (tagged), which is also the address of new
// object + Heap::kSloppyArgumentsObjectSize (tagged)
// r0 = temporary scratch (among other things, for address calculation)
- // r5 = the hole value
+ // r5 = loop variable (tagged)
+ // ip = the hole value
__ jmp(&parameters_test);
__ bind(&parameters_loop);
- __ sub(r6, r6, Operand(Smi::FromInt(1)));
- __ mov(r0, Operand(r6, LSL, 1));
+ __ sub(r5, r5, Operand(Smi::FromInt(1)));
+ __ mov(r0, Operand(r5, LSL, 1));
__ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
__ str(r9, MemOperand(r4, r0));
__ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ str(r5, MemOperand(r3, r0));
+ __ str(ip, MemOperand(r1, r0));
__ add(r9, r9, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
- __ cmp(r6, Operand(Smi::FromInt(0)));
+ __ cmp(r5, Operand(Smi::FromInt(0)));
__ b(ne, &parameters_loop);
- // Restore r0 = new object (tagged)
+ // Restore r0 = new object (tagged) and r5 = argument count (tagged).
__ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize));
+ __ ldr(r5, FieldMemOperand(r0, kLengthOffset));
__ bind(&skip_parameter_map);
// r0 = address of new object (tagged)
- // r2 = argument count (tagged)
- // r3 = address of backing store (tagged)
- // r5 = scratch
+ // r1 = address of backing store (tagged)
+ // r5 = argument count (tagged)
+ // r6 = mapped parameter count (tagged)
+ // r9 = scratch
// Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
- __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
- __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
+ __ str(r9, FieldMemOperand(r1, FixedArray::kMapOffset));
+ __ str(r5, FieldMemOperand(r1, FixedArray::kLengthOffset));
Label arguments_loop, arguments_test;
- __ mov(r9, r1);
- __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
- __ sub(r4, r4, Operand(r9, LSL, 1));
+ __ sub(r3, r3, Operand(r6, LSL, 1));
__ jmp(&arguments_test);
__ bind(&arguments_loop);
- __ sub(r4, r4, Operand(kPointerSize));
- __ ldr(r6, MemOperand(r4, 0));
- __ add(r5, r3, Operand(r9, LSL, 1));
- __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
- __ add(r9, r9, Operand(Smi::FromInt(1)));
+ __ sub(r3, r3, Operand(kPointerSize));
+ __ ldr(r4, MemOperand(r3, 0));
+ __ add(r9, r1, Operand(r6, LSL, 1));
+ __ str(r4, FieldMemOperand(r9, FixedArray::kHeaderSize));
+ __ add(r6, r6, Operand(Smi::FromInt(1)));
__ bind(&arguments_test);
- __ cmp(r9, Operand(r2));
+ __ cmp(r6, Operand(r5));
__ b(lt, &arguments_loop);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
+ // Return.
__ Ret();
// Do the runtime call to allocate the arguments object.
// r0 = address of new object (tagged)
- // r2 = argument count (tagged)
+ // r5 = argument count (tagged)
__ bind(&runtime);
- __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ Push(r1, r3, r5);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
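
Note: the fast sloppy-arguments path above now receives its inputs in registers (r1 = function, r2 = argument count, r3 = parameters pointer) rather than on the stack, which is why the runtime fallback pushes them explicitly before the tail call. As a rough, non-authoritative sketch, the parameter map the fast path assembles is a FixedArray of length mapped_count + 2; the slot roles below are read off the stores above, the function itself is illustrative:

    #include <cstdio>

    // Sketch (assumption, not V8 source): logical layout of the
    // sloppy-arguments parameter map built by GenerateNewSloppyFast.
    void PrintParameterMap(int mapped_count) {
      std::printf("length     : %d\n", mapped_count + 2);
      std::printf("element 0  : context (cp)\n");
      std::printf("element 1  : address of the backing store\n");
      for (int i = 0; i < mapped_count; ++i) {
        // Mapped parameters record a context slot index; the backing
        // store receives the hole for those slots.
        std::printf("element %d : context slot index for parameter %d\n",
                    2 + i, i);
      }
    }
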
@@ -1848,40 +1744,38 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(eq, &adaptor_frame);
+ // r1 : function
+ // r2 : number of parameters (tagged)
+ // r3 : parameters pointer
- // Get the length from the frame.
- __ ldr(r1, MemOperand(sp, 0));
- __ b(&try_allocate);
+ DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(r2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(r3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label try_allocate, runtime;
+ __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r0, MemOperand(r4, StandardFrameConstants::kContextOffset));
+ __ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &try_allocate);
// Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ str(r1, MemOperand(sp, 0));
- __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
+ __ ldr(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ add(r4, r4, Operand::PointerOffsetFromSmiKey(r2));
+ __ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
// Try the new space allocation. Start out with computing the size
// of the arguments object and the elements array in words.
Label add_arguments_object;
__ bind(&try_allocate);
- __ SmiUntag(r1, SetCC);
+ __ SmiUntag(r9, r2, SetCC);
__ b(eq, &add_arguments_object);
- __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ add(r9, r9, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ add(r1, r1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
+ __ add(r9, r9, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
- __ Allocate(r1, r0, r2, r3, &runtime,
+ __ Allocate(r9, r0, r4, r5, &runtime,
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
@@ -1891,84 +1785,56 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
__ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
- __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
- __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex);
+ __ str(r5, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r5, FieldMemOperand(r0, JSObject::kElementsOffset));
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
- __ AssertSmi(r1);
- __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ AssertSmi(r2);
+ __ str(r2,
+ FieldMemOperand(r0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
// If there are no actual arguments, we're done.
Label done;
- __ cmp(r1, Operand::Zero());
+ __ cmp(r2, Operand::Zero());
__ b(eq, &done);
- // Get the parameters pointer from the stack.
- __ ldr(r2, MemOperand(sp, 1 * kPointerSize));
-
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
- __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
- __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ SmiUntag(r1);
+ __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
+ __ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
+ __ str(r2, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ SmiUntag(r2);
// Copy the fixed array slots.
Label loop;
// Set up r4 to point to the first array slot.
__ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
- // Pre-decrement r2 with kPointerSize on each iteration.
+ // Pre-decrement r3 with kPointerSize on each iteration.
// Pre-decrement in order to skip receiver.
- __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
+ __ ldr(r5, MemOperand(r3, kPointerSize, NegPreIndex));
// Post-increment r4 with kPointerSize on each iteration.
- __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
- __ sub(r1, r1, Operand(1));
- __ cmp(r1, Operand::Zero());
+ __ str(r5, MemOperand(r4, kPointerSize, PostIndex));
+ __ sub(r2, r2, Operand(1));
+ __ cmp(r2, Operand::Zero());
__ b(ne, &loop);
- // Return and remove the on-stack parameters.
+ // Return.
__ bind(&done);
- __ add(sp, sp, Operand(3 * kPointerSize));
__ Ret();
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
+ __ Push(r1, r3, r2);
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
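
The strict path follows the same register convention (r1 = function, r2 = argument count, r3 = parameters pointer). A minimal sketch of the two computations it performs, the allocation size in words and the receiver-skipping copy loop, assuming a 32-bit build and illustrative constants:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kPointerSize = 4;                          // 32-bit ARM
    constexpr size_t kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
    constexpr size_t kStrictArgumentsObjectSize = 4 * kPointerSize;  // assumed

    // Words requested from Allocate(): the arguments object itself, plus an
    // elements array only when there are actual arguments (this mirrors the
    // branch to add_arguments_object above).
    size_t StrictArgumentsSizeInWords(size_t argc) {
      size_t words = kStrictArgumentsObjectSize / kPointerSize;
      if (argc > 0) words += argc + kFixedArrayHeaderSize / kPointerSize;
      return words;
    }

    // The copy loop: r3 walks down the caller frame (the pre-decrement skips
    // the receiver) while r4 walks up the elements array.
    void CopyArguments(intptr_t* params, intptr_t* elements, size_t argc) {
      while (argc-- > 0) *elements++ = *--params;
    }
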
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // Stack layout on entry.
- // sp[0] : language mode
- // sp[4] : index of rest parameter
- // sp[8] : number of parameters
- // sp[12] : receiver displacement
-
- Label runtime;
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ b(ne, &runtime);
-
- // Patch the arguments.length and the parameters pointer.
- __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ str(r1, MemOperand(sp, 2 * kPointerSize));
- __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
- __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ str(r3, MemOperand(sp, 3 * kPointerSize));
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2443,27 +2309,25 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ b(eq, &done);
__ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
- __ b(ne, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
+ __ b(ne, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ jmp(&megamorphic);
- if (!FLAG_pretenuring_call_new) {
- __ bind(&check_allocation_site);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the slot either some other function or an
- // AllocationSite.
- __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &miss);
+ __ bind(&check_allocation_site);
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite.
+ __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &miss);
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
- __ cmp(r1, r5);
- __ b(ne, &megamorphic);
- __ jmp(&done);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ cmp(r1, r5);
+ __ b(ne, &megamorphic);
+ __ jmp(&done);
__ bind(&miss);
@@ -2482,24 +2346,21 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// An uninitialized cache is patched with the function
__ bind(&initialize);
- if (!FLAG_pretenuring_call_new) {
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
- __ cmp(r1, r5);
- __ b(ne, &not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the
- // slot.
- CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
- __ b(&done);
-
- __ bind(&not_array_function);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ cmp(r1, r5);
+ __ b(ne, &not_array_function);
- CreateWeakCellStub create_stub(masm->isolate());
+ // The target function is the Array constructor; create an AllocationSite
+ // if we don't already have it, and store it in the slot.
+ CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ __ b(&done);
+
+ __ bind(&not_array_function);
+ CreateWeakCellStub weak_cell_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
__ bind(&done);
}
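
With FLAG_pretenuring_call_new removed, the allocation-site path runs unconditionally. A compact model of the feedback-slot transitions this code implements; the state names are descriptive assumptions, and the real stub additionally re-initializes a cleared weak cell, which this model omits:

    enum class Feedback { kUninitialized, kMonomorphic, kAllocationSite,
                          kMegamorphic };

    // Sketch of GenerateRecordCallTarget's recording logic.
    Feedback Record(Feedback state, bool same_target, bool is_array_function) {
      switch (state) {
        case Feedback::kUninitialized:
          return is_array_function ? Feedback::kAllocationSite
                                   : Feedback::kMonomorphic;
        case Feedback::kMonomorphic:
        case Feedback::kAllocationSite:
          return same_target ? state : Feedback::kMegamorphic;
        case Feedback::kMegamorphic:
          return Feedback::kMegamorphic;
      }
      return state;
    }
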
@@ -2518,31 +2379,9 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
}
-static void EmitSlowCase(MacroAssembler* masm,
- int argc,
- Label* non_function) {
- // Check for function proxy.
- __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, non_function);
- __ push(r1); // put proxy as additional argument
- __ mov(r0, Operand(argc + 1, RelocInfo::NONE32));
- __ mov(r2, Operand::Zero());
- __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(non_function);
- __ str(r1, MemOperand(sp, argc * kPointerSize));
- __ mov(r0, Operand(argc)); // Set up the number of arguments.
- __ mov(r2, Operand::Zero());
- __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+static void EmitSlowCase(MacroAssembler* masm, int argc) {
+ __ mov(r0, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
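
The stub no longer special-cases function proxies or patches the receiver for CALL_NON_FUNCTION; both jobs move into the generic Call builtin, so the slow case shrinks to loading the argument count and jumping there. A rough model of the dispatch the builtin now owns (an assumption about its behavior, not its source):

    enum class Callee { kJSFunction, kFunctionProxy, kNotCallable };

    // Sketch: where each callee kind ends up after the jump to
    // Builtins::Call().
    const char* CallPath(Callee c) {
      switch (c) {
        case Callee::kJSFunction:    return "invoke the function directly";
        case Callee::kFunctionProxy: return "call through the proxy handler";
        case Callee::kNotCallable:   return "throw TypeError (not callable)";
      }
      return "";
    }
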
@@ -2564,12 +2403,12 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
int argc, bool needs_checks,
bool call_as_method) {
// r1 : the function to call
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
if (needs_checks) {
// Check that the function is really a JavaScript function.
// r1: pushed function (to be verified)
- __ JumpIfSmi(r1, &non_function);
+ __ JumpIfSmi(r1, &slow);
// Go to the slow case if we do not have a function.
__ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
@@ -2604,7 +2443,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
if (needs_checks) {
// Slow-case: Non-function called.
__ bind(&slow);
- EmitSlowCase(masm, argc, &non_function);
+ EmitSlowCase(masm, argc);
}
if (call_as_method) {
@@ -2625,33 +2464,26 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r2 : feedback vector
// r3 : slot in feedback vector (Smi, for RecordCallTarget)
// r4 : original constructor (for IsSuperConstructorCall)
- Label slow, non_function_call;
+ Label non_function;
// Check that the function is not a smi.
- __ JumpIfSmi(r1, &non_function_call);
+ __ JumpIfSmi(r1, &non_function);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r5, r5, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
+ __ b(ne, &non_function);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
- if (FLAG_pretenuring_call_new) {
- // Put the AllocationSite from the feedback vector into r2.
- // By adding kPointerSize we encode that we know the AllocationSite
- // entry is at the feedback vector slot given by r3 + 1.
- __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize + kPointerSize));
- } else {
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into r2, or undefined.
- __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
- __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(eq, &feedback_register_initialized);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into r2, or undefined.
+ __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(eq, &feedback_register_initialized);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(r2, r5);
}
@@ -2663,62 +2495,28 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ mov(r3, r1);
}
- // Jump to the function-specific construct stub.
- Register jmp_reg = r4;
- __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // r0: number of arguments
- // r1: called object
- // r5: object type
- Label do_call;
- __ bind(&slow);
- __ cmp(r5, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, &non_function_call);
- __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand::Zero());
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
+ __ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
- __ ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(vector, FieldMemOperand(vector,
- JSFunction::kSharedFunctionInfoOffset));
- __ ldr(vector, FieldMemOperand(vector,
- SharedFunctionInfo::kFeedbackVectorOffset));
+ __ bind(&non_function);
+ __ mov(r3, r1);
+ __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
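
Construct calls get the same treatment: the per-type builtin selection (CALL_FUNCTION_PROXY_AS_CONSTRUCTOR, CALL_NON_FUNCTION_AS_CONSTRUCTOR) disappears, and anything that is not a JSFunction is handed to the generic Construct builtin. Condensed, under that assumption:

    // Sketch: JSFunctions tail-call their SharedFunctionInfo's construct
    // stub; every other callee goes through Builtins::Construct.
    const char* ConstructPath(bool is_js_function) {
      return is_js_function ? "SharedFunctionInfo construct stub"
                            : "Builtins::Construct";
    }
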
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r1 - function
// r3 - slot id
// r2 - vector
- Label miss;
- int argc = arg_count();
- ParameterCount actual(argc);
-
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
- __ cmp(r1, r4);
- __ b(ne, &miss);
+ // r4 - allocation site (loaded from vector[slot])
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r5);
+ __ cmp(r1, r5);
+ __ b(ne, miss);
__ mov(r0, Operand(arg_count()));
- __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
- __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
-
- // Verify that r4 contains an AllocationSite
- __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
- __ b(ne, &miss);
// Increment the call count for monomorphic function calls.
__ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
@@ -2731,18 +2529,6 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ mov(r3, r1);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The slow case, we need this no matter what to complete a call after a miss.
- CallFunctionNoFeedback(masm,
- arg_count(),
- true,
- CallAsMethod());
-
- // Unreachable.
- __ stop("Unexpected code address");
}
@@ -2755,7 +2541,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
Label have_js_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2809,7 +2595,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper());
__ bind(&slow);
- EmitSlowCase(masm, argc, &non_function);
+ EmitSlowCase(masm, argc);
if (CallAsMethod()) {
__ bind(&wrap);
@@ -2817,11 +2603,21 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
- Label uninitialized, miss;
+ Label uninitialized, miss, not_allocation_site;
__ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &slow_start);
+ // Verify that r4 contains an AllocationSite
+ __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
+ __ b(ne, &not_allocation_site);
+
+ // We have an allocation site.
+ HandleArrayCase(masm, &miss);
+
+ __ bind(&not_allocation_site);
+
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
@@ -2896,7 +2692,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow_start);
// Check that the function is really a JavaScript function.
// r1: pushed function (to be verified)
- __ JumpIfSmi(r1, &non_function);
+ __ JumpIfSmi(r1, &slow);
// Go to the slow case if we do not have a function.
__ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
@@ -2912,10 +2708,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r1, r2, r3);
// Call the entry.
- Runtime::FunctionId id = GetICState() == DEFAULT
- ? Runtime::kCallIC_Miss
- : Runtime::kCallIC_Customization_Miss;
- __ CallRuntime(id, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss, 3);
// Move result to r1 and exit the internal frame.
__ mov(r1, r0);
@@ -3330,15 +3123,10 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&not_smi);
- Label not_heap_number;
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
- // r0: object
- // r1: instance type.
- __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
- __ b(ne, &not_heap_number);
- __ Ret();
- __ bind(&not_heap_number);
+ __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+ // r0: receiver
+ // r1: receiver instance type
+ __ Ret(eq);
Label not_string, slow_string;
__ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
@@ -3362,7 +3150,37 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r0); // Push argument.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+ __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+ // The ToString stub takes one argument in r0.
+ Label is_number;
+ __ JumpIfSmi(r0, &is_number);
+
+ __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
+ // r0: receiver
+ // r1: receiver instance type
+ __ Ret(lo);
+
+ Label not_heap_number;
+ __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
+ __ b(ne, &not_heap_number);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ cmp(r1, Operand(ODDBALL_TYPE));
+ __ b(ne, &not_oddball);
+ __ ldr(r0, FieldMemOperand(r0, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ push(r0); // Push argument.
+ __ TailCallRuntime(Runtime::kToString, 1, 1);
}
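
ToNumberStub's open-coded heap-number check is folded into a single CompareObjectType with a conditional return, and the new ToStringStub mirrors that shape. Its dispatch order, read off the code above and modeled as a small function (the enum is an assumption):

    enum class Input { kSmi, kHeapNumber, kString, kOddball, kOther };

    const char* ToStringPath(Input in) {
      switch (in) {
        case Input::kSmi:
        case Input::kHeapNumber: return "NumberToStringStub";
        case Input::kString:     return "returned unchanged";
        case Input::kOddball:    return "load the cached to_string field";
        case Input::kOther:      return "tail call Runtime::kToString";
      }
      return "";
    }
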
@@ -3461,38 +3279,37 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[4]: left string
- __ Ldrd(r0 , r1, MemOperand(sp)); // Load right in r0, left in r1.
+ // ----------- S t a t e -------------
+ // -- r1 : left
+ // -- r0 : right
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertString(r1);
+ __ AssertString(r0);
Label not_same;
__ cmp(r0, r1);
__ b(ne, &not_same);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
- __ add(sp, sp, Operand(2 * kPointerSize));
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r1,
+ r2);
__ Ret();
__ bind(&not_same);
// Check that both objects are sequential one-byte strings.
+ Label runtime;
__ JumpIfNotBothSequentialOneByteStrings(r1, r0, r2, r3, &runtime);
- // Compare flat one-byte strings natively. Remove arguments from stack first.
- __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
- __ add(sp, sp, Operand(2 * kPointerSize));
+ // Compare flat one-byte strings natively.
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
+ r3);
StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0, r2, r3, r4, r5);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
+ __ Push(r1, r0);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
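
StringCompareStub now takes its operands in registers (r1 = left, r0 = right) and no longer pops arguments off the stack on return. A minimal model of its fast path, with std::string standing in for flat one-byte strings:

    #include <string>

    // Returns LESS (-1), EQUAL (0) or GREATER (1), like the Smi result the
    // stub produces.
    int CompareFlatOneByte(const std::string& left, const std::string& right) {
      if (&left == &right) return 0;  // identical objects: equal, no char scan
      int r = left.compare(right);
      return (r > 0) - (r < 0);
    }
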
@@ -3528,6 +3345,30 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
}
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::BOOLEAN, state());
+ Label miss;
+
+ __ CheckMap(r1, r2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ __ CheckMap(r0, r3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ if (op() != Token::EQ_STRICT && is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (!Token::IsEqualityOp(op())) {
+ __ ldr(r1, FieldMemOperand(r1, Oddball::kToNumberOffset));
+ __ AssertSmi(r1);
+ __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
+ __ AssertSmi(r0);
+ }
+ __ sub(r0, r1, r0);
+ __ Ret();
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
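
The new BOOLEAN CompareIC state handles boolean-to-boolean comparisons inline. Strict equality can subtract the singleton true/false objects directly (identity implies equality); relational operators first load each oddball's cached to-number Smi (0 or 1). Either way the result is encoded as left minus right, as this scalar sketch shows:

    // Model of the boolean fast path; the pointer-subtraction trick for
    // strict equality reduces to the same arithmetic on 0/1 values.
    int CompareBooleans(bool left, bool right) {
      return static_cast<int>(left) - static_cast<int>(right);
    }
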
+
+
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
DCHECK(state() == CompareICState::SMI);
Label miss;
@@ -3815,8 +3656,20 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ cmp(r3, r4);
__ b(ne, &miss);
- __ sub(r0, r0, Operand(r1));
- __ Ret();
+ if (Token::IsEqualityOp(op())) {
+ __ sub(r0, r0, Operand(r1));
+ __ Ret();
+ } else if (is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (op() == Token::LT || op() == Token::LTE) {
+ __ mov(r2, Operand(Smi::FromInt(GREATER)));
+ } else {
+ __ mov(r2, Operand(Smi::FromInt(LESS)));
+ }
+ __ Push(r1, r0, r2);
+ __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -4379,33 +4232,26 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, r2);
+ __ EmitLoadTypeFeedbackVector(r2);
CallICStub stub(isolate(), state());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, r2);
- CallIC_ArrayStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
@@ -4414,11 +4260,10 @@ void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
- Register key, Register vector, Register slot,
- Register feedback, Register receiver_map,
- Register scratch1, Register scratch2,
- bool is_polymorphic, Label* miss) {
+static void HandleArrayCases(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, bool is_polymorphic,
+ Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
@@ -4528,8 +4373,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ b(ne, &not_array);
- HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
- scratch1, r9, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
@@ -4543,7 +4387,6 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&miss);
LoadIC::GenerateMiss(masm);
-
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
@@ -4589,8 +4432,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, r9, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss);
__ bind(&not_array);
// Is it generic?
@@ -4609,8 +4451,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, r9, false, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
@@ -4622,14 +4463,14 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4646,11 +4487,54 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r1
+ Register key = VectorStoreICDescriptor::NameRegister(); // r2
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // r3
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // r4
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(r0)); // r0
+ Register feedback = r5;
+ Register receiver_map = r6;
+ Register scratch1 = r9;
+
+ __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
+ __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ b(ne, &not_array);
+
+ // We are using register r8, which is used for the embedded constant pool
+ // when FLAG_enable_embedded_constant_pool is true.
+ DCHECK(!FLAG_enable_embedded_constant_pool);
+ Register scratch2 = r8;
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
+ &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ b(ne, &miss);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
+ scratch1, scratch2);
- // TODO(mvstanton): Implement.
__ bind(&miss);
StoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
}
@@ -4664,12 +4548,133 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label start_polymorphic;
+ Label transition_call;
+
+ Register cached_map = scratch1;
+ Register too_far = scratch2;
+ Register pointer_reg = feedback;
+ __ ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+ // +-----+------+------+-----+-----+-----+ ... ----+
+ // | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
+ // +-----+------+------+-----+-----+-----+ ... ----+
+ //                 0      1     2             len-1
+ //                 ^                            ^
+ //                 |                            |
+ //             pointer_reg                   too_far
+ //             aka feedback                  scratch2
+ // We also need receiver_map, and use cached_map (scratch1) to look in
+ // the weak map values.
+ __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(too_far));
+ __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(pointer_reg, feedback,
+ Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
+
+ __ bind(&next_loop);
+ __ ldr(cached_map, MemOperand(pointer_reg));
+ __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ cmp(receiver_map, cached_map);
+ __ b(ne, &prepare_next);
+ // Is it a transitioning store?
+ __ ldr(too_far, MemOperand(pointer_reg, kPointerSize));
+ __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &transition_call);
+ __ ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+ __ add(pc, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&transition_call);
+ __ ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+ __ JumpIfSmi(too_far, miss);
+
+ __ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+
+ // Load the map into the correct register.
+ DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ mov(feedback, too_far);
+
+ __ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&prepare_next);
+ __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
+ __ cmp(pointer_reg, too_far);
+ __ b(lt, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ jmp(miss);
+}
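
The polymorphic store walk above scans (weak map, transition-or-undefined, handler) triples until the receiver map matches or the array is exhausted. A simplified model that ignores the weak-cell indirection and the transitioning tail call:

    #include <cstddef>

    struct StoreEntry {
      const void* weak_map;    // cleared weak cells are skipped in the real walk
      const void* transition;  // nullptr stands in for undefined here
      const void* handler;
    };

    // Returns the handler for the matching map, or nullptr to signal a miss.
    const void* FindStoreHandler(const StoreEntry* entries, size_t count,
                                 const void* receiver_map) {
      for (size_t i = 0; i < count; ++i) {
        if (entries[i].weak_map == receiver_map) return entries[i].handler;
      }
      return nullptr;
    }
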
+
+
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r1
+ Register key = VectorStoreICDescriptor::NameRegister(); // r2
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // r3
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // r4
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(r0)); // r0
+ Register feedback = r5;
+ Register receiver_map = r6;
+ Register scratch1 = r9;
+
+ __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
+ __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ b(ne, &not_array);
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+
+ // We are using register r8, which is used for the embedded constant pool
+ // when FLAG_enable_embedded_constant_pool is true.
+ DCHECK(!FLAG_enable_embedded_constant_pool);
+ Register scratch2 = r8;
+
+ HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
+ &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ b(ne, &try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ cmp(key, feedback);
+ __ b(ne, &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
+ __ ldr(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
+ &miss);
- // TODO(mvstanton): Implement.
__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
}
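
Both vector store stubs were previously TODO placeholders that fell straight through to the miss handler; they now implement the full dispatch. Reading the keyed variant top to bottom gives this order, sketched as a function (the feedback-kind names are assumptions):

    enum class FeedbackKind { kWeakCellMatch, kFixedArray, kMegamorphicSymbol,
                              kMatchingName, kOther };

    const char* KeyedStorePath(FeedbackKind k) {
      switch (k) {
        case FeedbackKind::kWeakCellMatch:     return "monomorphic handler";
        case FeedbackKind::kFixedArray:        return "polymorphic triple walk";
        case FeedbackKind::kMegamorphicSymbol: return "megamorphic stub";
        case FeedbackKind::kMatchingName:      return "keyed polymorphic walk";
        case FeedbackKind::kOther:             return "KeyedStoreIC miss";
      }
      return "";
    }
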
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index b2b2c08cd8..e572fd9a1b 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -129,8 +129,8 @@ class RecordWriteStub: public PlatformCodeStub {
break;
}
DCHECK(GetMode(stub) == mode);
- CpuFeatures::FlushICache(stub->instruction_start(),
- 2 * Assembler::kInstrSize);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
+ 2 * Assembler::kInstrSize);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 6a9f4677f6..97f1034061 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/arm/codegen-arm.h"
+
#if V8_TARGET_ARCH_ARM
#include "src/arm/simulator-arm.h"
@@ -65,7 +67,7 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
@@ -225,7 +227,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
@@ -312,7 +314,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CodeDesc desc;
masm.GetCode(&desc);
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
@@ -340,7 +342,7 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
@@ -932,7 +934,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CpuFeatures::FlushICache(sequence, young_length);
+ Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index f291ba92ca..f5d2ab19d1 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -16,28 +16,14 @@
#include "src/assembler.h"
#include "src/macro-assembler.h"
-#include "src/simulator.h" // for cache flushing.
namespace v8 {
namespace internal {
-
void CpuFeatures::FlushICache(void* start, size_t size) {
- if (size == 0) return;
-
- if (CpuFeatures::IsSupported(COHERENT_CACHE)) return;
-
-#if defined(USE_SIMULATOR)
- // Not generating ARM instructions for C-code. This means that we are
- // building an ARM emulator based target. We should notify the simulator
- // that the Icache was flushed.
- // None of this code ends up in the snapshot so there are no issues
- // around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
-
-#elif V8_OS_QNX
+#if !defined(USE_SIMULATOR)
+#if V8_OS_QNX
msync(start, size, MS_SYNC | MS_INVALIDATE_ICACHE);
-
#else
register uint32_t beg asm("r0") = reinterpret_cast<uint32_t>(start);
register uint32_t end asm("r1") = beg + size;
@@ -73,6 +59,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
: "memory");
#endif
#endif
+#endif // !USE_SIMULATOR
}
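
With the simulator notification lifted into Assembler::FlushICache, which now takes an explicit Isolate instead of reaching for Isolate::Current(), this function only has work to do on real hardware. A portable sketch of the remaining non-QNX behavior, using the compiler builtin rather than the raw cacheflush syscall the real code issues inline:

    #include <cstddef>

    // Sketch only; __builtin___clear_cache is the GCC/Clang equivalent of
    // the inline syscall sequence above.
    void FlushICacheSketch(void* start, size_t size) {
    #if !defined(USE_SIMULATOR)
      char* begin = static_cast<char*>(start);
      __builtin___clear_cache(begin, begin + size);
    #else
      (void)start;
      (void)size;  // simulator caches are now flushed by the caller
    #endif
    }
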
} // namespace internal
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index 2004ff6ae9..b1139bacc4 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -10,6 +10,7 @@
#include "src/arm/assembler-arm-inl.h"
#include "src/arm/assembler-arm.h"
+#include "src/arm/frames-arm.h"
#include "src/arm/macro-assembler-arm.h"
namespace v8 {
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index f26b62ccaa..aa49843bd0 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/arm/interface-descriptors-arm.h"
+
#if V8_TARGET_ARCH_ARM
#include "src/interface-descriptors.h"
@@ -31,6 +33,11 @@ const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r4; }
const Register VectorStoreICDescriptor::VectorRegister() { return r3; }
+const Register VectorStoreTransitionDescriptor::SlotRegister() { return r4; }
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return r3; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return r5; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return r3; }
@@ -41,14 +48,23 @@ const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
-const Register InstanceofDescriptor::left() { return r0; }
-const Register InstanceofDescriptor::right() { return r1; }
+const Register InstanceOfDescriptor::LeftRegister() { return r1; }
+const Register InstanceOfDescriptor::RightRegister() { return r0; }
+
+
+const Register StringCompareDescriptor::LeftRegister() { return r1; }
+const Register StringCompareDescriptor::RightRegister() { return r0; }
const Register ArgumentsAccessReadDescriptor::index() { return r1; }
const Register ArgumentsAccessReadDescriptor::parameter_count() { return r0; }
+const Register ArgumentsAccessNewDescriptor::function() { return r1; }
+const Register ArgumentsAccessNewDescriptor::parameter_count() { return r2; }
+const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r3; }
+
+
const Register ApiGetterDescriptor::function_address() { return r2; }
@@ -64,10 +80,10 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
-void StoreTransitionDescriptor::InitializePlatformSpecific(
+void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- MapRegister()};
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ SlotRegister(), VectorRegister(), MapRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -94,6 +110,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToStringDescriptor::ReceiverRegister() { return r0; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return r0; }
@@ -181,6 +201,15 @@ void CallConstructDescriptor::InitializePlatformSpecific(
}
+void CallTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments
+ // r1 : the target to call
+ Register registers[] = {r1, r0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r2, r1, r0};
@@ -386,6 +415,18 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r0, // argument count (including receiver)
+ r2, // address of first argument
+ r1 // the target callable to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 1c04ba7ee7..4ccb020995 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/arm/lithium-arm.h"
+
#include <sstream>
#include "src/arm/lithium-codegen-arm.h"
@@ -168,13 +170,6 @@ void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
value()->PrintTo(stream);
@@ -921,28 +916,25 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall()) {
+ if (instr->IsCall() || instr->IsPrologue()) {
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- LInstruction* instruction_needing_environment = NULL;
if (hydrogen_val->HasObservableSideEffects()) {
HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- instruction_needing_environment = instr;
sim->ReplayEnvironment(current_block_->last_environment());
hydrogen_value_for_lazy_bailout = sim;
}
LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
}
}
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+ return new (zone()) LPrologue();
+}
+
+
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -994,22 +986,21 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* left =
+ UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+ LOperand* right =
+ UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result =
- new(zone()) LInstanceOf(context, UseFixed(instr->left(), r0),
- UseFixed(instr->right(), r1));
+ LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, r0), instr);
}
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(
- UseFixed(instr->context(), cp),
- UseFixed(instr->left(), r0),
- FixedTemp(r4));
- return MarkAsCall(DefineFixed(result, r0), instr);
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+ HHasInPrototypeChainAndBranch* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* prototype = UseRegister(instr->prototype());
+ return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
}
@@ -1785,14 +1776,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
}
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new(zone()) LIsObjectAndBranch(value, temp);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -2510,13 +2493,6 @@ LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LFunctionLiteral(context), r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index eea9ece5ae..8954710e53 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -79,19 +79,17 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
+ V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -132,6 +130,7 @@ class LCodeGen;
V(OsrEntry) \
V(Parameter) \
V(Power) \
+ V(Prologue) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -235,8 +234,6 @@ class LInstruction : public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
@@ -392,6 +389,12 @@ class LGoto final : public LTemplateInstruction<0, 0, 0> {
};
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -1011,23 +1014,6 @@ class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
};
-class LIsObjectAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1192,41 +1178,27 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
+ LOperand* context() const { return inputs_[0]; }
+ LOperand* left() const { return inputs_[1]; }
+ LOperand* right() const { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal final : public LTemplateInstruction<1, 2, 1> {
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
- LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
+ LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+ inputs_[0] = object;
+ inputs_[1] = prototype;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) override {
- lazy_deopt_env_ = env;
- }
+ LOperand* object() const { return inputs_[0]; }
+ LOperand* prototype() const { return inputs_[1]; }
- private:
- LEnvironment* lazy_deopt_env_;
+ DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+ "has-in-prototype-chain-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
};
@@ -2593,19 +2565,6 @@ class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFunctionLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 606721f2da..d958405e82 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -7,10 +7,10 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -118,8 +118,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
- !info_->is_native() && info_->scope()->has_this_declaration()) {
+ if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
@@ -171,16 +170,27 @@ bool LCodeGen::GeneratePrologue() {
if (info()->saves_caller_doubles()) {
SaveCallerDoubles();
}
+ return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+ Comment(";;; Prologue begin");
// Possibly allocate a local context.
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info()->scope()->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in r1.
- DCHECK(!info()->scope()->is_script_scope());
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+ if (info()->scope()->is_script_scope()) {
+ __ push(r1);
+ __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ CallRuntime(Runtime::kNewScriptContext, 2);
+ deopt_mode = Safepoint::kLazyDeopt;
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -188,7 +198,8 @@ bool LCodeGen::GeneratePrologue() {
__ push(r1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoLazyDeopt);
+ RecordSafepoint(deopt_mode);
+
// Context is returned in both r0 and cp. It replaces the context
// passed to us. It's saved in the stack and kept live in cp.
__ mov(cp, r0);
@@ -226,13 +237,7 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; End allocate local context");
}
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- // We have not executed any compiled code yet, so cp still holds the
- // incoming context.
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
+ Comment(";;; Prologue end");
}
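
The prologue is now split in two: GeneratePrologue() ends after saving caller doubles, and context allocation moves into the new DoPrologue() instruction, which gains a third path for script scopes. A minimal sketch of the dispatch, with illustrative names standing in for the real Scope queries and stubs:

    // Sketch only; Path and the parameters model CompilationInfo/Scope state.
    enum class Path { kNewScriptContext, kFastNewContextStub, kNewFunctionContext };

    Path ChooseContextAllocation(bool is_script_scope, int slots,
                                 int fast_stub_maximum_slots) {
      if (is_script_scope) return Path::kNewScriptContext;  // runtime call, lazy deopt
      if (slots <= fast_stub_maximum_slots)
        return Path::kFastNewContextStub;                   // result is in new space
      return Path::kNewFunctionContext;                     // generic runtime call
    }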
@@ -807,7 +812,6 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- DCHECK(info()->IsOptimizing() || info()->IsStub());
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
@@ -1061,11 +1065,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCompare: {
- StringCompareStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -2150,7 +2149,14 @@ void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
}
-template<class InstrType>
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition) {
+ int true_block = instr->TrueDestination(chunk_);
+ __ b(condition, chunk_->GetAssemblyLabel(true_block));
+}
+
+
+template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
int false_block = instr->FalseDestination(chunk_);
__ b(condition, chunk_->GetAssemblyLabel(false_block));
@@ -2453,45 +2459,6 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
}
-Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object) {
- Register temp2 = scratch0();
- __ JumpIfSmi(input, is_not_object);
-
- __ LoadRoot(temp2, Heap::kNullValueRootIndex);
- __ cmp(input, temp2);
- __ b(eq, is_object);
-
- // Load map.
- __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
- __ tst(temp2, Operand(1 << Map::kIsUndetectable));
- __ b(ne, is_not_object);
-
- // Load instance type and check that it is in object type range.
- __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(lt, is_not_object);
- __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- return le;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- Condition true_cond =
- EmitIsObject(reg, temp1,
- instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
-
- EmitBranch(instr, true_cond);
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -2562,17 +2529,14 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
+ DCHECK(ToRegister(instr->left()).is(r1));
+ DCHECK(ToRegister(instr->right()).is(r0));
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // This instruction also signals no smi code inlined.
+ Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
__ cmp(r0, Operand::Zero());
- Condition condition = ComputeCompareCondition(op);
-
- EmitBranch(instr, condition);
+ EmitBranch(instr, ComputeCompareCondition(instr->op()));
}
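
DoStringCompareAndBranch now calls the StringCompare code factory entry directly instead of going through a CompareIC. The stub leaves a three-way result in r0, so the branch reduces to a comparison against zero under the token's condition; roughly (stand-in enum, not Token::Value):

    enum class Token { LT, GT, LTE, GTE, EQ };

    // r0 models the stub result: negative, zero, or positive (strcmp-style).
    bool StringCompareBranchTaken(int r0, Token op) {
      switch (op) {
        case Token::LT:  return r0 < 0;
        case Token::GT:  return r0 > 0;
        case Token::LTE: return r0 <= 0;
        case Token::GTE: return r0 >= 0;
        case Token::EQ:  return r0 == 0;
      }
      return false;
    }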
@@ -2725,160 +2689,40 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0.
- DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1.
-
- InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+ DCHECK(ToRegister(instr->result()).is(r0));
+ InstanceOfStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-
- __ cmp(r0, Operand::Zero());
- __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
- __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
- &load_bool_);
- }
- LInstruction* instr() override { return instr_; }
- Label* map_check() { return &map_check_; }
- Label* load_bool() { return &load_bool_; }
-
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- Label load_bool_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+ LHasInPrototypeChainAndBranch* instr) {
+ Register const object = ToRegister(instr->object());
+ Register const object_map = scratch0();
+ Register const object_prototype = object_map;
+ Register const prototype = ToRegister(instr->prototype());
- // A Smi is not instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = temp;
- __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- {
- // Block constant pool emission to ensure the positions of instructions are
- // as expected by the patcher. See InstanceofStub::Generate().
- Assembler::BlockConstPoolScope block_const_pool(masm());
- __ bind(deferred->map_check()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch with
- // the cached map.
- Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ mov(ip, Operand(cell));
- __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
- __ cmp(map, Operand(ip));
- __ b(ne, &cache_miss);
- __ bind(deferred->load_bool()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch
- // with true or false.
- __ mov(result, Operand(factory()->the_hole_value()));
+ // The {object} must be a spec object. It's sufficient to know that {object}
+ // is not a smi, since all other non-spec objects have {null} prototypes and
+ // will be ruled out below.
+ if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+ __ SmiTst(object);
+ EmitFalseBranch(instr, eq);
}
- __ b(&done);
-
- // The inlined call site cache did not match. Check null and string before
- // calling the deferred code.
- __ bind(&cache_miss);
- // Null is not instance of anything.
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(object, Operand(ip));
- __ b(eq, &false_result);
-
- // String values is not instance of anything.
- Condition is_string = masm_->IsObjectStringType(object, temp);
- __ b(is_string, &false_result);
-
- // Go to the deferred code.
- __ b(deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
-
- // Here result has either true or false. Deferred code also produces true or
- // false object.
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check,
- Label* bool_load) {
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(isolate(), flags);
-
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
-
- __ Move(InstanceofStub::right(), instr->function());
- int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
- int additional_delta = (call_size / Assembler::kInstrSize) + 4;
- {
- // Make sure that code size is predicable, since we use specific constants
- // offsets in the code to find embedded values..
- PredictableCodeSizeScope predictable(
- masm_, additional_delta * Assembler::kInstrSize);
- // The labels must be already bound since the code has predictabel size up
- // to the call instruction.
- DCHECK(map_check->is_bound());
- DCHECK(bool_load->is_bound());
- // Make sure we don't emit any additional entries in the constant pool
- // before the call to ensure that the CallCodeSize() calculated the
- // correct number of instructions for the constant pool load.
- {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- int map_check_delta =
- masm_->InstructionsGeneratedSince(map_check) + additional_delta;
- int bool_load_delta =
- masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
- Label before_push_delta;
- __ bind(&before_push_delta);
- __ BlockConstPoolFor(additional_delta);
- // r5 is used to communicate the offset to the location of the map check.
- __ mov(r5, Operand(map_check_delta * kPointerSize));
- // r6 is used to communicate the offset to the location of the bool load.
- __ mov(r6, Operand(bool_load_delta * kPointerSize));
- // The mov above can generate one or two instructions. The delta was
- // computed for two instructions, so we need to pad here in case of one
- // instruction.
- while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
- __ nop();
- }
- }
- CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- }
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value (r0) into the result register slot and
- // restore all registers.
- __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
+  // Loop through the {object}'s prototype chain looking for the {prototype}.
+ __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ Label loop;
+ __ bind(&loop);
+ __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object_prototype, prototype);
+ EmitTrueBranch(instr, eq);
+ __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+ EmitFalseBranch(instr, eq);
+ __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ b(&loop);
}
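
DoHasInPrototypeChainAndBranch replaces the patched call-site caches of DoInstanceOfKnownGlobal with a plain loop over maps. The emitted loop is equivalent to this sketch, written against a hypothetical two-field object model rather than V8's tagged layout:

    // Hypothetical layout: a HeapObject carries a map, a Map carries a
    // prototype; nullptr models the null value that terminates the chain.
    struct Map;
    struct HeapObject { Map* map = nullptr; };
    struct Map { HeapObject* prototype = nullptr; };

    bool HasInPrototypeChain(const HeapObject* object, const HeapObject* prototype) {
      const Map* object_map = object->map;               // ldr object_map, [object, kMapOffset]
      for (;;) {
        const HeapObject* object_prototype = object_map->prototype;
        if (object_prototype == prototype) return true;  // EmitTrueBranch(instr, eq)
        if (object_prototype == nullptr) return false;   // reached null: not found
        object_map = object_prototype->map;              // advance one link and loop
      }
    }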
@@ -3612,11 +3456,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Set r0 to arguments count if adaption is not needed. Assumes that r0
- // is available to write to at this point.
- if (dont_adapt_arguments) {
- __ mov(r0, Operand(arity));
- }
+ // Always initialize r0 to the number of actual arguments.
+ __ mov(r0, Operand(arity));
// Invoke function.
__ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
@@ -3997,9 +3838,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(r1));
DCHECK(ToRegister(instr->result()).is(r0));
- if (instr->hydrogen()->pass_argument_count()) {
- __ mov(r0, Operand(instr->arity()));
- }
+ __ mov(r0, Operand(instr->arity()));
// Change context.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@@ -5559,7 +5398,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// r0 = regexp literal clone.
// r2-5 are used as temporaries.
int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
__ Move(r6, instr->hydrogen()->literals());
__ ldr(r1, FieldMemOperand(r6, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -5594,26 +5433,6 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
}
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->kind());
- __ mov(r2, Operand(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else {
- __ mov(r2, Operand(instr->hydrogen()->shared_info()));
- __ mov(r1, Operand(pretenure ? factory()->true_value()
- : factory()->false_value()));
- __ Push(cp, r2, r1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(r3));
DCHECK(ToRegister(instr->result()).is(r0));
@@ -5682,27 +5501,25 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = ne;
} else if (String::Equals(type_name, factory->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- Register type_reg = scratch;
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
- __ b(eq, true_label);
- __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ and_(scratch, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ cmp(scratch, Operand(1 << Map::kIsCallable));
final_branch_condition = eq;
} else if (String::Equals(type_name, factory->object_string())) {
- Register map = scratch;
__ JumpIfSmi(input, false_label);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ b(eq, true_label);
- __ CheckObjectTypeRange(input,
- map,
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE,
- false_label);
- // Check for undetectable objects => false.
- __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, scratch, ip, FIRST_SPEC_OBJECT_TYPE);
+ __ b(lt, false_label);
+ // Check for callable or undetectable objects => false.
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ tst(scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
final_branch_condition = eq;
// clang-format off
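
The rewritten typeof checks drop the NONCALLABLE_SPEC_OBJECT instance-type range and instead test two map bit-field flags. In effect (bit positions here are illustrative, not V8's actual values):

    #include <cstdint>

    constexpr uint8_t kIsCallable     = 1 << 0;  // stands in for 1 << Map::kIsCallable
    constexpr uint8_t kIsUndetectable = 1 << 1;  // stands in for 1 << Map::kIsUndetectable

    // typeof x == "function": callable and not undetectable.
    bool TypeofIsFunction(uint8_t bit_field) {
      return (bit_field & (kIsCallable | kIsUndetectable)) == kIsCallable;
    }

    // typeof x == "object", non-null case: a spec object that is neither
    // callable nor undetectable (the instance-type check is elided here).
    bool TypeofIsObject(bool is_spec_object, uint8_t bit_field) {
      return is_spec_object && (bit_field & (kIsCallable | kIsUndetectable)) == 0;
    }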
@@ -5749,7 +5566,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 176097f5d9..dc58479047 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -115,8 +115,6 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check, Label* bool_load);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register result,
@@ -272,7 +270,9 @@ class LCodeGen: public LCodeGenBase {
// EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition condition);
- template<class InstrType>
+ template <class InstrType>
+ void EmitTrueBranch(InstrType instr, Condition condition);
+ template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition);
void EmitNumberUntagD(LNumberUntagD* instr, Register input,
DwVfpRegister result, NumberUntagDMode mode);
@@ -285,14 +285,6 @@ class LCodeGen: public LCodeGenBase {
Register input,
Handle<String> type_name);
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object);
-
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
index 31feb11edc..e1bd47b2ec 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -278,10 +278,10 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
// kScratchDoubleReg was used to break the cycle.
- __ vstm(db_w, sp, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpush(kScratchDoubleReg);
__ vldr(kScratchDoubleReg, source_operand);
__ vstr(kScratchDoubleReg, destination_operand);
- __ vldm(ia_w, sp, kScratchDoubleReg, kScratchDoubleReg);
+ __ vpop(kScratchDoubleReg);
} else {
__ vldr(kScratchDoubleReg, source_operand);
__ vstr(kScratchDoubleReg, destination_operand);
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 4034fa95a4..49802ba734 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -10,10 +10,11 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
+#include "src/arm/macro-assembler-arm.h"
+
namespace v8 {
namespace internal {
@@ -425,7 +426,7 @@ void MacroAssembler::LoadRoot(Register destination,
!predictable_code_size()) {
// The CPU supports fast immediate values, and this root will never
// change. We will load it as a relocatable immediate value.
- Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
+ Handle<Object> root = isolate()->heap()->root_handle(index);
mov(destination, Operand(root), LeaveCC, cond);
return;
}
@@ -1041,6 +1042,14 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
+void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+ ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ ldr(vector,
+ FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
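
The new EmitLoadTypeFeedbackVector is three dependent loads: the function from the frame, its SharedFunctionInfo, then the feedback vector. As a pointer-chasing sketch over stand-in structs (the real code reads tagged fields at the named offsets):

    struct TypeFeedbackVector {};
    struct SharedFunctionInfo { TypeFeedbackVector* feedback_vector; };
    struct JSFunction { SharedFunctionInfo* shared; };
    struct JavaScriptFrame { JSFunction* function; };  // fp + kFunctionOffset

    TypeFeedbackVector* LoadTypeFeedbackVector(const JavaScriptFrame* frame) {
      return frame->function->shared->feedback_vector;
    }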
@@ -1249,10 +1258,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
+ mov(r0, Operand(actual.immediate()));
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
- mov(r0, Operand(actual.immediate()));
const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
if (expected.immediate() == sentinel) {
// Don't worry about adapting arguments for builtins that
@@ -1267,9 +1276,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
} else {
if (actual.is_immediate()) {
+ mov(r0, Operand(actual.immediate()));
cmp(expected.reg(), Operand(actual.immediate()));
b(eq, &regular_invoke);
- mov(r0, Operand(actual.immediate()));
} else {
cmp(expected.reg(), Operand(actual.reg()));
b(eq, &regular_invoke);
@@ -1388,26 +1397,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
}
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- b(lt, fail);
- cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- b(gt, fail);
-}
-
-
void MacroAssembler::IsObjectJSStringType(Register object,
Register scratch,
Label* fail) {
@@ -2003,21 +1992,6 @@ void MacroAssembler::CompareObjectType(Register object,
}
-void MacroAssembler::CheckObjectTypeRange(Register object,
- Register map,
- InstanceType min_type,
- InstanceType max_type,
- Label* false_label) {
- STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
- STATIC_ASSERT(LAST_TYPE < 256);
- ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- ldrb(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
- sub(ip, ip, Operand(min_type));
- cmp(ip, Operand(max_type - min_type));
- b(hi, false_label);
-}
-
-
void MacroAssembler::CompareInstanceType(Register map,
Register type_reg,
InstanceType type) {
@@ -2216,34 +2190,8 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
}
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function) {
- Label non_instance;
- if (miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
- b(ne, miss);
-
- ldr(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- ldr(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- tst(scratch,
- Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
- b(ne, miss);
-
- // Make sure that the function has an instance prototype.
- ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
- b(ne, &non_instance);
- }
-
+void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss) {
// Get the prototype or initial map from the function.
ldr(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -2263,15 +2211,6 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Get the prototype from the initial map.
ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- if (miss_on_bound_function) {
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- GetMapConstructor(result, result, scratch, ip);
- }
-
// All done.
bind(&done);
}
@@ -2542,13 +2481,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(r2, id);
+ GetBuiltinEntry(r2, native_context_index);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(r2));
Call(r2);
@@ -2561,20 +2499,20 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
+ int native_context_index) {
// Load the builtins object into target register.
ldr(target,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ ldr(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
- ldr(target, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+ ldr(target, ContextOperand(target, native_context_index));
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+void MacroAssembler::GetBuiltinEntry(Register target,
+ int native_context_index) {
DCHECK(!target.is(r1));
- GetBuiltinFunction(r1, id);
+ GetBuiltinFunction(r1, native_context_index);
// Load the code entry point from the builtins object.
ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
}
@@ -2710,6 +2648,12 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+ ldr(dst, GlobalObjectOperand());
+ ldr(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
+}
+
+
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
@@ -2871,6 +2815,19 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ tst(object, Operand(kSmiTagMask));
+ Check(ne, kOperandIsASmiAndNotAFunction);
+ push(object);
+ CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -2905,88 +2862,6 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
}
-void MacroAssembler::LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
- sub(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- JumpIfSmi(object, &is_smi);
- CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- eor(scratch1, scratch1, Operand(scratch2));
- and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- JumpIfSmi(probe, not_found);
- sub(scratch2, object, Operand(kHeapObjectTag));
- vldr(d0, scratch2, HeapNumber::kValueOffset);
- sub(probe, probe, Operand(kHeapObjectTag));
- vldr(d1, probe, HeapNumber::kValueOffset);
- VFPCompareAndSetFlags(d0, d1);
- b(ne, not_found); // The cache did not contain this value.
- b(&load_result_from_cache);
-
- bind(&is_smi);
- Register scratch = scratch1;
- and_(scratch, mask, Operand(object, ASR, 1));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- add(scratch,
- number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- cmp(object, probe);
- b(ne, not_found);
-
- // Get the result from the cache.
- bind(&load_result_from_cache);
- ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- IncrementCounter(isolate()->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
@@ -3386,75 +3261,6 @@ void MacroAssembler::CallCFunctionHelper(Register function,
}
-void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result,
- Register scratch) {
- Label small_constant_pool_load, load_result;
- ldr(result, MemOperand(ldr_location));
-
- if (FLAG_enable_embedded_constant_pool) {
- // Check if this is an extended constant pool load.
- and_(scratch, result, Operand(GetConsantPoolLoadMask()));
- teq(scratch, Operand(GetConsantPoolLoadPattern()));
- b(eq, &small_constant_pool_load);
- if (emit_debug_code()) {
- // Check that the instruction sequence is:
- // movw reg, #offset_low
- // movt reg, #offset_high
- // ldr reg, [pp, reg]
- Instr patterns[] = {GetMovWPattern(), GetMovTPattern(),
- GetLdrPpRegOffsetPattern()};
- for (int i = 0; i < 3; i++) {
- ldr(result, MemOperand(ldr_location, i * kInstrSize));
- and_(result, result, Operand(patterns[i]));
- cmp(result, Operand(patterns[i]));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
- }
- // Result was clobbered. Restore it.
- ldr(result, MemOperand(ldr_location));
- }
-
- // Get the offset into the constant pool. First extract movw immediate into
- // result.
- and_(scratch, result, Operand(0xfff));
- mov(ip, Operand(result, LSR, 4));
- and_(ip, ip, Operand(0xf000));
- orr(result, scratch, Operand(ip));
- // Then extract movt immediate and or into result.
- ldr(scratch, MemOperand(ldr_location, kInstrSize));
- and_(ip, scratch, Operand(0xf0000));
- orr(result, result, Operand(ip, LSL, 12));
- and_(scratch, scratch, Operand(0xfff));
- orr(result, result, Operand(scratch, LSL, 16));
-
- b(&load_result);
- }
-
- bind(&small_constant_pool_load);
- if (emit_debug_code()) {
- // Check that the instruction is a ldr reg, [<pc or pp> + offset] .
- and_(result, result, Operand(GetConsantPoolLoadPattern()));
- cmp(result, Operand(GetConsantPoolLoadPattern()));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
- // Result was clobbered. Restore it.
- ldr(result, MemOperand(ldr_location));
- }
-
- // Get the offset into the constant pool.
- const uint32_t kLdrOffsetMask = (1 << 12) - 1;
- and_(result, result, Operand(kLdrOffsetMask));
-
- bind(&load_result);
- // Get the address of the constant.
- if (FLAG_enable_embedded_constant_pool) {
- add(result, pp, Operand(result));
- } else {
- add(result, ldr_location, Operand(result));
- add(result, result, Operand(Instruction::kPCReadOffset));
- }
-}
-
-
void MacroAssembler::CheckPageFlag(
Register object,
Register scratch,
@@ -3866,7 +3672,7 @@ CodePatcher::CodePatcher(byte* address,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- CpuFeatures::FlushICache(address_, size_);
+ Assembler::FlushICacheWithoutIsolate(address_, size_);
}
// Check that the code was patched as expected.
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 5ec2bd3f8b..d78bf8f49a 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -164,6 +164,7 @@ class MacroAssembler: public Assembler {
void Pop(Register dst) { pop(dst); }
// Register move. May do nothing if the registers are identical.
+ void Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al);
void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
@@ -608,6 +609,9 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst);
+
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
@@ -662,15 +666,6 @@ class MacroAssembler: public Assembler {
InvokeFlag flag,
const CallWrapper& call_wrapper);
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- void IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail);
-
void IsObjectJSStringType(Register object,
Register scratch,
Label* fail);
@@ -851,11 +846,8 @@ class MacroAssembler: public Assembler {
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other registers may be
// clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function = false);
+ void TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
@@ -870,15 +862,6 @@ class MacroAssembler: public Assembler {
Register type_reg,
InstanceType type);
- // Compare object type for heap object. Branch to false_label if type
- // is lower than min_type or greater than max_type.
- // Load map into the register map.
- void CheckObjectTypeRange(Register heap_object,
- Register map,
- InstanceType min_type,
- InstanceType max_type,
- Label* false_label);
-
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -968,7 +951,23 @@ class MacroAssembler: public Assembler {
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
void CompareRoot(Register obj, Heap::RootListIndex index);
+ void PushRoot(Heap::RootListIndex index) {
+ LoadRoot(ip, index);
+ Push(ip);
+ }
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
+ CompareRoot(with, index);
+ b(eq, if_equal);
+ }
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, Heap::RootListIndex index,
+ Label* if_not_equal) {
+ CompareRoot(with, index);
+ b(ne, if_not_equal);
+ }
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
@@ -1154,18 +1153,16 @@ class MacroAssembler: public Assembler {
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+ // Invoke specified builtin JavaScript function.
+ void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
// setup the function in r1.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ void GetBuiltinEntry(Register target, int native_context_index);
// Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void GetBuiltinFunction(Register target, int native_context_index);
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
@@ -1311,6 +1308,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+ void AssertFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1330,18 +1330,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- void LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
// Checks if both objects are sequential one-byte strings and jumps to label
// if either is not. Assumes that neither object is a smi.
void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
@@ -1375,14 +1363,6 @@ class MacroAssembler: public Assembler {
Register value,
uint32_t encoding_mask);
- // ---------------------------------------------------------------------------
- // Patching helpers.
-
- // Get the location of a relocated constant (its address in the constant pool)
- // from its load site.
- void GetRelocatedValueLocation(Register ldr_location, Register result,
- Register scratch);
-
void ClampUint8(Register output_reg, Register input_reg);
@@ -1429,6 +1409,9 @@ class MacroAssembler: public Assembler {
DecodeField<Field>(reg, reg);
}
+ // Load the type feedback vector from a JavaScript frame.
+ void EmitLoadTypeFeedbackVector(Register vector);
+
// Activation support.
void EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg = false);
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 3fbb09147b..f02207f549 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -665,7 +665,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
Memory::Address_at(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
- // CpuFeatures::FlushICache(pc, sizeof(target));
+ // Assembler::FlushICacheWithoutIsolate(pc, sizeof(target));
// However, on ARM, an instruction is actually patched in the case of
// embedded constants of the form:
// ldr ip, [pc, #...]
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index 235b5ee2bc..37a2f5a29d 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -29,6 +29,7 @@
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_REG_STATICS
+#include "src/arm64/assembler-arm64.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/frames-arm64.h"
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index a7e5a06640..f20be8315e 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -2279,7 +2279,7 @@ class PatchingAssembler : public Assembler {
DCHECK(IsConstPoolEmpty());
// Flush the Instruction cache.
size_t length = buffer_size_ - kGap;
- CpuFeatures::FlushICache(buffer_, length);
+ Assembler::FlushICacheWithoutIsolate(buffer_, length);
}
// See definition of PatchAdrFar() for details.
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index 19a83646f9..4331198017 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -51,12 +51,19 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// -- x0 : number of arguments excluding receiver
// -- x1 : called function (only guaranteed when
// extra_args requires it)
- // -- cp : context
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument (argc == x0)
// -- sp[4 * argc] : receiver
// -----------------------------------
+ __ AssertFunction(x1);
+
+ // Make sure we operate in the context of the called function (for example
+ // ConstructStubs implemented in C++ will be run in the context of the caller
+ // instead of the callee, due to the way that [[Construct]] is defined for
+ // ordinary functions).
+ // TODO(bmeurer): Can we make this more robust?
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
@@ -131,7 +138,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
@@ -139,119 +147,131 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
// -----------------------------------
- ASM_LOCATION("Builtins::Generate_StringConstructCode");
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1, x10, x11);
-
- Register argc = x0;
- Register function = x1;
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, x10);
- __ Cmp(function, x10);
- __ Assert(eq, kUnexpectedStringFunction);
- }
+ ASM_LOCATION("Builtins::Generate_StringConstructor");
- // Load the first arguments in x0 and get rid of the rest.
+ // 1. Load the first argument into x0 and get rid of the rest (including the
+ // receiver).
Label no_arguments;
- __ Cbz(argc, &no_arguments);
- // First args = sp[(argc - 1) * 8].
- __ Sub(argc, argc, 1);
- __ Drop(argc, kXRegSize);
- // jssp now point to args[0], load and drop args[0] + receiver.
- Register arg = argc;
- __ Ldr(arg, MemOperand(jssp, 2 * kPointerSize, PostIndex));
- argc = NoReg;
-
- Register argument = x2;
- Label not_cached, argument_is_string;
- __ LookupNumberStringCache(arg, // Input.
- argument, // Result.
- x10, // Scratch.
- x11, // Scratch.
- x12, // Scratch.
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1, x10, x11);
- __ Bind(&argument_is_string);
-
- // ----------- S t a t e -------------
- // -- x2 : argument converted to string
- // -- x1 : constructor function
- // -- lr : return address
- // -----------------------------------
+ {
+ __ Cbz(x0, &no_arguments);
+ __ Sub(x0, x0, 1);
+ __ Drop(x0);
+ __ Ldr(x0, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ }
- Label gc_required;
- Register new_obj = x0;
- __ Allocate(JSValue::kSize, new_obj, x10, x11, &gc_required, TAG_OBJECT);
+ // 2a. At least one argument, return x0 if it's a string, otherwise
+ // dispatch to appropriate conversion.
+ Label to_string, symbol_descriptive_string;
+ {
+ __ JumpIfSmi(x0, &to_string);
+ STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+ __ CompareObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE);
+ __ B(hi, &to_string);
+ __ B(eq, &symbol_descriptive_string);
+ __ Ret();
+ }
- // Initialize the String object.
- Register map = x3;
- __ LoadGlobalFunctionInitialMap(function, map, x10);
- if (FLAG_debug_code) {
- __ Ldrb(x4, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ Cmp(x4, JSValue::kSize >> kPointerSizeLog2);
- __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
- __ Ldrb(x4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ Cmp(x4, 0);
- __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
+ // 2b. No arguments, return the empty string (and pop the receiver).
+ __ Bind(&no_arguments);
+ {
+ __ LoadRoot(x0, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ Ret();
}
- __ Str(map, FieldMemOperand(new_obj, HeapObject::kMapOffset));
- Register empty = x3;
- __ LoadRoot(empty, Heap::kEmptyFixedArrayRootIndex);
- __ Str(empty, FieldMemOperand(new_obj, JSObject::kPropertiesOffset));
- __ Str(empty, FieldMemOperand(new_obj, JSObject::kElementsOffset));
+ // 3a. Convert x0 to a string.
+ __ Bind(&to_string);
+ {
+ ToStringStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ }
- __ Str(argument, FieldMemOperand(new_obj, JSValue::kValueOffset));
+ // 3b. Convert symbol in x0 to a string.
+ __ Bind(&symbol_descriptive_string);
+ {
+ __ Push(x0);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ }
+}
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == (4 * kPointerSize));
- __ Ret();
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
+ ASM_LOCATION("Builtins::Generate_StringConstructor_ConstructStub");
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ Bind(&not_cached);
- __ JumpIfSmi(arg, &convert_argument);
-
- // Is it a String?
- __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
- __ Ldrb(x11, FieldMemOperand(x10, Map::kInstanceTypeOffset));
- __ Tbnz(x11, MaskToBit(kIsNotStringMask), &convert_argument);
- __ Mov(argument, arg);
- __ IncrementCounter(counters->string_ctor_string_value(), 1, x10, x11);
- __ B(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into x2.
- __ Bind(&convert_argument);
- __ Push(function); // Preserve the function.
- __ IncrementCounter(counters->string_ctor_conversions(), 1, x10, x11);
+ // 1. Load the first argument into x2 and get rid of the rest (including the
+ // receiver).
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(arg);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ Label no_arguments, done;
+ __ Cbz(x0, &no_arguments);
+ __ Sub(x0, x0, 1);
+ __ Drop(x0);
+ __ Ldr(x2, MemOperand(jssp, 2 * kPointerSize, PostIndex));
+ __ B(&done);
+ __ Bind(&no_arguments);
+ __ Drop(1);
+ __ LoadRoot(x2, Heap::kempty_stringRootIndex);
+ __ Bind(&done);
}
- __ Pop(function);
- __ Mov(argument, x0);
- __ B(&argument_is_string);
- // Load the empty string into x2, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ Bind(&no_arguments);
- __ LoadRoot(argument, Heap::kempty_stringRootIndex);
- __ Drop(1);
- __ B(&argument_is_string);
+ // 2. Make sure x2 is a string.
+ {
+ Label convert, done_convert;
+ __ JumpIfSmi(x2, &convert);
+ __ JumpIfObjectType(x2, x3, x3, FIRST_NONSTRING_TYPE, &done_convert, lo);
+ __ Bind(&convert);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ToStringStub stub(masm->isolate());
+ __ Push(x1);
+ __ Move(x0, x2);
+ __ CallStub(&stub);
+ __ Move(x2, x0);
+ __ Pop(x1);
+ }
+ __ Bind(&done_convert);
+ }
- // At this point the argument is already a string. Call runtime to create a
- // string wrapper.
- __ Bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1, x10, x11);
+ // 3. Allocate a JSValue wrapper for the string.
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ // ----------- S t a t e -------------
+ // -- x1 : constructor function
+ // -- x2 : the first argument
+ // -- lr : return address
+ // -----------------------------------
+
+ Label allocate, done_allocate;
+ __ Allocate(JSValue::kSize, x0, x3, x4, &allocate, TAG_OBJECT);
+ __ Bind(&done_allocate);
+
+    // Initialize the JSValue in x0.
+ __ LoadGlobalFunctionInitialMap(x1, x3, x4);
+ __ Str(x3, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
+ __ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
+ __ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
+ __ Str(x2, FieldMemOperand(x0, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fallback to the runtime to allocate in new space.
+ __ Bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x1, x2);
+ __ Push(Smi::FromInt(JSValue::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ Pop(x2, x1);
+ }
+ __ B(&done_allocate);
}
- __ Ret();
}
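
The String constructor is now split into a call path and a construct path: Generate_StringConstructor returns a primitive string (strings pass through, symbols get their descriptive string, everything else goes through ToStringStub), while Generate_StringConstructor_ConstructStub additionally wraps the converted string in a JSValue. A rough model of the call path, with stand-in types rather than V8 objects:

    #include <string>

    enum class Kind { kString, kSymbol, kOther };
    struct Value { Kind kind; std::string payload; };

    std::string ToString(const Value& v) { return v.payload; }  // models ToStringStub

    // Models Generate_StringConstructor; nullptr models the no-argument case.
    std::string StringCall(const Value* arg) {
      if (arg == nullptr) return "";                         // empty_string root
      if (arg->kind == Kind::kString) return arg->payload;   // fast path
      if (arg->kind == Kind::kSymbol)                        // kSymbolDescriptiveString
        return "Symbol(" + arg->payload + ")";
      return ToString(*arg);
    }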
@@ -302,8 +322,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_memento) {
+ bool is_api_function) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
@@ -314,8 +333,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -----------------------------------
ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
- // Should never create mementos for api functions.
- DCHECK(!is_api_function || !create_memento);
Isolate* isolate = masm->isolate();
@@ -396,15 +413,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Register obj_size = x3;
Register new_obj = x4;
__ Ldrb(obj_size, FieldMemOperand(init_map, Map::kInstanceSizeOffset));
- if (create_memento) {
- __ Add(x7, obj_size,
- Operand(AllocationMemento::kSize / kPointerSize));
- __ Allocate(x7, new_obj, x10, x11, &rt_call_reload_new_target,
- SIZE_IN_WORDS);
- } else {
- __ Allocate(obj_size, new_obj, x10, x11, &rt_call_reload_new_target,
- SIZE_IN_WORDS);
- }
+ __ Allocate(obj_size, new_obj, x10, x11, &rt_call_reload_new_target,
+ SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to
// initial map and properties and elements are set to empty fixed array.
@@ -474,25 +484,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&no_inobject_slack_tracking);
}
- if (create_memento) {
- // Fill the pre-allocated fields with undef.
- __ FillFields(first_prop, prop_fields, filler);
- __ Add(first_prop, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
- __ LoadRoot(x14, Heap::kAllocationMementoMapRootIndex);
- DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
- __ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
- // Load the AllocationSite
- __ Peek(x14, 3 * kXRegSize);
- __ AssertUndefinedOrAllocationSite(x14, x10);
- DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
- __ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
- first_prop = NoReg;
- } else {
- // Fill all of the property fields with undef.
- __ FillFields(first_prop, prop_fields, filler);
- first_prop = NoReg;
- prop_fields = NoReg;
- }
+
+ // Fill all of the property fields with undef.
+ __ FillFields(first_prop, prop_fields, filler);
+ first_prop = NoReg;
+ prop_fields = NoReg;
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on.
@@ -510,40 +506,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// x1: constructor function
// x3: original constructor
__ Bind(&rt_call);
- Label count_incremented;
- if (create_memento) {
- // Get the cell or allocation site.
- __ Peek(x4, 3 * kXRegSize);
- __ Push(x4, constructor, original_constructor); // arguments 1-3
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- __ Mov(x4, x0);
- // If we ended up using the runtime, and we want a memento, then the
- // runtime call made it for us, and we shouldn't do create count
- // increment.
- __ B(&count_incremented);
- } else {
- __ Push(constructor, original_constructor); // arguments 1-2
- __ CallRuntime(Runtime::kNewObject, 2);
- __ Mov(x4, x0);
- }
+ __ Push(constructor, original_constructor); // arguments 1-2
+ __ CallRuntime(Runtime::kNewObject, 2);
+ __ Mov(x4, x0);
// Receiver for constructor call allocated.
// x4: JSObject
__ Bind(&allocated);
- if (create_memento) {
- __ Peek(x10, 3 * kXRegSize);
- __ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &count_incremented);
- // r2 is an AllocationSite. We are creating a memento from it, so we
- // need to increment the memento create count.
- __ Ldr(x5, FieldMemOperand(x10,
- AllocationSite::kPretenureCreateCountOffset));
- __ Add(x5, x5, Operand(Smi::FromInt(1)));
- __ Str(x5, FieldMemOperand(x10,
- AllocationSite::kPretenureCreateCountOffset));
- __ bind(&count_incremented);
- }
-
// Restore the parameters.
__ Pop(original_constructor);
__ Pop(constructor);
@@ -649,12 +619,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true);
}
@@ -751,17 +721,13 @@ enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
// Clobbers x10, x15; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm,
- const int calleeOffset, Register argc,
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
IsTagged argc_is_tagged) {
- Register function = x15;
-
// Check the stack for overflow.
// We are not trying to catch interruptions (e.g. debug break and
// preemption) here, so the "real stack limit" is checked.
Label enough_stack_space;
__ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
- __ Ldr(function, MemOperand(fp, calleeOffset));
// Make x10 the space we have left. The stack might already be overflowed
// here which will cause x10 to become negative.
// TODO(jbramley): Check that the stack usage here is safe.
@@ -774,13 +740,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ Cmp(x10, Operand(argc, LSL, kPointerSizeLog2));
}
__ B(gt, &enough_stack_space);
- // There is not enough stack space, so use a builtin to throw an appropriate
- // error.
- if (argc_is_tagged == kArgcIsUntaggedInt) {
- __ SmiTag(argc);
- }
- __ Push(function, argc);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
  // We should never return from the runtime call.
if (__ emit_debug_code()) {
__ Unreachable();
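
As a cross-check on the arithmetic, the limit test above amounts to the following minimal C++ model; kPointerSize, the signature, and the error text are stand-ins of the sketch, not V8 API. The failure path now reaches Runtime::kThrowStackOverflow directly instead of going through the deleted STACK_OVERFLOW builtin.

    #include <cstdint>
    #include <stdexcept>

    constexpr uint64_t kPointerSize = 8;  // AArch64 slot size (assumption)

    // Succeed only if the gap between the stack pointer and the real stack
    // limit can hold argc pointer-sized slots; the gap is computed as a
    // signed value because the stack may already be overflowed here.
    void CheckStackOverflow(uintptr_t sp, uintptr_t real_stack_limit,
                            uint64_t argc) {
      int64_t space_left = static_cast<int64_t>(sp - real_stack_limit);
      if (space_left <= static_cast<int64_t>(argc * kPointerSize)) {
        throw std::runtime_error("RangeError: Maximum call stack size exceeded");
      }
    }
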
@@ -791,7 +751,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
// Input:
-// x0: code entry.
+// x0: new.target.
// x1: function.
// x2: receiver.
// x3: argc.
@@ -801,10 +761,12 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from JSEntryStub::GenerateBody().
+ Register new_target = x0;
Register function = x1;
Register receiver = x2;
Register argc = x3;
Register argv = x4;
+ Register scratch = x10;
ProfileEntryHookStub::MaybeCallEntryHook(masm);
@@ -815,8 +777,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Enter an internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
- // Set up the context from the function argument.
- __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // Set up the context (we need to use the caller context from the isolate).
+ __ Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
+ masm->isolate())));
+ __ Ldr(cp, MemOperand(scratch));
__ InitializeRootRegister();
@@ -824,20 +788,15 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Push(function, receiver);
// Check if we have enough stack space to push all arguments.
- // The function is the first thing that was pushed above after entering
- // the internal frame.
- const int kFunctionOffset =
- InternalFrameConstants::kCodeOffset - kPointerSize;
// Expects the argument count in x3. Clobbers x10 and x15.
- Generate_CheckStackOverflow(masm, kFunctionOffset, argc,
- kArgcIsUntaggedInt);
+ Generate_CheckStackOverflow(masm, argc, kArgcIsUntaggedInt);
// Copy arguments to the stack in a loop, in reverse order.
// x3: argc.
// x4: argv.
Label loop, entry;
// Compute the copy end address.
- __ Add(x10, argv, Operand(argc, LSL, kPointerSizeLog2));
+ __ Add(scratch, argv, Operand(argc, LSL, kPointerSizeLog2));
__ B(&entry);
__ Bind(&loop);
@@ -845,9 +804,15 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Ldr(x12, MemOperand(x11)); // Dereference the handle.
__ Push(x12); // Push the argument.
__ Bind(&entry);
- __ Cmp(x10, argv);
+ __ Cmp(scratch, argv);
__ B(ne, &loop);
+ __ Mov(scratch, argc);
+ __ Mov(argc, new_target);
+ __ Mov(new_target, scratch);
+ // x0: argc.
+ // x3: new.target.
+
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
// The original values have been saved in JSEntryStub::GenerateBody().
@@ -864,17 +829,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// x28 : JS stack pointer (jssp).
// x29 : frame pointer (fp).
- __ Mov(x0, argc);
- if (is_construct) {
- // No type feedback cell is available.
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ Handle<Code> builtin = is_construct
+ ? masm->isolate()->builtins()->Construct()
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
- CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(x0);
- __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
- }
// Exit the JS internal frame and remove the parameters (except function),
// and return.
}
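
The register shuffle at the end of the trampoline is easy to misread in raw assembly. A minimal sketch, with plain integers standing in for the x-registers (the struct is an assumption of the model): on entry x0 carries new.target and x3 carries argc; three moves through the scratch register leave argc in x0 and new.target in x3, which is the convention the unified Call/Construct builtins expect.

    #include <cstdint>

    struct Regs { uint64_t x0, x3, x10; };  // stand-ins for the registers

    // Three-move swap through the scratch register x10, mirroring the
    // Mov sequence above.
    void SwapArgcAndNewTarget(Regs& r) {
      r.x10 = r.x3;   // scratch <- argc
      r.x3  = r.x0;   // x3 <- new.target
      r.x0  = r.x10;  // x0 <- argc
    }

Routing both entry paths through the generic Construct/Call builtins is what lets the proxy and non-function special cases disappear from this file.
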
@@ -945,7 +904,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Sub(x10, jssp, Operand(x11));
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ Bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -1025,9 +984,11 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- // Drop receiver + arguments.
- // TODO(rmcilroy): Get number of arguments from BytecodeArray.
- __ Drop(1, kXRegSize);
+
+ // Drop receiver + arguments and return.
+ __ Ldr(w1, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kParameterSizeOffset));
+ __ Drop(x1, 1);
__ Ret();
}
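
A sketch of the new epilogue arithmetic, assuming (as the load above suggests) that BytecodeArray's parameter-size field holds the size of the receiver-plus-arguments area in bytes; the struct is a stand-in, not V8's object layout.

    #include <cstdint>

    struct BytecodeArrayModel { uint32_t parameter_size_bytes; };

    // The stack grows down, so dropping the receiver and the arguments
    // after LeaveFrame is just adding the parameter area size (in bytes,
    // hence Drop(x1, 1) above with a unit size of one) to the stack pointer.
    uintptr_t DropReceiverAndArguments(uintptr_t sp,
                                       const BytecodeArrayModel& ba) {
      return sp + ba.parameter_size_bytes;
    }
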
@@ -1288,21 +1249,15 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- enum {
- call_type_JS_func = 0,
- call_type_func_proxy = 1,
- call_type_non_func = 2
- };
Register argc = x0;
Register function = x1;
- Register call_type = x4;
Register scratch1 = x10;
Register scratch2 = x11;
- Register receiver_type = x13;
ASM_LOCATION("Builtins::Generate_FunctionCall");
// 1. Make sure we have at least one argument.
- { Label done;
+ {
+ Label done;
__ Cbnz(argc, &done);
__ LoadRoot(scratch1, Heap::kUndefinedValueRootIndex);
__ Push(scratch1);
@@ -1310,107 +1265,14 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Bind(&done);
}
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- Label slow, non_function;
+ // 2. Get the callable to call (passed as receiver) from the stack.
__ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
- __ JumpIfSmi(function, &non_function);
- __ JumpIfNotObjectType(function, scratch1, receiver_type,
- JS_FUNCTION_TYPE, &slow);
-
- // 3a. Patch the first argument if necessary when calling a function.
- Label shift_arguments;
- __ Mov(call_type, static_cast<int>(call_type_JS_func));
- { Label convert_to_object, use_global_proxy, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- // Also do not transform the receiver for native (Compilerhints already in
- // x3).
- __ Ldr(scratch1,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(scratch2.W(),
- FieldMemOperand(scratch1, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestAndBranchIfAnySet(
- scratch2.W(),
- (1 << SharedFunctionInfo::kStrictModeFunction) |
- (1 << SharedFunctionInfo::kNative),
- &shift_arguments);
-
- // Compute the receiver in sloppy mode.
- Register receiver = x2;
- __ Sub(scratch1, argc, 1);
- __ Peek(receiver, Operand(scratch1, LSL, kXRegSizeLog2));
- __ JumpIfSmi(receiver, &convert_to_object);
-
- __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
- &use_global_proxy);
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_proxy);
-
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ JumpIfObjectType(receiver, scratch1, scratch2,
- FIRST_SPEC_OBJECT_TYPE, &shift_arguments, ge);
-
- __ Bind(&convert_to_object);
-
- {
- // Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(argc);
-
- __ Push(argc);
- __ Mov(x0, receiver);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ Mov(receiver, x0);
-
- __ Pop(argc);
- __ SmiUntag(argc);
-
- // Exit the internal frame.
- }
-
- // Restore the function and flag in the registers.
- __ Peek(function, Operand(argc, LSL, kXRegSizeLog2));
- __ Mov(call_type, static_cast<int>(call_type_JS_func));
- __ B(&patch_receiver);
-
- __ Bind(&use_global_proxy);
- __ Ldr(receiver, GlobalObjectMemOperand());
- __ Ldr(receiver,
- FieldMemOperand(receiver, GlobalObject::kGlobalProxyOffset));
-
- __ Bind(&patch_receiver);
- __ Sub(scratch1, argc, 1);
- __ Poke(receiver, Operand(scratch1, LSL, kXRegSizeLog2));
-
- __ B(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ Bind(&slow);
- __ Mov(call_type, static_cast<int>(call_type_func_proxy));
- __ Cmp(receiver_type, JS_FUNCTION_PROXY_TYPE);
- __ B(eq, &shift_arguments);
- __ Bind(&non_function);
- __ Mov(call_type, static_cast<int>(call_type_non_func));
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- // call type (0: JS function, 1: function proxy, 2: non-function)
- __ Sub(scratch1, argc, 1);
- __ Poke(function, Operand(scratch1, LSL, kXRegSizeLog2));
-
- // 4. Shift arguments and return address one slot down on the stack
+ // 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
- // call type (0: JS function, 1: function proxy, 2: non-function)
- __ Bind(&shift_arguments);
- { Label loop;
+ {
+ Label loop;
// Calculate the copy start address (destination). Copy end address is jssp.
__ Add(scratch2, jssp, Operand(argc, LSL, kPointerSizeLog2));
__ Sub(scratch1, scratch2, kPointerSize);
@@ -1426,46 +1288,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Drop(1);
}
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- // call type (0: JS function, 1: function proxy, 2: non-function)
- { Label js_function, non_proxy;
- __ Cbz(call_type, &js_function);
- // Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ Mov(x2, 0);
- __ Cmp(call_type, static_cast<int>(call_type_func_proxy));
- __ B(ne, &non_proxy);
-
- __ Push(function); // Re-add proxy object as additional argument.
- __ Add(argc, argc, 1);
- __ GetBuiltinFunction(function, Builtins::CALL_FUNCTION_PROXY);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ Bind(&non_proxy);
- __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ Bind(&js_function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- __ Ldr(x3, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(x2,
- FieldMemOperand(x3,
- SharedFunctionInfo::kFormalParameterCountOffset));
- Label dont_adapt_args;
- __ Cmp(x2, argc); // Check formal and actual parameter counts.
- __ B(eq, &dont_adapt_args);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ Bind(&dont_adapt_args);
-
- __ Ldr(x3, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
- ParameterCount expected(0);
- __ InvokeCode(x3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
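
What is left of Generate_FunctionCall is small enough to restate as a behavioural sketch; Value, GenericCall, and the vector-as-stack layout are assumptions of the model, not V8 types.

    #include <vector>

    using Value = const char*;  // tagged-value stand-in

    static void GenericCall(Value /*target*/, std::vector<Value>& /*args*/) {
      // stands in for the tail jump to Builtins::Call
    }

    // Layout assumption: frame[0] is the callable that arrived in the
    // receiver slot, frame[1..] are the arguments.
    void FunctionPrototypeCall(std::vector<Value>& frame, Value undefined) {
      // 1. Make sure there is at least one argument; it becomes the receiver.
      if (frame.size() == 1) frame.push_back(undefined);
      // 2. Take the callable from the receiver slot. There is no JSFunction
      //    check left here; Builtins::Call now does the triage.
      Value target = frame.front();
      // 3. Shift everything one slot down, dropping the old receiver slot.
      frame.erase(frame.begin());
      // 4. Tail-call the generic Call builtin.
      GenericCall(target, frame);
    }
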
@@ -1549,96 +1373,30 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ Ldr(args, MemOperand(fp, kArgumentsOffset));
__ Push(function, args);
if (targetIsArgument) {
- __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
}
Register argc = x0;
- Generate_CheckStackOverflow(masm, kFunctionOffset, argc, kArgcIsSmiTagged);
+ Generate_CheckStackOverflow(masm, argc, kArgcIsSmiTagged);
- // Push current limit and index.
+ // Push current limit, index and receiver.
__ Mov(x1, 0); // Initial index.
- __ Push(argc, x1);
-
- Label push_receiver;
__ Ldr(receiver, MemOperand(fp, kReceiverOffset));
-
- // Check that the function is a JS function. Otherwise it must be a proxy.
- // When it is not the function proxy will be invoked later.
- __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE,
- &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
- // Load the shared function info.
- __ Ldr(x2, FieldMemOperand(function,
- JSFunction::kSharedFunctionInfoOffset));
-
- // Compute and push the receiver.
- // Do not transform the receiver for strict mode functions.
- Label convert_receiver_to_object, use_global_proxy;
- __ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
- __ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver);
- // Do not transform the receiver for native functions.
- __ Tbnz(x10, SharedFunctionInfo::kNative, &push_receiver);
-
- // Compute the receiver in sloppy mode.
- __ JumpIfSmi(receiver, &convert_receiver_to_object);
- __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_proxy);
- __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
- &use_global_proxy);
-
- // Check if the receiver is already a JavaScript object.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ JumpIfObjectType(receiver, x10, x11, FIRST_SPEC_OBJECT_TYPE,
- &push_receiver, ge);
-
- // Call a builtin to convert the receiver to a regular object.
- __ Bind(&convert_receiver_to_object);
- __ Mov(x0, receiver);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ Mov(receiver, x0);
- __ B(&push_receiver);
-
- __ Bind(&use_global_proxy);
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
-
- // Push the receiver
- __ Bind(&push_receiver);
- __ Push(receiver);
+ __ Push(argc, x1, receiver);
// Copy all arguments from the array to the stack.
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
- // At the end of the loop, the number of arguments is stored in 'current',
- // represented as a smi.
-
- function = x1; // From now on we want the function to be kept in x1;
- __ Ldr(function, MemOperand(fp, kFunctionOffset));
-
- // Call the function.
- Label call_proxy;
- ParameterCount actual(x0);
- __ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE, &call_proxy);
- __ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
- frame_scope.GenerateLeaveFrame();
- __ Drop(kStackSize);
- __ Ret();
+ // At the end of the loop, the number of arguments is stored in x0, untagged.
- // Call the function proxy.
- __ Bind(&call_proxy);
- // x0 : argc
- // x1 : function
- __ Push(function); // Add function proxy as last argument.
- __ Add(x0, x0, 1);
- __ Mov(x2, 0);
- __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ // Call the callable.
+ // TODO(bmeurer): This should be a tail call according to ES6.
+ __ Ldr(x1, MemOperand(fp, kFunctionOffset));
+ __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
__ Drop(kStackSize);
__ Ret();
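
The apply path reduces to the same shape. A behavioural sketch, with the same stand-in types as before, of what survives now that the receiver-conversion block is gone (conversion happens inside Builtins::Call from here on):

    #include <vector>

    using Value = const char*;  // tagged-value stand-in

    static void GenericCall(Value /*target*/, Value /*receiver*/,
                            std::vector<Value>& /*args*/) {}

    void ApplyHelper(Value target, Value receiver,
                     const std::vector<Value>& arg_list) {
      // Generate_PushAppliedArguments: copy the list onto the stack as-is.
      std::vector<Value> pushed(arg_list.begin(), arg_list.end());
      // The receiver is passed through unconverted; note the TODO above
      // about this becoming a proper tail call per ES6.
      GenericCall(target, receiver, pushed);
    }
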
@@ -1689,10 +1447,11 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
__ Ldr(args, MemOperand(fp, kArgumentsOffset));
__ Ldr(newTarget, MemOperand(fp, kNewTargetOffset));
__ Push(function, args, newTarget);
- __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
Register argc = x0;
- Generate_CheckStackOverflow(masm, kFunctionOffset, argc, kArgcIsSmiTagged);
+ Generate_CheckStackOverflow(masm, argc, kArgcIsSmiTagged);
// Push current limit and index & constructor function as callee.
__ Mov(x1, 0); // Initial index.
@@ -1782,6 +1541,249 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+
+ Label convert, convert_global_proxy, convert_to_object, done_convert;
+ __ AssertFunction(x1);
+ // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
+ // slot is "classConstructor".
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestAndBranchIfAnySet(w3,
+ (1 << SharedFunctionInfo::kNative) |
+ (1 << SharedFunctionInfo::kStrictModeFunction),
+ &done_convert);
+ {
+ __ Peek(x3, Operand(x0, LSL, kXRegSizeLog2));
+
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the function to call (checked to be a JSFunction)
+ // -- x2 : the shared function info.
+ // -- x3 : the receiver
+ // -- cp : the function context.
+ // -----------------------------------
+
+ Label convert_receiver;
+ __ JumpIfSmi(x3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(x3, x4, x4, FIRST_JS_RECEIVER_TYPE);
+ __ B(hs, &done_convert);
+ __ JumpIfRoot(x3, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
+ __ JumpIfNotRoot(x3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ Bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(x3);
+ }
+ __ B(&convert_receiver);
+ __ Bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(x0);
+ __ Push(x0, x1);
+ __ Mov(x0, x3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ Mov(x3, x0);
+ __ Pop(x1, x0);
+ __ SmiUntag(x0);
+ }
+ __ Ldr(x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Bind(&convert_receiver);
+ __ Poke(x3, Operand(x0, LSL, kXRegSizeLog2));
+ }
+ __ Bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the function to call (checked to be a JSFunction)
+ // -- x2 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ __ Ldrsw(
+ x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Ldr(x3, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+ ParameterCount actual(x0);
+ ParameterCount expected(x2);
+ __ InvokeCode(x3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+}
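
The conversion above implements the receiver fix-up of ES6 9.2.1. As a sketch (Kind, Value, and the two helpers are stand-ins of the model, not V8 signatures): strict and native functions take the receiver unchanged, undefined and null become the global proxy, and any other primitive goes through ToObject.

    enum class Kind { kSmi, kUndefined, kNull, kJSReceiver, kOtherPrimitive };
    struct Value { Kind kind; };

    static Value GlobalProxy() { return {Kind::kJSReceiver}; }    // from cp
    static Value ToObject(Value) { return {Kind::kJSReceiver}; }  // ToObjectStub

    Value ConvertReceiver(Value receiver, bool is_strict, bool is_native) {
      if (is_strict || is_native) return receiver;  // taken as-is
      switch (receiver.kind) {
        case Kind::kJSReceiver: return receiver;        // already an object
        case Kind::kUndefined:
        case Kind::kNull:       return GlobalProxy();   // patch to the proxy
        default:                return ToObject(receiver);  // smis, primitives
      }
    }
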
+
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ Label non_callable, non_function, non_smi;
+ __ JumpIfSmi(x1, &non_callable);
+ __ Bind(&non_smi);
+ __ CompareObjectType(x1, x4, x5, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
+ eq);
+ __ Cmp(x5, JS_FUNCTION_PROXY_TYPE);
+ __ B(ne, &non_function);
+
+ // 1. Call to function proxy.
+ // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
+ __ Ldr(x1, FieldMemOperand(x1, JSFunctionProxy::kCallTrapOffset));
+ __ AssertNotSmi(x1);
+ __ B(&non_smi);
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not we raise an exception).
+ __ Bind(&non_function);
+ // Check if target has a [[Call]] internal method.
+ __ Ldrb(x4, FieldMemOperand(x4, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x4, 1 << Map::kIsCallable, &non_callable);
+ // Overwrite the original receiver with the (original) target.
+ __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, x1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ Bind(&non_callable);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
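
This dispatch is the heart of the unified calling convention. A control-flow sketch with stand-in types; note how the proxy case loops back, exactly like the branch to &non_smi above:

    #include <stdexcept>

    enum class Type { kSmi, kJSFunction, kJSFunctionProxy, kCallableObject,
                      kNonCallable };

    static void CallFunction(Type) {}  // Builtins::CallFunction stand-in
    static Type CallTrap(Type) { return Type::kJSFunction; }  // call trap load

    void Call(Type target) {
      for (;;) {
        switch (target) {
          case Type::kJSFunction:
            return CallFunction(target);
          case Type::kJSFunctionProxy:
            target = CallTrap(target);  // re-dispatch on the trap, like the
            continue;                   // branch back to &non_smi above
          case Type::kCallableObject:
            // The receiver slot is overwritten with the target and the
            // CALL_AS_FUNCTION_DELEGATE runs as an ordinary JSFunction.
            return CallFunction(target);
          default:  // smis and objects without a [[Call]] internal method
            throw std::runtime_error("TypeError: called non-callable");
        }
      }
    }
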
+
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the constructor to call (checked to be a JSFunction)
+ // -- x3 : the original constructor (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(x1);
+ __ AssertFunction(x3);
+
+ // The calling convention for function-specific ConstructStubs requires
+ // x2 to contain either an AllocationSite or undefined.
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
+ __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x4);
+}
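
The last three instructions compute the raw entry point from a tagged Code pointer. As plain arithmetic (kHeapObjectTag is V8's tag value of 1; the header size here is an illustrative stand-in):

    #include <cstdint>

    constexpr uintptr_t kHeapObjectTag = 1;
    constexpr uintptr_t kCodeHeaderSize = 64;  // illustrative stand-in

    // A Code pointer is heap-tagged, so the first instruction lives at
    // code + Code::kHeaderSize - kHeapObjectTag; Br(x4) then jumps there.
    uintptr_t EntryPoint(uintptr_t tagged_code) {
      return tagged_code + kCodeHeaderSize - kHeapObjectTag;
    }
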
+
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the constructor to call (checked to be a JSFunctionProxy)
+ // -- x3 : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
+ __ Ldr(x1, FieldMemOperand(x1, JSFunctionProxy::kConstructTrapOffset));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x1 : the constructor to call (can be any Object)
+ // -- x3 : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // Check if target has a [[Construct]] internal method.
+ Label non_constructor;
+ __ JumpIfSmi(x1, &non_constructor);
+ __ Ldr(x4, FieldMemOperand(x1, HeapObject::kMapOffset));
+ __ Ldrb(x2, FieldMemOperand(x4, Map::kBitFieldOffset));
+ __ TestAndBranchIfAllClear(x2, 1 << Map::kIsConstructor, &non_constructor);
+
+ // Dispatch based on instance type.
+ __ CompareInstanceType(x4, x5, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET, eq);
+ __ Cmp(x5, JS_FUNCTION_PROXY_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+ eq);
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ Poke(x1, Operand(x0, LSL, kXRegSizeLog2));
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, x1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ Bind(&non_constructor);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(x1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
+
+
+// static
+void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- x1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ Add(x3, x0, Operand(1)); // Add one for receiver.
+ __ Lsl(x3, x3, kPointerSizeLog2);
+ __ Sub(x4, x2, x3);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Mov(x5, jssp);
+ __ Claim(x3, 1);
+ __ B(&loop_check);
+ __ Bind(&loop_header);
+ // TODO(rmcilroy): Push two at a time once we ensure we keep stack aligned.
+ __ Ldr(x3, MemOperand(x2, -kPointerSize, PostIndex));
+ __ Str(x3, MemOperand(x5, -kPointerSize, PreIndex));
+ __ Bind(&loop_check);
+ __ Cmp(x2, x4);
+ __ B(gt, &loop_header);
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
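
A pointer-arithmetic sketch of the copy loop, assuming the receiver and the argc arguments occupy argc + 1 consecutive slots walked downwards from the first-argument address, which is what the address computation above implements:

    #include <cstdint>
    #include <vector>

    // last_arg mirrors "Sub(x4, x2, x3)" with x3 = (argc + 1) * 8; the loop
    // mirrors the Cmp/B(gt) pair: copy slots from first_arg down to, and
    // including, last_arg + 1, i.e. argc + 1 values in total.
    void PushArgs(const uint64_t* first_arg, uint64_t argc,
                  std::vector<uint64_t>& js_stack) {
      const uint64_t* last_arg = first_arg - (argc + 1);
      for (const uint64_t* p = first_arg; p > last_arg; --p) {
        js_stack.push_back(*p);
      }
      // ...then jump to Builtins::Call with the target in x1.
    }
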
+
+
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
// ----------- S t a t e -------------
@@ -1815,19 +1817,19 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Register copy_to = x12;
Register scratch1 = x13, scratch2 = x14;
- __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
+ __ Lsl(scratch2, argc_expected, kPointerSizeLog2);
// Adjust for fp, lr, and the receiver.
__ Add(copy_start, fp, 3 * kPointerSize);
__ Add(copy_start, copy_start, Operand(argc_actual, LSL, kPointerSizeLog2));
- __ Sub(copy_end, copy_start, argc_expected);
+ __ Sub(copy_end, copy_start, scratch2);
__ Sub(copy_end, copy_end, kPointerSize);
__ Mov(copy_to, jssp);
// Claim space for the arguments, the receiver, and one extra slot.
// The extra slot ensures we do not write under jssp. It will be popped
// later.
- __ Add(scratch1, argc_expected, 2 * kPointerSize);
+ __ Add(scratch1, scratch2, 2 * kPointerSize);
__ Claim(scratch1, 1);
// Copy the arguments (including the receiver) to the new stack frame.
@@ -1880,7 +1882,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
- __ Lsl(argc_expected, argc_expected, kPointerSizeLog2);
+ __ Lsl(scratch2, argc_expected, kPointerSizeLog2);
__ Lsl(argc_actual, argc_actual, kPointerSizeLog2);
// Adjust for fp, lr, and the receiver.
@@ -1893,7 +1895,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Claim space for the arguments, the receiver, and one extra slot.
// The extra slot ensures we do not write under jssp. It will be popped
// later.
- __ Add(scratch1, argc_expected, 2 * kPointerSize);
+ __ Add(scratch1, scratch2, 2 * kPointerSize);
__ Claim(scratch1, 1);
// Copy the arguments (including the receiver) to the new stack frame.
@@ -1925,6 +1927,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Arguments have been adapted. Now call the entry point.
__ Bind(&invoke);
+ __ Mov(argc_actual, argc_expected);
+ // x0 : expected number of arguments
+ // x1 : function (passed through to callee)
__ Call(code_entry);
// Store offset of return address for deoptimizer.
@@ -1942,7 +1947,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ Unreachable();
}
}
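
The Lsl changes in the hunks above keep argc_expected live, because the invoke path now rewrites x0 with it. As a sketch, with plain integers standing in for the registers (a model, not V8 types):

    #include <cstdint>

    struct AdaptorRegs { uint64_t argc_actual, argc_expected, scratch2; };

    // The byte-size computation now lands in a scratch register, so the
    // expected count survives to the invoke path, where it overwrites x0:
    // after adaption the callee always sees exactly `expected` arguments.
    void PrepareInvoke(AdaptorRegs& r) {
      r.scratch2 = r.argc_expected << 3;  // kPointerSizeLog2 on AArch64
      r.argc_actual = r.argc_expected;    // x0 <- expected; Call(code_entry)
    }
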
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index 716910ea91..e39e08831a 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -4,7 +4,6 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/frames-arm64.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
@@ -16,6 +15,9 @@
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
+#include "src/arm64/code-stubs-arm64.h"
+#include "src/arm64/frames-arm64.h"
+
namespace v8 {
namespace internal {
@@ -650,29 +652,25 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- if (cond == eq && strict()) {
- __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
+ if (cond == eq) {
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
+ 1);
} else {
- Builtins::JavaScript native;
- if (cond == eq) {
- native = Builtins::EQUALS;
+ int ncr; // NaN compare result
+ if ((cond == lt) || (cond == le)) {
+ ncr = GREATER;
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result
- if ((cond == lt) || (cond == le)) {
- ncr = GREATER;
- } else {
- DCHECK((cond == gt) || (cond == ge)); // remaining cases
- ncr = LESS;
- }
- __ Mov(x10, Smi::FromInt(ncr));
- __ Push(x10);
+ DCHECK((cond == gt) || (cond == ge)); // remaining cases
+ ncr = LESS;
}
+ __ Mov(x10, Smi::FromInt(ncr));
+ __ Push(x10);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ __ TailCallRuntime(
+ is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
+ 1);
}
__ Bind(&miss);
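
The ncr ("NaN compare result") trick is worth spelling out: the runtime Compare function returns -1/0/1 for ordered operands and the pushed ncr when either side is NaN, and the pushed value is chosen so that every comparison involving NaN comes out false. A sketch, assuming V8's LESS = -1 / GREATER = 1 encoding:

    #include <cmath>

    // Returns -1/0/1 for ordered operands, ncr when either side is NaN.
    int Compare(double lhs, double rhs, int ncr) {
      if (std::isnan(lhs) || std::isnan(rhs)) return ncr;
      return lhs < rhs ? -1 : (lhs == rhs ? 0 : 1);
    }

    // lt/le push GREATER: NaN yields 1, and 1 < 0 is false, as required.
    bool LessThan(double l, double r) { return Compare(l, r, /*GREATER*/ 1) < 0; }
    // gt/ge push LESS: NaN yields -1, and -1 > 0 is false, as required.
    bool GreaterThan(double l, double r) { return Compare(l, r, /*LESS*/ -1) > 0; }
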
@@ -1505,191 +1503,107 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Stack on entry:
- // jssp[0]: function.
- // jssp[8]: object.
- //
- // Returns result in x0. Zero indicates instanceof, smi 1 indicates not
- // instanceof.
-
- Register result = x0;
- Register function = right();
- Register object = left();
- Register scratch1 = x6;
- Register scratch2 = x7;
- Register res_true = x8;
- Register res_false = x9;
- // Only used if there was an inline map check site. (See
- // LCodeGen::DoInstanceOfKnownGlobal().)
- Register map_check_site = x4;
- // Delta for the instructions generated between the inline map check and the
- // instruction setting the result.
- const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize;
-
- Label not_js_object, slow;
-
- if (!HasArgsInRegisters()) {
- __ Pop(function, object);
- }
-
- if (ReturnTrueFalseObject()) {
- __ LoadTrueFalseRoots(res_true, res_false);
- } else {
- // This is counter-intuitive, but correct.
- __ Mov(res_true, Smi::FromInt(0));
- __ Mov(res_false, Smi::FromInt(1));
- }
-
- // Check that the left hand side is a JS object and load its map as a side
- // effect.
- Register map = x12;
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch2, &not_js_object);
-
- // If there is a call site cache, don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
- Label miss;
- __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
- __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
- __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret();
- __ Bind(&miss);
- }
-
- // Get the prototype of the function.
- Register prototype = x13;
- __ TryGetFunctionPrototype(function, prototype, scratch2, &slow,
- MacroAssembler::kMissOnBoundFunction);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow);
-
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (HasCallSiteInlineCheck()) {
- // Patch the (relocated) inlined map check.
- __ GetRelocatedValueLocation(map_check_site, scratch1);
- // We have a cell, so need another level of dereferencing.
- __ Ldr(scratch1, MemOperand(scratch1));
- __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset));
-
- __ Mov(x14, map);
- // |scratch1| points at the beginning of the cell. Calculate the
- // field containing the map.
- __ Add(function, scratch1, Operand(Cell::kValueOffset - 1));
- __ RecordWriteField(scratch1, Cell::kValueOffset, x14, function,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- } else {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- }
-
- Label return_true, return_result;
- Register smi_value = scratch1;
- {
- // Loop through the prototype chain looking for the function prototype.
- Register chain_map = x1;
- Register chain_prototype = x14;
- Register null_value = x15;
- Label loop;
- __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset));
- __ LoadRoot(null_value, Heap::kNullValueRootIndex);
- // Speculatively set a result.
- __ Mov(result, res_false);
- if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
- // Value to store in the cache cannot be an object.
- __ Mov(smi_value, Smi::FromInt(1));
- }
-
- __ Bind(&loop);
-
- // If the chain prototype is the object prototype, return true.
- __ Cmp(chain_prototype, prototype);
- __ B(eq, &return_true);
-
- // If the chain prototype is null, we've reached the end of the chain, so
- // return false.
- __ Cmp(chain_prototype, null_value);
- __ B(eq, &return_result);
-
- // Otherwise, load the next prototype in the chain, and loop.
- __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset));
- __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset));
- __ B(&loop);
- }
-
- // Return sequence when no arguments are on the stack.
- // We cannot fall through to here.
- __ Bind(&return_true);
- __ Mov(result, res_true);
- if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
- // Value to store in the cache cannot be an object.
- __ Mov(smi_value, Smi::FromInt(0));
- }
- __ Bind(&return_result);
- if (HasCallSiteInlineCheck()) {
- DCHECK(ReturnTrueFalseObject());
- __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
- __ GetRelocatedValueLocation(map_check_site, scratch2);
- __ Str(result, MemOperand(scratch2));
- } else {
- Register cached_value = ReturnTrueFalseObject() ? smi_value : result;
- __ StoreRoot(cached_value, Heap::kInstanceofCacheAnswerRootIndex);
- }
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+ Register const object = x1; // Object (lhs).
+ Register const function = x0; // Function (rhs).
+ Register const object_map = x2; // Map of {object}.
+ Register const function_map = x3; // Map of {function}.
+ Register const function_prototype = x4; // Prototype of {function}.
+ Register const scratch = x5;
+
+ DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+ // Check if {object} is a smi.
+ Label object_is_smi;
+ __ JumpIfSmi(object, &object_is_smi);
+
+ // Lookup the {function} and the {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Label fast_case, slow_case;
+ __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex,
+ &fast_case);
+ __ JumpIfNotRoot(object_map, Heap::kInstanceofCacheMapRootIndex, &fast_case);
+ __ LoadRoot(x0, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
- Label object_not_null, object_not_null_or_smi;
-
- __ Bind(&not_js_object);
- Register object_type = x14;
- // x0 result result return register (uninit)
- // x10 function pointer to function
- // x11 object pointer to object
- // x14 object_type type of object (uninit)
-
- // Before null, smi and string checks, check that the rhs is a function.
- // For a non-function rhs, an exception must be thrown.
- __ JumpIfSmi(function, &slow);
- __ JumpIfNotObjectType(
- function, scratch1, object_type, JS_FUNCTION_TYPE, &slow);
-
- __ Mov(result, res_false);
-
- // Null is not instance of anything.
- __ Cmp(object, Operand(isolate()->factory()->null_value()));
- __ B(ne, &object_not_null);
+ // If {object} is a smi we can safely return false if {function} is a JS
+ // function, otherwise we have to miss to the runtime and throw an exception.
+ __ Bind(&object_is_smi);
+ __ JumpIfSmi(function, &slow_case);
+ __ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
+ &slow_case);
+ __ LoadRoot(x0, Heap::kFalseValueRootIndex);
__ Ret();
- __ Bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
- __ Ret();
+ // Fast-case: The {function} must be a valid JSFunction.
+ __ Bind(&fast_case);
+ __ JumpIfSmi(function, &slow_case);
+ __ JumpIfNotObjectType(function, function_map, scratch, JS_FUNCTION_TYPE,
+ &slow_case);
- __ Bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch2, &slow);
- __ Ret();
+ // Ensure that {function} has an instance prototype.
+ __ Ldrb(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ Tbnz(scratch, Map::kHasNonInstancePrototype, &slow_case);
- // Slow-case. Tail call builtin.
- __ Bind(&slow);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- // Arguments have either been passed into registers or have been previously
- // popped. We need to push them before calling builtin.
- __ Push(object, function);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- if (ReturnTrueFalseObject()) {
- // Reload true/false because they were clobbered in the builtin call.
- __ LoadTrueFalseRoots(res_true, res_false);
- __ Cmp(result, 0);
- __ Csel(result, res_true, res_false, eq);
- }
+ // Ensure that {function} is not bound.
+ Register const shared_info = scratch;
+ Register const scratch_w = scratch.W();
+ __ Ldr(shared_info,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ // On 64-bit platforms, the compiler hints field is not a smi. See the
+ // definition of kCompilerHintsOffset in src/objects.h.
+ __ Ldr(scratch_w, FieldMemOperand(shared_info,
+ SharedFunctionInfo::kCompilerHintsOffset));
+ __ Tbnz(scratch_w, SharedFunctionInfo::kBoundFunction, &slow_case);
+
+ // Get the "prototype" (or initial map) of the {function}.
+ __ Ldr(function_prototype,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ AssertNotSmi(function_prototype);
+
+ // Resolve the prototype if the {function} has an initial map. Afterwards the
+ // {function_prototype} will be either the JSReceiver prototype object or the
+ // hole value; the latter means that no instances of the {function} have been
+ // created so far, and hence we should return false.
+ Label function_prototype_valid;
+ __ JumpIfNotObjectType(function_prototype, scratch, scratch, MAP_TYPE,
+ &function_prototype_valid);
+ __ Ldr(function_prototype,
+ FieldMemOperand(function_prototype, Map::kPrototypeOffset));
+ __ Bind(&function_prototype_valid);
+ __ AssertNotSmi(function_prototype);
+
+ // Update the global instanceof cache with the current {object} map and
+ // {function}. The cached answer will be set when it is known below.
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+
+ // Loop through the prototype chain looking for the {function} prototype.
+ // Assume true, and change to false if not found.
+ Register const object_prototype = object_map;
+ Register const null = scratch;
+ Label done, loop;
+ __ LoadRoot(x0, Heap::kTrueValueRootIndex);
+ __ LoadRoot(null, Heap::kNullValueRootIndex);
+ __ Bind(&loop);
+ __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Cmp(object_prototype, function_prototype);
+ __ B(eq, &done);
+ __ Cmp(object_prototype, null);
+ __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ B(ne, &loop);
+ __ LoadRoot(x0, Heap::kFalseValueRootIndex);
+ __ Bind(&done);
+ __ StoreRoot(x0, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret();
+
+ // Slow-case: Call the runtime function.
+ __ Bind(&slow_case);
+ __ Push(object, function);
+ __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
}
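
The rewritten fast path is an ordinary prototype-chain walk with a global cache in front of it. The walk itself, as a sketch in which Map and HeapObject are stand-ins and nullptr plays the role of JS null; the stub additionally stores the computed answer into the instanceof cache root before returning, which is what makes the cache probe at the top sound.

    struct Map;
    struct HeapObject { const Map* map; };
    struct Map { const HeapObject* prototype; };

    bool InstanceOf(const Map* object_map, const HeapObject* function_prototype) {
      for (;;) {
        const HeapObject* object_prototype = object_map->prototype;
        if (object_prototype == function_prototype) return true;  // found it
        if (object_prototype == nullptr) return false;  // end of the chain
        object_map = object_prototype->map;             // keep walking
      }
    }
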
@@ -1745,10 +1659,13 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // Stack layout on entry.
- // jssp[0]: number of parameters (tagged)
- // jssp[8]: address of receiver argument
- // jssp[16]: function
+ // x1 : function
+ // x2 : number of parameters (tagged)
+ // x3 : parameters pointer
+
+ DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
@@ -1761,33 +1678,35 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
__ B(ne, &runtime);
// Patch the arguments.length and parameters pointer in the current frame.
- __ Ldr(x11, MemOperand(caller_fp,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ Poke(x11, 0 * kXRegSize);
- __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2));
- __ Add(x10, x10, StandardFrameConstants::kCallerSPOffset);
- __ Poke(x10, 1 * kXRegSize);
+ __ Ldr(x2,
+ MemOperand(caller_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Add(x3, caller_fp, Operand::UntagSmiAndScale(x2, kPointerSizeLog2));
+ __ Add(x3, x3, StandardFrameConstants::kCallerSPOffset);
__ Bind(&runtime);
+ __ Push(x1, x3, x2);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // Stack layout on entry.
- // jssp[0]: number of parameters (tagged)
- // jssp[8]: address of receiver argument
- // jssp[16]: function
+ // x1 : function
+ // x2 : number of parameters (tagged)
+ // x3 : parameters pointer
//
// Returns pointer to result object in x0.
+ DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
+
+ // Make an untagged copy of the parameter count.
// Note: arg_count_smi is an alias of param_count_smi.
- Register arg_count_smi = x3;
- Register param_count_smi = x3;
+ Register function = x1;
+ Register arg_count_smi = x2;
+ Register param_count_smi = x2;
+ Register recv_arg = x3;
Register param_count = x7;
- Register recv_arg = x14;
- Register function = x4;
- __ Pop(param_count_smi, recv_arg, function);
__ SmiUntag(param_count, param_count_smi);
// Check if the calling frame is an arguments adaptor frame.
@@ -1803,16 +1722,18 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// No adaptor, parameter count = argument count.
- // x1 mapped_params number of mapped params, min(params, args) (uninit)
- // x2 arg_count number of function arguments (uninit)
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped params, min(params, args) (uninit)
// x7 param_count number of function parameters
// x11 caller_fp caller's frame pointer
- // x14 recv_arg pointer to receiver arguments
+ // x14 arg_count number of function arguments (uninit)
- Register arg_count = x2;
+ Register arg_count = x14;
+ Register mapped_params = x4;
__ Mov(arg_count, param_count);
+ __ Mov(mapped_params, param_count);
__ B(&try_allocate);
// We have an adaptor frame. Patch the parameters pointer.
@@ -1825,7 +1746,6 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset);
// Compute the mapped parameter count = min(param_count, arg_count)
- Register mapped_params = x1;
__ Cmp(param_count, arg_count);
__ Csel(mapped_params, param_count, arg_count, lt);
@@ -1833,13 +1753,13 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// x0 alloc_obj pointer to allocated objects: param map, backing
// store, arguments (uninit)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
// x7 param_count number of function parameters
// x10 size size of objects to allocate (uninit)
- // x14 recv_arg pointer to receiver arguments
+ // x14 arg_count number of function arguments
// Compute the size of backing store, parameter map, and arguments object.
// 1. Parameter map, has two extra words containing context and backing
@@ -1871,13 +1791,13 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// x0 alloc_obj pointer to allocated objects (param map, backing
// store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
// x7 param_count number of function parameters
// x11 sloppy_args_map offset to args (or aliased args) map (uninit)
- // x14 recv_arg pointer to receiver arguments
+ // x14 arg_count number of function arguments
Register global_object = x10;
Register global_ctx = x10;
@@ -1920,14 +1840,14 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// x0 alloc_obj pointer to allocated objects (param map, backing
// store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
// x5 elements pointer to parameter map or backing store (uninit)
// x6 backing_store pointer to backing store (uninit)
// x7 param_count number of function parameters
- // x14 recv_arg pointer to receiver arguments
+ // x14 arg_count number of function arguments
Register elements = x5;
__ Add(elements, alloc_obj, Heap::kSloppyArgumentsObjectSize);
@@ -1969,17 +1889,17 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// x0 alloc_obj pointer to allocated objects (param map, backing
// store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x3 arg_count_smi number of function arguments (smi)
- // x4 function function pointer
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
// x5 elements pointer to parameter map or backing store (uninit)
// x6 backing_store pointer to backing store (uninit)
// x7 param_count number of function parameters
// x11 loop_count parameter loop counter (uninit)
// x12 index parameter index (smi, uninit)
// x13 the_hole hole value (uninit)
- // x14 recv_arg pointer to receiver arguments
+ // x14 arg_count number of function arguments
Register loop_count = x11;
Register index = x12;
@@ -2015,12 +1935,12 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// x0 alloc_obj pointer to allocated objects (param map, backing
// store, arguments)
- // x1 mapped_params number of mapped parameters, min(params, args)
- // x2 arg_count number of function arguments
- // x4 function function pointer
- // x3 arg_count_smi number of function arguments (smi)
+ // x1 function function pointer
+ // x2 arg_count_smi number of function arguments (smi)
+ // x3 recv_arg pointer to receiver arguments
+ // x4 mapped_params number of mapped parameters, min(params, args)
// x6 backing_store pointer to backing store (uninit)
- // x14 recv_arg pointer to receiver arguments
+ // x14 arg_count number of function arguments
Label arguments_loop, arguments_test;
__ Mov(x10, mapped_params);
@@ -2068,20 +1988,21 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // Stack layout on entry.
- // jssp[0]: number of parameters (tagged)
- // jssp[8]: address of receiver argument
- // jssp[16]: function
+ // x1 : function
+ // x2 : number of parameters (tagged)
+ // x3 : parameters pointer
//
// Returns pointer to result object in x0.
- // Get the stub arguments from the frame, and make an untagged copy of the
- // parameter count.
- Register param_count_smi = x1;
- Register params = x2;
- Register function = x3;
+ DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(x2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(x3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
+
+ // Make an untagged copy of the parameter count.
+ Register function = x1;
+ Register param_count_smi = x2;
+ Register params = x3;
Register param_count = x13;
- __ Pop(param_count_smi, params, function);
__ SmiUntag(param_count, param_count_smi);
// Test if arguments adaptor needed.
@@ -2094,9 +2015,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ B(ne, &try_allocate);
- // x1 param_count_smi number of parameters passed to function (smi)
- // x2 params pointer to parameters
- // x3 function function pointer
+ // x1 function function pointer
+ // x2 param_count_smi number of parameters passed to function (smi)
+ // x3 params pointer to parameters
// x11 caller_fp caller's frame pointer
// x13 param_count number of parameters passed to function
@@ -2135,9 +2056,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// x0 alloc_obj pointer to allocated objects: parameter array and
// arguments object
- // x1 param_count_smi number of parameters passed to function (smi)
- // x2 params pointer to parameters
- // x3 function function pointer
+ // x1 function function pointer
+ // x2 param_count_smi number of parameters passed to function (smi)
+ // x3 params pointer to parameters
// x4 strict_args_map offset to arguments map
// x13 param_count number of parameters passed to function
__ Str(strict_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
@@ -2166,9 +2087,9 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// x0 alloc_obj pointer to allocated objects: parameter array and
// arguments object
- // x1 param_count_smi number of parameters passed to function (smi)
- // x2 params pointer to parameters
- // x3 function function pointer
+ // x1 function function pointer
+ // x2 param_count_smi number of parameters passed to function (smi)
+ // x3 params pointer to parameters
// x4 array pointer to array slot (uninit)
// x5 elements pointer to elements array of alloc_obj
// x13 param_count number of parameters passed to function
@@ -2199,56 +2120,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
}
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // Stack layout on entry.
- // jssp[0]: language mode (tagged)
- // jssp[8]: index of rest parameter (tagged)
- // jssp[16]: number of parameters (tagged)
- // jssp[24]: address of receiver argument
- //
- // Returns pointer to result object in x0.
-
- // Get the stub arguments from the frame, and make an untagged copy of the
- // parameter count.
- Register language_mode_smi = x1;
- Register rest_index_smi = x2;
- Register param_count_smi = x3;
- Register params = x4;
- Register param_count = x13;
- __ Pop(language_mode_smi, rest_index_smi, param_count_smi, params);
- __ SmiUntag(param_count, param_count_smi);
-
- // Test if arguments adaptor needed.
- Register caller_fp = x11;
- Register caller_ctx = x12;
- Label runtime;
- __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(caller_ctx, MemOperand(caller_fp,
- StandardFrameConstants::kContextOffset));
- __ Cmp(caller_ctx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ B(ne, &runtime);
-
- // x1 language_mode_smi language mode
- // x2 rest_index_smi index of rest parameter
- // x3 param_count_smi number of parameters passed to function (smi)
- // x4 params pointer to parameters
- // x11 caller_fp caller's frame pointer
- // x13 param_count number of parameters passed to function
-
- // Patch the argument length and parameters pointer.
- __ Ldr(param_count_smi,
- MemOperand(caller_fp,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiUntag(param_count, param_count_smi);
- __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
- __ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
-
- __ Bind(&runtime);
- __ Push(params, param_count_smi, rest_index_smi, language_mode_smi);
- __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -2816,26 +2687,24 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
__ B(eq, &done);
__ Ldr(feedback_map, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
- __ B(ne, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
+ __ B(ne, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(feedback_value, &initialize);
__ B(&megamorphic);
- if (!FLAG_pretenuring_call_new) {
- __ bind(&check_allocation_site);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the slot either some other function or an
- // AllocationSite.
- __ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
-
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
- __ Cmp(function, scratch1);
- __ B(ne, &megamorphic);
- __ B(&done);
- }
+ __ Bind(&check_allocation_site);
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorph
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite.
+ __ JumpIfNotRoot(feedback_map, Heap::kAllocationSiteMapRootIndex, &miss);
+
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ Cmp(function, scratch1);
+ __ B(ne, &megamorphic);
+ __ B(&done);
__ Bind(&miss);
@@ -2855,27 +2724,23 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, Register argc,
// indicate the ElementsKind if function is the Array constructor.
__ Bind(&initialize);
- if (!FLAG_pretenuring_call_new) {
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
- __ Cmp(function, scratch1);
- __ B(ne, &not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the
- // slot.
- CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, argc, function,
- feedback_vector, index, orig_construct,
- is_super);
- __ B(&done);
-
- __ Bind(&not_array_function);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch1);
+ __ Cmp(function, scratch1);
+ __ B(ne, &not_array_function);
- CreateWeakCellStub create_stub(masm->isolate());
+ // The target function is the Array constructor. Create an AllocationSite
+ // if we don't already have one, and store it in the slot.
+ CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, argc, function,
feedback_vector, index, orig_construct, is_super);
+ __ B(&done);
+
+ __ Bind(&not_array_function);
+ CreateWeakCellStub weak_cell_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &weak_cell_stub, argc, function,
+ feedback_vector, index, orig_construct, is_super);
__ Bind(&done);
}
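
With FLAG_pretenuring_call_new gone, the feedback slot follows a single life cycle. A state-machine sketch (the names and parameters are stand-ins for the checks performed above):

    enum class Slot { kUninitialized, kAllocationSite, kWeakCell, kMegamorphic };

    // First call initializes the slot: the Array() function records an
    // AllocationSite, every other function a WeakCell. A later call with a
    // different target degrades the slot to megamorphic; a cleared WeakCell
    // gives the slot a fresh chance to go monomorphic again.
    Slot Record(Slot slot, bool same_target, bool weak_cell_cleared,
                bool is_array_function) {
      switch (slot) {
        case Slot::kUninitialized:
          return is_array_function ? Slot::kAllocationSite : Slot::kWeakCell;
        case Slot::kWeakCell:
          if (weak_cell_cleared)
            return is_array_function ? Slot::kAllocationSite : Slot::kWeakCell;
          return same_target ? slot : Slot::kMegamorphic;
        case Slot::kAllocationSite:
          return same_target ? slot : Slot::kMegamorphic;
        default:
          return Slot::kMegamorphic;
      }
    }
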
@@ -2891,33 +2756,9 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
}
-static void EmitSlowCase(MacroAssembler* masm,
- int argc,
- Register function,
- Register type,
- Label* non_function) {
- // Check for function proxy.
- // x10 : function type.
- __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, non_function);
- __ Push(function); // put proxy as additional argument
- __ Mov(x0, argc + 1);
- __ Mov(x2, 0);
- __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ Bind(non_function);
- __ Poke(function, argc * kXRegSize);
- __ Mov(x0, argc); // Set up the number of arguments.
- __ Mov(x2, 0);
- __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+static void EmitSlowCase(MacroAssembler* masm, int argc) {
+ __ Mov(x0, argc);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
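// The rewritten slow case simply sets up the actual argument count and
// tail-calls the generic Call builtin, which now performs the proxy and
// non-callable handling that the deleted code above did by hand with
// CALL_FUNCTION_PROXY / CALL_NON_FUNCTION.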
@@ -2941,14 +2782,14 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
// x1 function the function to call
Register function = x1;
Register type = x4;
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
// TODO(jbramley): This function has a lot of unnamed registers. Name them,
// and tidy things up a bit.
if (needs_checks) {
// Check that the function is really a JavaScript function.
- __ JumpIfSmi(function, &non_function);
+ __ JumpIfSmi(function, &slow);
// Goto slow case if we do not have a function.
__ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
@@ -2983,7 +2824,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
if (needs_checks) {
// Slow-case: Non-function called.
__ Bind(&slow);
- EmitSlowCase(masm, argc, function, type, &non_function);
+ EmitSlowCase(masm, argc);
}
if (call_as_method) {
@@ -3007,35 +2848,28 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// x3 : slot in feedback vector (Smi, for RecordCallTarget)
// x4 : original constructor (for IsSuperConstructorCall)
Register function = x1;
- Label slow, non_function_call;
+ Label non_function;
// Check that the function is not a smi.
- __ JumpIfSmi(function, &non_function_call);
+ __ JumpIfSmi(function, &non_function);
// Check that the function is a JSFunction.
Register object_type = x10;
__ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE,
- &slow);
+ &non_function);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, x0, function, x2, x3, x4, x5, x11, x12,
IsSuperConstructorCall());
__ Add(x5, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2));
- if (FLAG_pretenuring_call_new) {
- // Put the AllocationSite from the feedback vector into x2.
- // By adding kPointerSize we encode that we know the AllocationSite
- // entry is at the feedback vector slot given by x3 + 1.
- __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize + kPointerSize));
- } else {
Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into x2, or undefined.
- __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
- __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
- __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
- &feedback_register_initialized);
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
+ // Put the AllocationSite from the feedback vector into x2, or undefined.
+ __ Ldr(x2, FieldMemOperand(x5, FixedArray::kHeaderSize));
+ __ Ldr(x5, FieldMemOperand(x2, AllocationSite::kMapOffset));
+ __ JumpIfRoot(x5, Heap::kAllocationSiteMapRootIndex,
+ &feedback_register_initialized);
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(x2, x5);
}
@@ -3046,71 +2880,36 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Mov(x3, function);
}
- // Jump to the function-specific construct stub.
- Register jump_reg = x4;
- Register shared_func_info = jump_reg;
- Register cons_stub = jump_reg;
- Register cons_stub_code = jump_reg;
- __ Ldr(shared_func_info,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(cons_stub,
- FieldMemOperand(shared_func_info,
- SharedFunctionInfo::kConstructStubOffset));
- __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag);
- __ Br(cons_stub_code);
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
+ __ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
+ __ Br(x4);
- Label do_call;
- __ Bind(&slow);
- __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE);
- __ B(ne, &non_function_call);
- __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ B(&do_call);
-
- __ Bind(&non_function_call);
- __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-
- __ Bind(&do_call);
- // Set expected number of arguments to zero (not changing x0).
- __ Mov(x2, 0);
- __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ __ Bind(&non_function);
+ __ Mov(x3, function);
+ __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
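// In outline, the stub now behaves like this hedged pseudocode:
//
//   if (!function.IsSmi() && function.IsJSFunction()) {
//     optionally record the call target in the feedback vector;
//     x3 = new.target;  // original constructor for super calls,
//                       // otherwise the function itself
//     tail-call function.shared.construct_stub;
//   } else {
//     x3 = function;    // the callee doubles as new.target
//     tail-call Builtins::Construct;  // proxies and non-constructors
//   }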
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
- __ Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(vector, FieldMemOperand(vector,
- JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(vector, FieldMemOperand(vector,
- SharedFunctionInfo::kFeedbackVectorOffset));
-}
-
-
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// x1 - function
// x3 - slot id
// x2 - vector
- Label miss;
+ // x4 - allocation site (loaded from vector[slot])
Register function = x1;
Register feedback_vector = x2;
Register index = x3;
- Register scratch = x4;
+ Register allocation_site = x4;
+ Register scratch = x5;
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, scratch);
__ Cmp(function, scratch);
- __ B(ne, &miss);
+ __ B(ne, miss);
__ Mov(x0, Operand(arg_count()));
- __ Add(scratch, feedback_vector,
- Operand::UntagSmiAndScale(index, kPointerSizeLog2));
- __ Ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
-
- // Verify that scratch contains an AllocationSite
- Register map = x5;
- __ Ldr(map, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ JumpIfNotRoot(map, Heap::kAllocationSiteMapRootIndex, &miss);
-
// Increment the call count for monomorphic function calls.
__ Add(feedback_vector, feedback_vector,
Operand::UntagSmiAndScale(index, kPointerSizeLog2));
@@ -3120,23 +2919,13 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ Add(index, index, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
__ Str(index, FieldMemOperand(feedback_vector, 0));
- Register allocation_site = feedback_vector;
- Register original_constructor = index;
- __ Mov(allocation_site, scratch);
- __ Mov(original_constructor, function);
+ // Set up arguments for the array constructor stub.
+ Register allocation_site_arg = feedback_vector;
+ Register original_constructor_arg = index;
+ __ Mov(allocation_site_arg, allocation_site);
+ __ Mov(original_constructor_arg, function);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The slow case, we need this no matter what to complete a call after a miss.
- CallFunctionNoFeedback(masm,
- arg_count(),
- true,
- CallAsMethod());
-
- __ Unreachable();
}
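// The count bump above is, in effect (a sketch; the load itself sits in the
// elided context lines):
//   vector[slot + 1] += Smi::FromInt(CallICNexus::kCallCountIncrement);
// after which x2/x3 are repurposed to carry the AllocationSite and the
// original constructor into ArrayConstructorStub.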
@@ -3151,7 +2940,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
Label have_js_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -3216,7 +3005,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
NullCallWrapper());
__ bind(&slow);
- EmitSlowCase(masm, argc, function, type, &non_function);
+ EmitSlowCase(masm, argc);
if (CallAsMethod()) {
__ bind(&wrap);
@@ -3224,10 +3013,17 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
- Label uninitialized, miss;
+ Label uninitialized, miss, not_allocation_site;
__ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
+ __ Ldr(x5, FieldMemOperand(x4, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &not_allocation_site);
+
+ HandleArrayCase(masm, &miss);
+
+ __ bind(&not_allocation_site);
+
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
@@ -3301,7 +3097,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow_start);
// Check that the function is really a JavaScript function.
- __ JumpIfSmi(function, &non_function);
+ __ JumpIfSmi(function, &slow);
// Goto slow case if we do not have a function.
__ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow);
@@ -3318,10 +3114,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(x1, x2, x3);
// Call the entry.
- Runtime::FunctionId id = GetICState() == DEFAULT
- ? Runtime::kCallIC_Miss
- : Runtime::kCallIC_Customization_Miss;
- __ CallRuntime(id, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss, 3);
// Move result to x1 and exit the internal frame.
__ Mov(x1, x0);
@@ -3451,6 +3244,32 @@ void StringCharFromCodeGenerator::GenerateSlow(
}
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+ // Inputs are in x0 (lhs) and x1 (rhs).
+ DCHECK_EQ(CompareICState::BOOLEAN, state());
+ ASM_LOCATION("CompareICStub[Booleans]");
+ Label miss;
+
+ __ CheckMap(x1, x2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ __ CheckMap(x0, x3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ if (op() != Token::EQ_STRICT && is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (!Token::IsEqualityOp(op())) {
+ __ Ldr(x1, FieldMemOperand(x1, Oddball::kToNumberOffset));
+ __ AssertSmi(x1);
+ __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
+ __ AssertSmi(x0);
+ }
+ __ Sub(x0, x1, x0);
+ __ Ret();
+ }
+
+ __ Bind(&miss);
+ GenerateMiss(masm);
+}
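// Semantics implemented above (sketch): both operands must be boolean
// oddballs. For equality ops identity suffices, so the subtraction yields
// zero iff the inputs are the same oddball. For relational ops each oddball
// is first replaced by its cached to_number Smi (false -> 0, true -> 1),
// and the same subtraction then encodes the ordering.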
+
+
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
// Inputs are in x0 (lhs) and x1 (rhs).
DCHECK(state() == CompareICState::SMI);
@@ -3745,8 +3564,21 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Cmp(lhs_map, map);
__ B(ne, &miss);
+ if (Token::IsEqualityOp(op())) {
__ Sub(result, rhs, lhs);
__ Ret();
+ } else if (is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ Register ncr = x2;
+ if (op() == Token::LT || op() == Token::LTE) {
+ __ Mov(ncr, Smi::FromInt(GREATER));
+ } else {
+ __ Mov(ncr, Smi::FromInt(LESS));
+ }
+ __ Push(lhs, rhs, ncr);
+ __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ }
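// The "ncr" pushed above is the NaN-comparison result handed to
// Runtime::kCompare: the value to produce if the operands end up comparing
// unordered after conversion. Pushing GREATER for LT/LTE (and LESS
// otherwise) makes comparisons involving NaN come out false, per the ES
// relational semantics.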
__ Bind(&miss);
GenerateMiss(masm);
@@ -4076,7 +3908,39 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ Bind(&not_oddball);
__ Push(x0); // Push argument.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+ __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+ // The ToString stub takes one argument in x0.
+ Label is_number;
+ __ JumpIfSmi(x0, &is_number);
+
+ Label not_string;
+ __ JumpIfObjectType(x0, x1, x1, FIRST_NONSTRING_TYPE, &not_string, hs);
+ // x0: receiver
+ // x1: receiver instance type
+ __ Ret();
+ __ Bind(&not_string);
+
+ Label not_heap_number;
+ __ Cmp(x1, HEAP_NUMBER_TYPE);
+ __ B(ne, &not_heap_number);
+ __ Bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ Bind(&not_heap_number);
+
+ Label not_oddball;
+ __ Cmp(x1, ODDBALL_TYPE);
+ __ B(ne, &not_oddball);
+ __ Ldr(x0, FieldMemOperand(x0, Oddball::kToStringOffset));
+ __ Ret();
+ __ Bind(&not_oddball);
+
+ __ Push(x0); // Push argument.
+ __ TailCallRuntime(Runtime::kToString, 1, 1);
}
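// Dispatch implemented above, in summary: Smi and HeapNumber inputs go
// through NumberToStringStub; strings return unchanged; oddballs
// (undefined, null, true, false) return their cached to_string; everything
// else falls through to Runtime::kToString.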
@@ -4188,45 +4052,38 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[8]: left string
- Register right = x10;
- Register left = x11;
- Register result = x0;
- __ Pop(right, left);
+ // ----------- S t a t e -------------
+ // -- x1 : left
+ // -- x0 : right
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertString(x1);
+ __ AssertString(x0);
Label not_same;
- __ Subs(result, right, left);
+ __ Cmp(x0, x1);
__ B(ne, &not_same);
- STATIC_ASSERT(EQUAL == 0);
- __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
+ __ Mov(x0, Smi::FromInt(EQUAL));
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
+ x4);
__ Ret();
__ Bind(&not_same);
// Check that both objects are sequential one-byte strings.
- __ JumpIfEitherIsNotSequentialOneByteStrings(left, right, x12, x13, &runtime);
-
- // Compare flat one-byte strings natively. Remove arguments from stack first,
- // as this function will generate a return.
- __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
- StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, x12, x13,
- x14, x15);
-
- __ Bind(&runtime);
+ Label runtime;
+ __ JumpIfEitherIsNotSequentialOneByteStrings(x1, x0, x12, x13, &runtime);
- // Push arguments back on to the stack.
- // sp[0] = right string
- // sp[8] = left string.
- __ Push(left, right);
+ // Compare flat one-byte strings natively.
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x3,
+ x4);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, x1, x0, x12, x13, x14,
+ x15);
// Call the runtime.
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
+ __ Bind(&runtime);
+ __ Push(x1, x0);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
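// The stub now takes left in x1 and right in x0 instead of popping both
// from the stack: pointer-equal inputs short-circuit to Smi(EQUAL), flat
// one-byte string pairs are compared inline, and only the remaining cases
// pay for the Runtime::kStringCompare call.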
@@ -4511,33 +4368,26 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, x2);
+ __ EmitLoadTypeFeedbackVector(x2);
CallICStub stub(isolate(), state());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
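// The deleted static helper above survives as a MacroAssembler member; per
// its old body, __ EmitLoadTypeFeedbackVector(reg) expands to roughly:
//   Ldr(reg, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
//   Ldr(reg, FieldMemOperand(reg, JSFunction::kSharedFunctionInfoOffset));
//   Ldr(reg, FieldMemOperand(reg, SharedFunctionInfo::kFeedbackVectorOffset));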
-void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, x2);
- CallIC_ArrayStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
@@ -4546,11 +4396,10 @@ void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
- Register key, Register vector, Register slot,
- Register feedback, Register receiver_map,
- Register scratch1, Register scratch2,
- bool is_polymorphic, Label* miss) {
+static void HandleArrayCases(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, bool is_polymorphic,
+ Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label load_smi_map, compare_map;
@@ -4662,8 +4511,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Bind(&try_array);
__ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
- HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
- scratch1, x7, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
__ Bind(&not_array);
__ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
@@ -4720,8 +4568,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ Bind(&polymorphic);
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, x7, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, true, &miss);
__ Bind(&not_array);
// Is it generic?
@@ -4740,8 +4587,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, x7, false, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, x7, false, &miss);
__ Bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
@@ -4753,14 +4599,14 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4777,11 +4623,46 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // x1
+ Register key = VectorStoreICDescriptor::NameRegister(); // x2
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // x3
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // x4
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0)); // x0
+ Register feedback = x5;
+ Register receiver_map = x6;
+ Register scratch1 = x7;
+
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ Bind(&try_array);
+ __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, true, &miss);
+
+ __ Bind(&not_array);
+ __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
+ receiver, key, feedback,
+ receiver_map, scratch1, x8);
- // TODO(mvstanton): Implement.
__ Bind(&miss);
StoreIC::GenerateMiss(masm);
+
+ __ Bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
}
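// Note on the smi path: the feedback vector records Smi receivers under the
// heap-number map, which is why load_smi_map installs
// Heap::kHeapNumberMapRootIndex into receiver_map before rejoining at
// compare_map.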
@@ -4795,12 +4676,126 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label start_polymorphic;
+ Label transition_call;
+
+ Register cached_map = scratch1;
+ Register too_far = scratch2;
+ Register pointer_reg = feedback;
+
+ __ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+ // +-----+------+------+-----+-----+-----+ ... +-----+
+ // | map | len  | wm0  | wt0 | h0  | wm1 | ... | hN  |
+ // +-----+------+------+-----+-----+-----+ ... +-----+
+ //    0     1      2                            len-1
+ //                  ^                              ^
+ //                  |                              |
+ //              pointer_reg                     too_far
+ //              aka feedback                    scratch2
+ // We also need receiver_map, and use cached_map (scratch1) to look in the
+ // weak map values.
+ __ Add(too_far, feedback,
+ Operand::UntagSmiAndScale(too_far, kPointerSizeLog2));
+ __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(pointer_reg, feedback,
+ FixedArray::OffsetOfElementAt(0) - kHeapObjectTag);
+
+ __ Bind(&next_loop);
+ __ Ldr(cached_map, MemOperand(pointer_reg));
+ __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Cmp(receiver_map, cached_map);
+ __ B(ne, &prepare_next);
+ // Is it a transitioning store?
+ __ Ldr(too_far, MemOperand(pointer_reg, kPointerSize));
+ __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
+ __ B(ne, &transition_call);
+
+ __ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+ __ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(pointer_reg);
+
+ __ Bind(&transition_call);
+ __ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+ __ JumpIfSmi(too_far, miss);
+
+ __ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+ // Load the map into the correct register.
+ DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ mov(feedback, too_far);
+ __ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(receiver_map);
+
+ __ Bind(&prepare_next);
+ __ Add(pointer_reg, pointer_reg, kPointerSize * 3);
+ __ Cmp(pointer_reg, too_far);
+ __ B(lt, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ jmp(miss);
+}
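// Per the loop above, each polymorphic store entry spans three words
// (hence the kPointerSize * 3 stride):
//   [i + 0]  WeakCell(map)        -- matched against receiver_map
//   [i + 1]  undefined, or WeakCell(transition map) for transitioning stores
//   [i + 2]  handler code object
// A non-transitioning match jumps straight to its handler; a transitioning
// match first moves the new map into the transition map register.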
+
+
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // x1
+ Register key = VectorStoreICDescriptor::NameRegister(); // x2
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // x3
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // x4
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(x0)); // x0
+ Register feedback = x5;
+ Register receiver_map = x6;
+ Register scratch1 = x7;
+
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ __ Bind(&try_array);
+ // Is it a fixed array?
+ __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
+
+ // We have a polymorphic element handler.
+ Label try_poly_name;
+ HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss);
+
+ __ Bind(&not_array);
+ // Is it generic?
+ __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
+ &try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ Bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ Cmp(key, feedback);
+ __ B(ne, &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss);
- // TODO(mvstanton): Implement.
__ Bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
+
+ __ Bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
}
diff --git a/deps/v8/src/arm64/codegen-arm64.cc b/deps/v8/src/arm64/codegen-arm64.cc
index c381df713d..8e927bfd90 100644
--- a/deps/v8/src/arm64/codegen-arm64.cc
+++ b/deps/v8/src/arm64/codegen-arm64.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/arm64/codegen-arm64.h"
+
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/simulator-arm64.h"
@@ -62,7 +64,7 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
diff --git a/deps/v8/src/arm64/cpu-arm64.cc b/deps/v8/src/arm64/cpu-arm64.cc
index bde3e4aeb9..cf2cc57215 100644
--- a/deps/v8/src/arm64/cpu-arm64.cc
+++ b/deps/v8/src/arm64/cpu-arm64.cc
@@ -8,7 +8,6 @@
#include "src/arm64/utils-arm64.h"
#include "src/assembler.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
namespace v8 {
namespace internal {
@@ -40,16 +39,7 @@ class CacheLineSizes {
void CpuFeatures::FlushICache(void* address, size_t length) {
- if (length == 0) return;
-
- if (CpuFeatures::IsSupported(COHERENT_CACHE)) return;
-
-#ifdef USE_SIMULATOR
- // TODO(all): consider doing some cache simulation to ensure every address
- // run has been synced.
- USE(address);
- USE(length);
-#else
+#ifdef V8_HOST_ARCH_ARM64
// The code below assumes user space cache operations are allowed. The goal
// of this routine is to make sure the generated code is visible to the
// instruction (I) side of the CPU.
@@ -116,7 +106,7 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
// move this code before the code is generated.
: "cc", "memory"
); // NOLINT
-#endif
+#endif // V8_HOST_ARCH_ARM64
}
} // namespace internal
diff --git a/deps/v8/src/arm64/frames-arm64.cc b/deps/v8/src/arm64/frames-arm64.cc
index d3dea408bd..bf2fde119e 100644
--- a/deps/v8/src/arm64/frames-arm64.cc
+++ b/deps/v8/src/arm64/frames-arm64.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/arm64/frames-arm64.h"
+
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h"
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index b49b457124..3dac70e784 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/arm64/interface-descriptors-arm64.h"
+
#if V8_TARGET_ARCH_ARM64
#include "src/interface-descriptors.h"
@@ -31,6 +33,11 @@ const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return x4; }
const Register VectorStoreICDescriptor::VectorRegister() { return x3; }
+const Register VectorStoreTransitionDescriptor::SlotRegister() { return x4; }
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return x3; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return x5; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return x3; }
@@ -41,22 +48,23 @@ const Register StoreGlobalViaContextDescriptor::SlotRegister() { return x2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return x0; }
-const Register InstanceofDescriptor::left() {
- // Object to check (instanceof lhs).
- return x11;
-}
+const Register InstanceOfDescriptor::LeftRegister() { return x1; }
+const Register InstanceOfDescriptor::RightRegister() { return x0; }
-const Register InstanceofDescriptor::right() {
- // Constructor function (instanceof rhs).
- return x10;
-}
+const Register StringCompareDescriptor::LeftRegister() { return x1; }
+const Register StringCompareDescriptor::RightRegister() { return x0; }
const Register ArgumentsAccessReadDescriptor::index() { return x1; }
const Register ArgumentsAccessReadDescriptor::parameter_count() { return x0; }
+const Register ArgumentsAccessNewDescriptor::function() { return x1; }
+const Register ArgumentsAccessNewDescriptor::parameter_count() { return x2; }
+const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return x3; }
+
+
const Register ApiGetterDescriptor::function_address() { return x2; }
@@ -70,10 +78,10 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
-void StoreTransitionDescriptor::InitializePlatformSpecific(
+void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- MapRegister()};
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ SlotRegister(), VectorRegister(), MapRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -103,6 +111,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToStringDescriptor::ReceiverRegister() { return x0; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return x0; }
@@ -204,6 +216,15 @@ void CallConstructDescriptor::InitializePlatformSpecific(
}
+void CallTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x1: target
+ // x0: number of arguments
+ Register registers[] = {x1, x0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: length
@@ -423,6 +444,18 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ x0, // argument count (including receiver)
+ x2, // address of first argument
+ x1 // the target callable to be called
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/arm64/lithium-arm64.cc
index 4b8208180e..e623718a1a 100644
--- a/deps/v8/src/arm64/lithium-arm64.cc
+++ b/deps/v8/src/arm64/lithium-arm64.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/arm64/lithium-arm64.h"
+
#include <sstream>
#include "src/arm64/lithium-codegen-arm64.h"
@@ -205,13 +207,6 @@ void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
value()->PrintTo(stream);
@@ -763,24 +758,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall()) {
+ if (instr->IsCall() || instr->IsPrologue()) {
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- LInstruction* instruction_needing_environment = NULL;
if (hydrogen_val->HasObservableSideEffects()) {
HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- instruction_needing_environment = instr;
sim->ReplayEnvironment(current_block_->last_environment());
hydrogen_value_for_lazy_bailout = sim;
}
LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
}
}
@@ -796,6 +783,11 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
}
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+ return new (zone()) LPrologue();
+}
+
+
LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
// The control instruction marking the end of a block that completed
// abruptly (e.g., threw an exception). There is nothing specific to do.
@@ -1540,13 +1532,6 @@ LInstruction* LChunkBuilder::DoForceRepresentation(
}
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LFunctionLiteral(context), x0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
HGetCachedArrayIndex* instr) {
DCHECK(instr->value()->representation().IsTagged());
@@ -1586,21 +1571,22 @@ LInstruction* LChunkBuilder::DoInnerAllocatedObject(
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* left =
+ UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+ LOperand* right =
+ UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result = new(zone()) LInstanceOf(
- context,
- UseFixed(instr->left(), InstanceofStub::left()),
- UseFixed(instr->right(), InstanceofStub::right()));
+ LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, x0), instr);
}
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result = new(zone()) LInstanceOfKnownGlobal(
- UseFixed(instr->context(), cp),
- UseFixed(instr->left(), InstanceofStub::left()));
- return MarkAsCall(DefineFixed(result, x0), instr);
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+ HHasInPrototypeChainAndBranch* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* prototype = UseRegister(instr->prototype());
+ LOperand* scratch = TempRegister();
+ return new (zone()) LHasInPrototypeChainAndBranch(object, prototype, scratch);
}
@@ -1627,15 +1613,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
}
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- return new(zone()) LIsObjectAndBranch(value, temp1, temp2);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -2150,6 +2127,8 @@ HBitwiseBinaryOperation* LChunkBuilder::CanTransformToShiftedOp(HValue* val,
DCHECK(hleft->representation().Equals(hinstr->representation()));
DCHECK(hright->representation().Equals(hinstr->representation()));
+ if (hleft == hright) return NULL;
+
if ((hright->IsConstant() &&
LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) ||
(hinstr->IsCommutative() && hleft->IsConstant() &&
diff --git a/deps/v8/src/arm64/lithium-arm64.h b/deps/v8/src/arm64/lithium-arm64.h
index 70337778f4..a77a6da38f 100644
--- a/deps/v8/src/arm64/lithium-arm64.h
+++ b/deps/v8/src/arm64/lithium-arm64.h
@@ -82,19 +82,17 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
+ V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
V(IsSmiAndBranch) \
V(IsStringAndBranch) \
V(IsUndetectableAndBranch) \
@@ -139,6 +137,7 @@ class LCodeGen;
V(OsrEntry) \
V(Parameter) \
V(Power) \
+ V(Prologue) \
V(PreparePushArguments) \
V(PushArguments) \
V(RegExpLiteral) \
@@ -246,8 +245,6 @@ class LInstruction : public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
@@ -476,6 +473,12 @@ class LGoto final : public LTemplateInstruction<0, 0, 0> {
};
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -1481,39 +1484,30 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
+ LOperand* context() const { return inputs_[0]; }
+ LOperand* left() const { return inputs_[1]; }
+ LOperand* right() const { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal final : public LTemplateInstruction<1, 2, 0> {
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* context, LOperand* value) {
- inputs_[0] = context;
- inputs_[1] = value;
+ LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
+ LOperand* scratch) {
+ inputs_[0] = object;
+ inputs_[1] = prototype;
+ temps_[0] = scratch;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) override {
- lazy_deopt_env_ = env;
- }
+ LOperand* object() const { return inputs_[0]; }
+ LOperand* prototype() const { return inputs_[1]; }
+ LOperand* scratch() const { return temps_[0]; }
- private:
- LEnvironment* lazy_deopt_env_;
+ DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+ "has-in-prototype-chain-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
};
@@ -1606,25 +1600,6 @@ class LIsConstructCallAndBranch final : public LControlInstruction<0, 2> {
};
-class LIsObjectAndBranch final : public LControlInstruction<1, 2> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1721,19 +1696,6 @@ class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFunctionLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-};
-
-
class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
public:
LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
index 32650e7ab0..108698a9ad 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -8,10 +8,10 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -630,8 +630,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info_->language_mode()) && info()->MayUseThis() &&
- !info()->is_native() && info()->scope()->has_this_declaration()) {
+ if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
@@ -666,16 +665,27 @@ bool LCodeGen::GeneratePrologue() {
if (info()->saves_caller_doubles()) {
SaveCallerDoubles();
}
+ return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+ Comment(";;; Prologue begin");
// Allocate a local context if needed.
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info()->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in x1.
- DCHECK(!info()->scope()->is_script_scope());
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+ if (info()->scope()->is_script_scope()) {
+ __ Mov(x10, Operand(info()->scope()->GetScopeInfo(info()->isolate())));
+ __ Push(x1, x10);
+ __ CallRuntime(Runtime::kNewScriptContext, 2);
+ deopt_mode = Safepoint::kLazyDeopt;
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -683,7 +693,7 @@ bool LCodeGen::GeneratePrologue() {
__ Push(x1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoLazyDeopt);
+ RecordSafepoint(deopt_mode);
// Context is returned in x0. It replaces the context passed to us. It's
// saved in the stack and kept live in cp.
__ Mov(cp, x0);
@@ -720,14 +730,7 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; End allocate local context");
}
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- // We have not executed any compiled code yet, so cp still holds the
- // incoming context.
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
-
- return !is_aborted();
+ Comment(";;; Prologue end");
}
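// Context allocation above, in summary: script scopes always call
// Runtime::kNewScriptContext and therefore need a lazy-deopt safepoint;
// scopes with at most FastNewContextStub::kMaximumSlots heap slots use the
// stub, whose result is always in new space (so the write barrier can be
// skipped); everything else falls back to Runtime::kNewFunctionContext.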
@@ -981,7 +984,6 @@ void LCodeGen::DeoptimizeBranch(
}
DCHECK(environment->HasBeenRegistered());
- DCHECK(info()->IsOptimizing() || info()->IsStub());
int id = environment->deoptimization_index();
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
@@ -1132,7 +1134,7 @@ void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
intptr_t current_pc = masm()->pc_offset();
@@ -1994,11 +1996,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Set the arguments count if adaption is not needed. Assumes that x0 is
- // available to write to at this point.
- if (dont_adapt_arguments) {
- __ Mov(arity_reg, arity);
- }
+ // Always initialize x0 to the number of actual arguments.
+ __ Mov(arity_reg, arity);
// Invoke function.
__ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
@@ -2065,9 +2064,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(instr->IsMarkedAsCall());
DCHECK(ToRegister(instr->function()).is(x1));
- if (instr->hydrogen()->pass_argument_count()) {
- __ Mov(x0, Operand(instr->arity()));
- }
+ __ Mov(x0, Operand(instr->arity()));
// Change context.
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
@@ -2101,11 +2098,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCompare: {
- StringCompareStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -2834,29 +2826,6 @@ void LCodeGen::DoDummyUse(LDummyUse* instr) {
}
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- // FunctionLiteral instruction is marked as call, we can trash any register.
- DCHECK(instr->IsMarkedAsCall());
-
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->kind());
- __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else {
- __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
- __ Mov(x1, Operand(pretenure ? factory()->true_value()
- : factory()->false_value()));
- __ Push(cp, x2, x1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
@@ -3009,135 +2978,39 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- // Assert that the arguments are in the registers expected by InstanceofStub.
- DCHECK(ToRegister(instr->left()).Is(InstanceofStub::left()));
- DCHECK(ToRegister(instr->right()).Is(InstanceofStub::right()));
-
- InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+ DCHECK(ToRegister(instr->result()).is(x0));
+ InstanceOfStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-
- // InstanceofStub returns a result in x0:
- // 0 => not an instance
- // smi 1 => instance.
- __ Cmp(x0, 0);
- __ LoadTrueFalseRoots(x0, x1);
- __ Csel(x0, x0, x1, eq);
}
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal: public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_);
- }
- virtual LInstruction* instr() { return instr_; }
- private:
- LInstanceOfKnownGlobal* instr_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred =
- new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label map_check, return_false, cache_miss, done;
- Register object = ToRegister(instr->value());
- Register result = ToRegister(instr->result());
- // x4 is expected in the associated deferred code and stub.
- Register map_check_site = x4;
- Register map = x5;
-
- // This instruction is marked as call. We can clobber any register.
- DCHECK(instr->IsMarkedAsCall());
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+ LHasInPrototypeChainAndBranch* instr) {
+ Register const object = ToRegister(instr->object());
+ Register const object_map = ToRegister(instr->scratch());
+ Register const object_prototype = object_map;
+ Register const prototype = ToRegister(instr->prototype());
- // We must take into account that object is in x11.
- DCHECK(object.Is(x11));
- Register scratch = x10;
-
- // A Smi is not instance of anything.
- __ JumpIfSmi(object, &return_false);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- {
- // Below we use Factory::the_hole_value() on purpose instead of loading from
- // the root array to force relocation and later be able to patch with a
- // custom value.
- InstructionAccurateScope scope(masm(), 5);
- __ bind(&map_check);
- // Will be patched with the cached map.
- Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ ldr(scratch, Immediate(cell));
- __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
- __ cmp(map, scratch);
- __ b(&cache_miss, ne);
- // The address of this instruction is computed relative to the map check
- // above, so check the size of the code generated.
- DCHECK(masm()->InstructionsGeneratedSince(&map_check) == 4);
- // Will be patched with the cached result.
- __ ldr(result, Immediate(factory()->the_hole_value()));
+ // The {object} must be a spec object. It's sufficient to know that {object}
+ // is not a smi, since all other non-spec objects have {null} prototypes and
+ // will be ruled out below.
+ if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+ __ JumpIfSmi(object, instr->FalseLabel(chunk_));
}
- __ B(&done);
-
- // The inlined call site cache did not match.
- // Check null and string before calling the deferred code.
- __ Bind(&cache_miss);
- // Compute the address of the map check. It must not be clobbered until the
- // InstanceOfStub has used it.
- __ Adr(map_check_site, &map_check);
- // Null is not instance of anything.
- __ JumpIfRoot(object, Heap::kNullValueRootIndex, &return_false);
-
- // String values are not instances of anything.
- // Return false if the object is a string. Otherwise, jump to the deferred
- // code.
- // Note that we can't jump directly to deferred code from
- // IsObjectJSStringType, because it uses tbz for the jump and the deferred
- // code can be out of range.
- __ IsObjectJSStringType(object, scratch, NULL, &return_false);
- __ B(deferred->entry());
-
- __ Bind(&return_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
-
- // Here result is either true or false.
- __ Bind(deferred->exit());
- __ Bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- Register result = ToRegister(instr->result());
- DCHECK(result.Is(x0)); // InstanceofStub returns its result in x0.
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
-
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
-
- // Prepare InstanceofStub arguments.
- DCHECK(ToRegister(instr->value()).Is(InstanceofStub::left()));
- __ LoadObject(InstanceofStub::right(), instr->function());
- InstanceofStub stub(isolate(), flags);
- CallCodeGeneric(stub.GetCode(),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-
- // Put the result value into the result register slot.
- __ StoreToSafepointRegisterSlot(result, result);
+ // Loop through the {object}'s prototype chain looking for the {prototype}.
+ __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ Label loop;
+ __ Bind(&loop);
+ __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Cmp(object_prototype, prototype);
+ __ B(eq, instr->TrueLabel(chunk_));
+ __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+ __ B(eq, instr->FalseLabel(chunk_));
+ __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ B(&loop);
}
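// The loop above is the assembly form of this hedged C++ sketch:
//   for (Map* map = object->map();;) {
//     Object* proto = map->prototype();
//     if (proto == prototype) goto true_target;
//     if (proto->IsNull()) goto false_target;
//     map = HeapObject::cast(proto)->map();
//   }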
@@ -3197,31 +3070,6 @@ void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
}
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Label* is_object = instr->TrueLabel(chunk_);
- Label* is_not_object = instr->FalseLabel(chunk_);
- Register value = ToRegister(instr->value());
- Register map = ToRegister(instr->temp1());
- Register scratch = ToRegister(instr->temp2());
-
- __ JumpIfSmi(value, is_not_object);
- __ JumpIfRoot(value, Heap::kNullValueRootIndex, is_object);
-
- __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
-
- // Check for undetectable objects.
- __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ TestAndBranchIfAnySet(scratch, 1 << Map::kIsUndetectable, is_not_object);
-
- // Check that instance type is in object type range.
- __ IsInstanceJSObjectType(map, scratch, NULL);
- // Flags have been updated by IsInstanceJSObjectType. We can now test the
- // flags for "le" condition to check if the object's type is a valid
- // JS object type.
- EmitBranch(instr, le);
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -4349,7 +4197,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
Register result =
is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
Register left =
- is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left()) ;
+ is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
int32_t right = ToInteger32(instr->right());
DCHECK((right > -kMaxInt) && (right < kMaxInt));
@@ -5652,16 +5500,13 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
-
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- InlineSmiCheckInfo::EmitNotInlined(masm());
+ DCHECK(ToRegister(instr->left()).is(x1));
+ DCHECK(ToRegister(instr->right()).is(x0));
- Condition condition = TokenToCondition(op, false);
+ Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
- EmitCompareAndBranch(instr, condition, x0, 0);
+ EmitCompareAndBranch(instr, TokenToCondition(instr->op(), false), x0, 0);
}
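// CodeFactory::StringCompare leaves a Smi in x0 with the usual ordering
// contract: negative for left < right, zero for equal, positive for
// left > right, so comparing x0 against 0 under TokenToCondition(op)
// decides the branch.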
@@ -5805,7 +5650,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// x0 = regexp literal clone.
// x10-x12 are used as temporaries.
int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
__ LoadObject(x7, instr->hydrogen()->literals());
__ Ldr(x1, FieldMemOperand(x7, literal_offset));
__ JumpIfNotRoot(x1, Heap::kUndefinedValueRootIndex, &materialized);
@@ -5976,14 +5821,15 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
} else if (String::Equals(type_name, factory->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
DCHECK(instr->temp1() != NULL);
- Register type = ToRegister(instr->temp1());
+ Register scratch = ToRegister(instr->temp1());
__ JumpIfSmi(value, false_label);
- __ JumpIfObjectType(value, type, type, JS_FUNCTION_TYPE, true_label);
- // HeapObject's type has been loaded into type register by JumpIfObjectType.
- EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
+ __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ And(scratch, scratch,
+ (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+ EmitCompareAndBranch(instr, eq, scratch, 1 << Map::kIsCallable);
} else if (String::Equals(type_name, factory->object_string())) {
DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
@@ -5992,13 +5838,13 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
__ JumpIfSmi(value, false_label);
__ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
- __ JumpIfObjectType(value, map, scratch,
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
- __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ B(gt, false_label);
- // Check for undetectable objects => false.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(value, map, scratch, FIRST_SPEC_OBJECT_TYPE,
+ false_label, lt);
+ // Check for callable or undetectable objects => false.
__ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
+ EmitTestAndBranch(instr, eq, scratch,
+ (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
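The rewritten "function" and "object" cases above reduce typeof to two map
bits: "function" means callable and not undetectable, while "object" means a
non-callable, non-undetectable spec object (with null special-cased first). A
rough JS-level illustration of the contract these bit tests implement:

    console.log(typeof function () {});  // "function": callable, not undetectable
    console.log(typeof {});              // "object": spec object, not callable
    console.log(typeof []);              // "object"
    console.log(typeof null);            // "object": handled before the map test
    // A callable *and* undetectable object (document.all is the classic case)
    // answers "undefined", which is why both bits are masked together.
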
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.h b/deps/v8/src/arm64/lithium-codegen-arm64.h
index d73b060cd7..20e572c65c 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.h
@@ -132,7 +132,6 @@ class LCodeGen: public LCodeGenBase {
LOperand* temp1,
LOperand* temp2);
void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register result,
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 586df33c4d..5e8abe7215 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -4,15 +4,16 @@
#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
+#include "src/arm64/frames-arm64.h"
+#include "src/arm64/macro-assembler-arm64.h"
+
namespace v8 {
namespace internal {
@@ -1611,6 +1612,19 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertFunction(Register object) {
+ if (emit_debug_code()) {
+ AssertNotSmi(object, kOperandIsASmiAndNotAFunction);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
+ Check(eq, kOperandIsNotAFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -1687,35 +1701,32 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
+ int native_context_index) {
// Load the builtins object into target register.
Ldr(target, GlobalObjectMemOperand());
- Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ Ldr(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
- Ldr(target, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+ Ldr(target, ContextMemOperand(target, native_context_index));
}
-void MacroAssembler::GetBuiltinEntry(Register target,
- Register function,
- Builtins::JavaScript id) {
+void MacroAssembler::GetBuiltinEntry(Register target, Register function,
+ int native_context_index) {
DCHECK(!AreAliased(target, function));
- GetBuiltinFunction(function, id);
+ GetBuiltinFunction(function, native_context_index);
// Load the code entry point from the builtins object.
Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper) {
ASM_LOCATION("MacroAssembler::InvokeBuiltin");
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
// Get the builtin entry in x2 and setup the function object in x1.
- GetBuiltinEntry(x2, x1, id);
+ GetBuiltinEntry(x2, x1, native_context_index);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(x2));
Call(x2);
@@ -1865,24 +1876,31 @@ void MacroAssembler::Jump(Register target) {
}
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond) {
+ if (cond == nv) return;
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
+ Label done;
+ if (cond != al) B(NegateCondition(cond), &done);
Mov(temp, Operand(target, rmode));
Br(temp);
+ Bind(&done);
}
-void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
+ Condition cond) {
DCHECK(!RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(target), rmode);
+ Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
}
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
AllowDeferredHandleDereference embedding_raw_address;
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}
@@ -2053,79 +2071,6 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
}
-void MacroAssembler::LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- DCHECK(!AreAliased(object, result, scratch1, scratch2, scratch3));
-
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
- FixedArray::kLengthOffset));
- Asr(mask, mask, 1); // Divide length by two.
- Sub(mask, mask, 1); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
-
- JumpIfSmi(object, &is_smi);
- JumpIfNotHeapNumber(object, not_found);
-
- STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
- Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
- Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
- Eor(scratch1, scratch1, scratch2);
- And(scratch1, scratch1, mask);
-
- // Calculate address of entry in string cache: each entry consists of two
- // pointer sized fields.
- Add(scratch1, number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- JumpIfSmi(probe, not_found);
- Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
- Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
- Fcmp(d0, d1);
- B(ne, not_found);
- B(&load_result_from_cache);
-
- Bind(&is_smi);
- Register scratch = scratch1;
- And(scratch, mask, Operand::UntagSmi(object));
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- Add(scratch, number_string_cache,
- Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- Cmp(object, probe);
- B(ne, not_found);
-
- // Get the result from the cache.
- Bind(&load_result_from_cache);
- Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
- scratch1, scratch2);
-}
-
-
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
FPRegister value,
FPRegister scratch_d,
@@ -2559,11 +2504,11 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
+ Mov(x0, actual.immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
- Mov(x0, actual.immediate());
if (expected.immediate() ==
SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
// Don't worry about adapting arguments for builtins that
@@ -2581,11 +2526,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
} else { // expected is a register.
Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
: Operand(actual.reg());
+ Mov(x0, actual_op);
// If actual == expected perform a regular invocation.
Cmp(expected.reg(), actual_op);
B(eq, &regular_invoke);
- // Otherwise set up x0 for the argument adaptor.
- Mov(x0, actual_op);
}
// If the argument counts may mismatch, generate a call to the argument
@@ -2835,6 +2779,14 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
+void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+ Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ Ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ Ldr(vector,
+ FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// Out-of-line constant pool not implemented on arm64.
@@ -3046,6 +2998,12 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+ Ldr(dst, GlobalObjectMemOperand());
+ Ldr(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
+}
+
+
void MacroAssembler::DebugBreak() {
Mov(x0, 0);
Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
@@ -3587,35 +3545,10 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
}
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- BoundFunctionAction action) {
+void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss) {
DCHECK(!AreAliased(function, result, scratch));
- Label non_instance;
- if (action == kMissOnBoundFunction) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
-
- Register scratch_w = scratch.W();
- Ldr(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- // On 64-bit platforms, compiler hints field is not a smi. See definition of
- // kCompilerHintsOffset in src/objects.h.
- Ldr(scratch_w,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
-
- // Make sure that the function has an instance prototype.
- Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
- }
-
// Get the prototype or initial map from the function.
Ldr(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -3632,15 +3565,6 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Get the prototype from the initial map.
Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- if (action == kMissOnBoundFunction) {
- B(&done);
-
- // Non-instance prototype: fetch prototype from constructor field in initial
- // map.
- Bind(&non_instance);
- GetMapConstructor(result, result, scratch, scratch);
- }
-
// All done.
Bind(&done);
}
@@ -4441,24 +4365,6 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
}
-void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result) {
- DCHECK(!result.Is(ldr_location));
- const uint32_t kLdrLitOffset_lsb = 5;
- const uint32_t kLdrLitOffset_width = 19;
- Ldr(result, MemOperand(ldr_location));
- if (emit_debug_code()) {
- And(result, result, LoadLiteralFMask);
- Cmp(result, LoadLiteralFixed);
- Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
- // The instruction was clobbered. Reload it.
- Ldr(result, MemOperand(ldr_location));
- }
- Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
- Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
-}
-
-
void MacroAssembler::EnsureNotWhite(
Register value,
Register bitmap_scratch,
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 76e2fdb3fb..769140d917 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -959,6 +959,9 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+ void AssertFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -981,18 +984,6 @@ class MacroAssembler : public Assembler {
// floating point representation of -0.0.
void JumpIfMinusZero(Register input, Label* on_negative_zero);
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- void LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
// Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
// output.
void ClampInt32ToUint8(Register in_out);
@@ -1145,25 +1136,22 @@ class MacroAssembler : public Assembler {
int num_arguments);
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+ // Invoke specified builtin JavaScript function.
+ void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
// setup the function in the function register.
- void GetBuiltinEntry(Register target,
- Register function,
- Builtins::JavaScript id);
+ void GetBuiltinEntry(Register target, Register function,
+ int native_context_index);
// Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void GetBuiltinFunction(Register target, int native_context_index);
void Jump(Register target);
- void Jump(Address target, RelocInfo::Mode rmode);
- void Jump(Handle<Code> code, RelocInfo::Mode rmode);
- void Jump(intptr_t target, RelocInfo::Mode rmode);
+ void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Register target);
void Call(Label* target);
@@ -1364,26 +1352,13 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Support functions.
- // Try to get function prototype of a function and puts the value in the
- // result register. Checks that the function really is a function and jumps
- // to the miss label if the fast checks fail. The function register will be
- // untouched; the other registers may be clobbered.
- enum BoundFunctionAction {
- kMissOnBoundFunction,
- kDontMissOnBoundFunction
- };
-
// Machine code version of Map::GetConstructor().
// |temp| holds |result|'s map when done, and |temp2| its instance type.
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- BoundFunctionAction action =
- kDontMissOnBoundFunction);
+ void TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
@@ -1616,6 +1591,9 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Frames.
+ // Load the type feedback vector from a JavaScript frame.
+ void EmitLoadTypeFeedbackVector(Register vector);
+
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
@@ -1706,6 +1684,9 @@ class MacroAssembler : public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst);
+
// Emit code for a truncating division by a constant. The dividend register is
// unchanged. Dividend and result must be different.
void TruncatingDiv(Register result, Register dividend, int32_t divisor);
@@ -1883,12 +1864,6 @@ class MacroAssembler : public Assembler {
Label* on_black);
- // Get the location of a relocated constant (its address in the constant pool)
- // from its load site.
- void GetRelocatedValueLocation(Register ldr_location,
- Register result);
-
-
// ---------------------------------------------------------------------------
// Debugging.
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index 83fd164bb6..4e6a9d91e1 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -177,14 +177,14 @@ double Simulator::CallDouble(byte* entry, CallArgument* args) {
int64_t Simulator::CallJS(byte* entry,
- byte* function_entry,
- JSFunction* func,
+ Object* new_target,
+ Object* target,
 Object* recv,
int64_t argc,
Object*** argv) {
CallArgument args[] = {
- CallArgument(function_entry),
- CallArgument(func),
+ CallArgument(new_target),
+ CallArgument(target),
 CallArgument(recv),
CallArgument(argc),
CallArgument(argv),
@@ -193,6 +193,7 @@ int64_t Simulator::CallJS(byte* entry,
return CallInt64(entry, args);
}
+
int64_t Simulator::CallRegExp(byte* entry,
String* input,
int64_t start_offset,
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 6ff0013ebd..e4d9a81ffd 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -151,6 +151,13 @@ typedef SimRegisterBase SimFPRegister; // v0-v31
class Simulator : public DecoderVisitor {
public:
+ static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+ size_t size) {
+ USE(i_cache);
+ USE(start);
+ USE(size);
+ }
+
explicit Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
Isolate* isolate = NULL,
FILE* stream = stderr);
@@ -180,8 +187,8 @@ class Simulator : public DecoderVisitor {
// generated RegExp code with 10 parameters. These are convenience functions,
// which set up the simulator state and grab the result on return.
int64_t CallJS(byte* entry,
- byte* function_entry,
- JSFunction* func,
+ Object* new_target,
+ Object* target,
 Object* recv,
int64_t argc,
Object*** argv);
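CallJS's arguments now mirror the updated JS entry convention: the entry
trampoline receives the new.target value and the callee instead of a raw code
entry plus JSFunction. At the language level, new.target is what that extra
slot carries; a minimal ES6 illustration:

    function F() { return { nt: new.target }; }
    console.log(F().nt);            // undefined   ([[Call]])
    console.log(new F().nt === F);  // true        ([[Construct]])
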
diff --git a/deps/v8/src/array-iterator.js b/deps/v8/src/array-iterator.js
index 9698a07b04..bf17a0ac8c 100644
--- a/deps/v8/src/array-iterator.js
+++ b/deps/v8/src/array-iterator.js
@@ -10,7 +10,18 @@ var $arrayValues;
%CheckIsBootstrapping();
+// -----------------------------------------------------------------------
+// Imports
+
+var arrayIterationKindSymbol =
+ utils.ImportNow("array_iteration_kind_symbol");
+var arrayIteratorNextIndexSymbol =
+ utils.ImportNow("array_iterator_next_symbol");
+var arrayIteratorObjectSymbol =
+ utils.ImportNow("array_iterator_object_symbol");
var GlobalArray = global.Array;
+var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
macro TYPED_ARRAYS(FUNCTION)
FUNCTION(Uint8Array)
@@ -30,10 +41,7 @@ endmacro
TYPED_ARRAYS(COPY_FROM_GLOBAL)
-var arrayIteratorObjectSymbol = GLOBAL_PRIVATE("ArrayIterator#object");
-var arrayIteratorNextIndexSymbol = GLOBAL_PRIVATE("ArrayIterator#next");
-var arrayIterationKindSymbol = GLOBAL_PRIVATE("ArrayIterator#kind");
-
+// -----------------------------------------------------------------------
function ArrayIterator() {}
@@ -54,54 +62,49 @@ function CreateArrayIterator(array, kind) {
}
-// 15.19.4.3.4 CreateItrResultObject
-function CreateIteratorResultObject(value, done) {
- return {value: value, done: done};
-}
-
-
// 22.1.5.2.2 %ArrayIteratorPrototype%[@@iterator]
function ArrayIteratorIterator() {
return this;
}
-// 15.4.5.2.2 ArrayIterator.prototype.next( )
+// ES6 section 22.1.5.2.1 %ArrayIteratorPrototype%.next( )
function ArrayIteratorNext() {
- var iterator = TO_OBJECT(this);
+ var iterator = this;
+ var value = UNDEFINED;
+ var done = true;
- if (!HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
+ if (!IS_SPEC_OBJECT(iterator) ||
+ !HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'Array Iterator.prototype.next', this);
}
var array = GET_PRIVATE(iterator, arrayIteratorObjectSymbol);
- if (IS_UNDEFINED(array)) {
- return CreateIteratorResultObject(UNDEFINED, true);
- }
-
- var index = GET_PRIVATE(iterator, arrayIteratorNextIndexSymbol);
- var itemKind = GET_PRIVATE(iterator, arrayIterationKindSymbol);
- var length = TO_UINT32(array.length);
-
- // "sparse" is never used.
-
- if (index >= length) {
- SET_PRIVATE(iterator, arrayIteratorObjectSymbol, UNDEFINED);
- return CreateIteratorResultObject(UNDEFINED, true);
- }
-
- SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, index + 1);
-
- if (itemKind == ITERATOR_KIND_VALUES) {
- return CreateIteratorResultObject(array[index], false);
+ if (!IS_UNDEFINED(array)) {
+ var index = GET_PRIVATE(iterator, arrayIteratorNextIndexSymbol);
+ var itemKind = GET_PRIVATE(iterator, arrayIterationKindSymbol);
+ var length = TO_UINT32(array.length);
+
+ // "sparse" is never used.
+
+ if (index >= length) {
+ SET_PRIVATE(iterator, arrayIteratorObjectSymbol, UNDEFINED);
+ } else {
+ SET_PRIVATE(iterator, arrayIteratorNextIndexSymbol, index + 1);
+
+ if (itemKind == ITERATOR_KIND_VALUES) {
+ value = array[index];
+ } else if (itemKind == ITERATOR_KIND_ENTRIES) {
+ value = [index, array[index]];
+ } else {
+ value = index;
+ }
+ done = false;
+ }
}
- if (itemKind == ITERATOR_KIND_ENTRIES) {
- return CreateIteratorResultObject([index, array[index]], false);
- }
-
- return CreateIteratorResultObject(index, false);
+ return %_CreateIterResultObject(value, done);
}
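The rewrite above gives next() a single exit through %_CreateIterResultObject
and a stricter receiver check (any non-object, or an object without the
iterator's private state, throws). A rough userland sketch of the same control
flow; the __object, __nextIndex and __kind properties are hypothetical
stand-ins for the private symbols used by the real implementation:

    function makeArrayIterator(array, kind) {
      return {
        __object: array,     // backing array; undefined once exhausted
        __nextIndex: 0,
        __kind: kind,        // "keys", "values" or "entries"
        next: function () {
          if (this === null || typeof this !== "object" ||
              !("__nextIndex" in this)) {
            throw new TypeError("next called on incompatible receiver");
          }
          var value, done = true;
          var target = this.__object;
          if (target !== undefined) {
            var index = this.__nextIndex;
            var length = target.length >>> 0;
            if (index >= length) {
              this.__object = undefined;   // exhausted: drop the backing array
            } else {
              this.__nextIndex = index + 1;
              done = false;
              if (this.__kind === "values") value = target[index];
              else if (this.__kind === "entries") value = [index, target[index]];
              else value = index;
            }
          }
          return { value: value, done: done };   // the single exit
        }
      };
    }

    var it = makeArrayIterator(["a", "b"], "entries");
    console.log(it.next());  // { value: [0, "a"], done: false }
    console.log(it.next());  // { value: [1, "b"], done: false }
    console.log(it.next());  // { value: undefined, done: true }
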
@@ -126,10 +129,10 @@ function ArrayKeys() {
utils.InstallFunctions(ArrayIterator.prototype, DONT_ENUM, [
'next', ArrayIteratorNext
]);
-utils.SetFunctionName(ArrayIteratorIterator, symbolIterator);
-%AddNamedProperty(ArrayIterator.prototype, symbolIterator,
+utils.SetFunctionName(ArrayIteratorIterator, iteratorSymbol);
+%AddNamedProperty(ArrayIterator.prototype, iteratorSymbol,
ArrayIteratorIterator, DONT_ENUM);
-%AddNamedProperty(ArrayIterator.prototype, symbolToStringTag,
+%AddNamedProperty(ArrayIterator.prototype, toStringTagSymbol,
"Array Iterator", READ_ONLY | DONT_ENUM);
utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
@@ -142,14 +145,14 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
// InstallFunctions block, as it'll be redundant.
utils.SetFunctionName(ArrayValues, 'values');
-%AddNamedProperty(GlobalArray.prototype, symbolIterator, ArrayValues,
+%AddNamedProperty(GlobalArray.prototype, iteratorSymbol, ArrayValues,
DONT_ENUM);
macro EXTEND_TYPED_ARRAY(NAME)
%AddNamedProperty(GlobalNAME.prototype, 'entries', ArrayEntries, DONT_ENUM);
%AddNamedProperty(GlobalNAME.prototype, 'values', ArrayValues, DONT_ENUM);
%AddNamedProperty(GlobalNAME.prototype, 'keys', ArrayKeys, DONT_ENUM);
- %AddNamedProperty(GlobalNAME.prototype, symbolIterator, ArrayValues,
+ %AddNamedProperty(GlobalNAME.prototype, iteratorSymbol, ArrayValues,
DONT_ENUM);
endmacro
@@ -158,14 +161,8 @@ TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
// -------------------------------------------------------------------
// Exports
-utils.Export(function(to) {
- to.ArrayIteratorCreateResultObject = CreateIteratorResultObject;
-});
-
$arrayValues = ArrayValues;
-utils.ExportToRuntime(function(to) {
- to.ArrayValues = ArrayValues;
-});
+%InstallToContext(["array_values_iterator", ArrayValues]);
})
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 4520a34e35..bf04bb7e7b 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -2,14 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $arrayConcat;
-var $arrayPush;
-var $arrayPop;
-var $arrayShift;
-var $arraySlice;
-var $arraySplice;
-var $arrayUnshift;
-
(function(global, utils) {
"use strict";
@@ -30,6 +22,7 @@ var ObjectIsSealed;
var ObjectToString;
var ToNumber;
var ToString;
+var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
utils.Import(function(from) {
Delete = from.Delete;
@@ -73,7 +66,7 @@ function GetSortedArrayKeys(array, indices) {
}
}
}
- %_CallFunction(keys, function(a, b) { return a - b; }, ArraySort);
+ keys.sort(function(a, b) { return a - b; });
}
return keys;
}
@@ -216,10 +209,11 @@ function Join(array, length, separator, convert) {
function ConvertToString(x) {
- // Assumes x is a non-string.
- if (IS_NUMBER(x)) return %_NumberToString(x);
- if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
- return (IS_NULL_OR_UNDEFINED(x)) ? '' : ToString($defaultString(x));
+ if (IS_NULL_OR_UNDEFINED(x)) {
+ return '';
+ } else {
+ return TO_STRING(x);
+ }
}
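ConvertToString now defers to the spec's ToString for everything except null
and undefined, which join() renders as the empty string. Observable behaviour:

    console.log([null, undefined, 0, false, "x"].join(","));  // ",,0,false,x"
    console.log([{ toString: function () { return "obj"; } }].join());  // "obj"
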
@@ -231,7 +225,7 @@ function ConvertToLocaleString(e) {
// must throw a TypeError if ToObject(e).toLocaleString isn't
// callable.
var e_obj = TO_OBJECT(e);
- return ToString(e_obj.toLocaleString());
+ return TO_STRING(e_obj.toLocaleString());
}
}
@@ -394,15 +388,15 @@ function ArrayToString() {
array = TO_OBJECT(this);
func = array.join;
}
- if (!IS_SPEC_FUNCTION(func)) {
+ if (!IS_CALLABLE(func)) {
return %_CallFunction(array, ObjectToString);
}
- return %_CallFunction(array, func);
+ return %_Call(func, array);
}
function InnerArrayToLocaleString(array, length) {
- var len = TO_UINT32(length);
+ var len = TO_LENGTH_OR_UINT32(length);
if (len === 0) return "";
return Join(array, len, ',', ConvertToLocaleString);
}
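TO_LENGTH_OR_UINT32, used throughout the hunks below, is a transitional macro:
where the new ToLength semantics are enabled it clamps lengths into
[0, 2^53 - 1] instead of wrapping them modulo 2^32. The helpers here are
illustrative reimplementations of the two conversions, not the macros
themselves:

    function toUint32(x) { return x >>> 0; }
    function toLength(x) {
      var n = Math.trunc(Number(x)) || 0;  // NaN and -0 become 0
      return Math.min(Math.max(n, 0), Math.pow(2, 53) - 1);
    }

    console.log(toUint32(-1), toLength(-1));  // 4294967295 0
    console.log(toUint32(Math.pow(2, 32)),
                toLength(Math.pow(2, 32)));   // 0 4294967296
    // With clamping, a negative array-like length means "empty" rather than
    // ~4 billion holes:
    console.log(Array.prototype.join.call({ length: -1 }));  // ""
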
@@ -418,8 +412,8 @@ function ArrayToLocaleString() {
function InnerArrayJoin(separator, array, length) {
if (IS_UNDEFINED(separator)) {
separator = ',';
- } else if (!IS_STRING(separator)) {
- separator = $nonStringToString(separator);
+ } else {
+ separator = TO_STRING(separator);
}
var result = %_FastOneByteArrayJoin(array, separator);
@@ -428,9 +422,8 @@ function InnerArrayJoin(separator, array, length) {
// Fast case for one-element arrays.
if (length === 1) {
var e = array[0];
- if (IS_STRING(e)) return e;
if (IS_NULL_OR_UNDEFINED(e)) return '';
- return $nonStringToString(e);
+ return TO_STRING(e);
}
return Join(array, length, separator, ConvertToString);
@@ -441,7 +434,7 @@ function ArrayJoin(separator) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.join");
var array = TO_OBJECT(this);
- var length = TO_UINT32(array.length);
+ var length = TO_LENGTH_OR_UINT32(array.length);
return InnerArrayJoin(separator, array, length);
}
@@ -470,7 +463,7 @@ function ArrayPop() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.pop");
var array = TO_OBJECT(this);
- var n = TO_UINT32(array.length);
+ var n = TO_LENGTH_OR_UINT32(array.length);
if (n == 0) {
array.length = n;
return;
@@ -488,7 +481,7 @@ function ArrayPop() {
function ObservedArrayPush() {
- var n = TO_UINT32(this.length);
+ var n = TO_LENGTH_OR_UINT32(this.length);
var m = %_ArgumentsLength();
try {
@@ -516,7 +509,7 @@ function ArrayPush() {
return ObservedArrayPush.apply(this, arguments);
var array = TO_OBJECT(this);
- var n = TO_UINT32(array.length);
+ var n = TO_LENGTH_OR_UINT32(array.length);
var m = %_ArgumentsLength();
for (var i = 0; i < m; i++) {
@@ -529,24 +522,6 @@ function ArrayPush() {
}
-// Returns an array containing the array elements of the object followed
-// by the array elements of each argument in order. See ECMA-262,
-// section 15.4.4.7.
-function ArrayConcatJS(arg1) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.concat");
-
- var array = TO_OBJECT(this);
- var arg_count = %_ArgumentsLength();
- var arrays = new InternalArray(1 + arg_count);
- arrays[0] = array;
- for (var i = 0; i < arg_count; i++) {
- arrays[i + 1] = %_Arguments(i);
- }
-
- return %ArrayConcat(arrays);
-}
-
-
// For implementing reverse() on large, sparse arrays.
function SparseReverse(array, len) {
var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
@@ -631,7 +606,7 @@ function ArrayReverse() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
var array = TO_OBJECT(this);
- var len = TO_UINT32(array.length);
+ var len = TO_LENGTH_OR_UINT32(array.length);
var isArray = IS_ARRAY(array);
if (UseSparseVariant(array, len, isArray, len)) {
@@ -666,7 +641,7 @@ function ArrayShift() {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.shift");
var array = TO_OBJECT(this);
- var len = TO_UINT32(array.length);
+ var len = TO_LENGTH_OR_UINT32(array.length);
if (len === 0) {
array.length = 0;
@@ -693,7 +668,7 @@ function ArrayShift() {
function ObservedArrayUnshift() {
- var len = TO_UINT32(this.length);
+ var len = TO_LENGTH_OR_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
try {
@@ -720,7 +695,7 @@ function ArrayUnshift(arg1) { // length == 1
return ObservedArrayUnshift.apply(this, arguments);
var array = TO_OBJECT(this);
- var len = TO_UINT32(array.length);
+ var len = TO_LENGTH_OR_UINT32(array.length);
var num_arguments = %_ArgumentsLength();
if (len > 0 && UseSparseVariant(array, len, IS_ARRAY(array), len) &&
@@ -744,7 +719,7 @@ function ArraySlice(start, end) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.slice");
var array = TO_OBJECT(this);
- var len = TO_UINT32(array.length);
+ var len = TO_LENGTH_OR_UINT32(array.length);
var start_i = TO_INTEGER(start);
var end_i = len;
@@ -815,7 +790,7 @@ function ComputeSpliceDeleteCount(delete_count, num_arguments, len, start_i) {
function ObservedArraySplice(start, delete_count) {
var num_arguments = %_ArgumentsLength();
- var len = TO_UINT32(this.length);
+ var len = TO_LENGTH_OR_UINT32(this.length);
var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
start_i);
@@ -862,7 +837,7 @@ function ArraySplice(start, delete_count) {
var num_arguments = %_ArgumentsLength();
var array = TO_OBJECT(this);
- var len = TO_UINT32(array.length);
+ var len = TO_LENGTH_OR_UINT32(array.length);
var start_i = ComputeSpliceStartIndex(TO_INTEGER(start), len);
var del_count = ComputeSpliceDeleteCount(delete_count, num_arguments, len,
start_i);
@@ -907,11 +882,11 @@ function ArraySplice(start, delete_count) {
}
-function InnerArraySort(length, comparefn) {
+function InnerArraySort(array, length, comparefn) {
// In-place QuickSort algorithm.
// For short (length <= 22) arrays, insertion sort is used for efficiency.
- if (!IS_SPEC_FUNCTION(comparefn)) {
+ if (!IS_CALLABLE(comparefn)) {
comparefn = function (x, y) {
if (x === y) return 0;
if (%_IsSmi(x) && %_IsSmi(y)) {
@@ -928,7 +903,7 @@ function InnerArraySort(length, comparefn) {
var element = a[i];
for (var j = i - 1; j >= from; j--) {
var tmp = a[j];
- var order = %_CallFunction(UNDEFINED, tmp, element, comparefn);
+ var order = comparefn(tmp, element);
if (order > 0) {
a[j + 1] = tmp;
} else {
@@ -940,15 +915,19 @@ function InnerArraySort(length, comparefn) {
};
var GetThirdIndex = function(a, from, to) {
- var t_array = [];
+ var t_array = new InternalArray();
// Use both 'from' and 'to' to determine the pivot candidates.
var increment = 200 + ((to - from) & 15);
- for (var i = from + 1, j = 0; i < to - 1; i += increment, j++) {
+ var j = 0;
+ from += 1;
+ to -= 1;
+ for (var i = from; i < to; i += increment) {
t_array[j] = [i, a[i]];
+ j++;
}
- %_CallFunction(t_array, function(a, b) {
- return %_CallFunction(UNDEFINED, a[1], b[1], comparefn);
- }, ArraySort);
+ t_array.sort(function(a, b) {
+ return comparefn(a[1], b[1]);
+ });
var third_index = t_array[t_array.length >> 1][0];
return third_index;
}
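GetThirdIndex keeps its sampling scheme, a stride of 200 plus a 0..15 jitter
derived from the range, with the median candidate picked by position, but now
collects candidates in an InternalArray and sorts them with a plain method
call instead of %_CallFunction. A hypothetical standalone port of the
sampling (in the real code it is only consulted for long subranges):

    function thirdIndex(a, from, to, comparefn) {
      var candidates = [];
      var increment = 200 + ((to - from) & 15);  // stride with 0..15 jitter
      var j = 0;
      from += 1;
      to -= 1;
      for (var i = from; i < to; i += increment) {
        candidates[j] = [i, a[i]];               // [index, sampled value]
        j++;
      }
      candidates.sort(function (x, y) { return comparefn(x[1], y[1]); });
      return candidates[candidates.length >> 1][0];  // index of median sample
    }

    var data = [];
    for (var k = 0; k < 2000; k++) data.push(Math.random());
    console.log(thirdIndex(data, 0, data.length,
                           function (x, y) { return x - y; }));
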
@@ -970,14 +949,14 @@ function InnerArraySort(length, comparefn) {
var v0 = a[from];
var v1 = a[to - 1];
var v2 = a[third_index];
- var c01 = %_CallFunction(UNDEFINED, v0, v1, comparefn);
+ var c01 = comparefn(v0, v1);
if (c01 > 0) {
// v1 < v0, so swap them.
var tmp = v0;
v0 = v1;
v1 = tmp;
} // v0 <= v1.
- var c02 = %_CallFunction(UNDEFINED, v0, v2, comparefn);
+ var c02 = comparefn(v0, v2);
if (c02 >= 0) {
// v2 <= v0 <= v1.
var tmp = v0;
@@ -986,7 +965,7 @@ function InnerArraySort(length, comparefn) {
v1 = tmp;
} else {
// v0 <= v1 && v0 < v2
- var c12 = %_CallFunction(UNDEFINED, v1, v2, comparefn);
+ var c12 = comparefn(v1, v2);
if (c12 > 0) {
// v0 <= v2 < v1
var tmp = v1;
@@ -1007,7 +986,7 @@ function InnerArraySort(length, comparefn) {
// From i to high_start are elements that haven't been compared yet.
partition: for (var i = low_end + 1; i < high_start; i++) {
var element = a[i];
- var order = %_CallFunction(UNDEFINED, element, pivot, comparefn);
+ var order = comparefn(element, pivot);
if (order < 0) {
a[i] = a[low_end];
a[low_end] = element;
@@ -1017,7 +996,7 @@ function InnerArraySort(length, comparefn) {
high_start--;
if (high_start == i) break partition;
var top_elem = a[high_start];
- order = %_CallFunction(UNDEFINED, top_elem, pivot, comparefn);
+ order = comparefn(top_elem, pivot);
} while (order > 0);
a[i] = a[high_start];
a[high_start] = element;
@@ -1152,9 +1131,9 @@ function InnerArraySort(length, comparefn) {
return first_undefined;
};
- if (length < 2) return this;
+ if (length < 2) return array;
- var is_array = IS_ARRAY(this);
+ var is_array = IS_ARRAY(array);
var max_prototype_element;
if (!is_array) {
// For compatibility with JSC, we also sort elements inherited from
@@ -1165,28 +1144,28 @@ function InnerArraySort(length, comparefn) {
// The specification allows "implementation dependent" behavior
 // if an object on the prototype chain has an element that
// might interact with sorting.
- max_prototype_element = CopyFromPrototype(this, length);
+ max_prototype_element = CopyFromPrototype(array, length);
}
// %RemoveArrayHoles returns -1 if fast removal is not supported.
- var num_non_undefined = %RemoveArrayHoles(this, length);
+ var num_non_undefined = %RemoveArrayHoles(array, length);
if (num_non_undefined == -1) {
// The array is observed, or there were indexed accessors in the array.
// Move array holes and undefineds to the end using a Javascript function
// that is safe in the presence of accessors and is observable.
- num_non_undefined = SafeRemoveArrayHoles(this);
+ num_non_undefined = SafeRemoveArrayHoles(array);
}
- QuickSort(this, 0, num_non_undefined);
+ QuickSort(array, 0, num_non_undefined);
if (!is_array && (num_non_undefined + 1 < max_prototype_element)) {
// For compatibility with JSC, we shadow any elements in the prototype
 // chain that have become exposed by sort moving a hole to its position.
- ShadowPrototypeElements(this, num_non_undefined, max_prototype_element);
+ ShadowPrototypeElements(array, num_non_undefined, max_prototype_element);
}
- return this;
+ return array;
}
@@ -1194,8 +1173,8 @@ function ArraySort(comparefn) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.sort");
var array = TO_OBJECT(this);
- var length = TO_UINT32(array.length);
- return %_CallFunction(array, length, comparefn, InnerArraySort);
+ var length = TO_LENGTH_OR_UINT32(array.length);
+ return InnerArraySort(array, length, comparefn);
}
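With InnerArraySort taking the array as an explicit argument, ArraySort calls
it directly and the comparator runs as an ordinary call, so a strict-mode
comparator sees an undefined receiver, as the spec requires. For example:

    var out = [3, 1, 2].sort(function (a, b) {
      "use strict";
      if (this !== undefined) throw new Error("unexpected receiver");
      return a - b;
    });
    console.log(out);  // [1, 2, 3]
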
@@ -1203,13 +1182,7 @@ function ArraySort(comparefn) {
// preserving the semantics, since the calls to the receiver function can add
// or delete elements from the array.
function InnerArrayFilter(f, receiver, array, length) {
- if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
- var needs_wrapper = false;
- if (IS_NULL(receiver)) {
- if (%IsSloppyModeFunction(f)) receiver = UNDEFINED;
- } else if (!IS_UNDEFINED(receiver)) {
- needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
- }
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var accumulator = new InternalArray();
var accumulator_length = 0;
@@ -1220,13 +1193,14 @@ function InnerArrayFilter(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
- if (%_CallFunction(new_receiver, element, i, array, f)) {
+ if (%_Call(f, receiver, element, i, array)) {
accumulator[accumulator_length++] = element;
}
}
}
- return accumulator;
+ var result = new GlobalArray();
+ %MoveArrayContents(accumulator, result);
+ return result;
}
function ArrayFilter(f, receiver) {
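The sloppy-mode receiver wrapping these iteration builtins used to perform by
hand (the SHOULD_CREATE_WRAPPER dance) moves out of JS: %_Call forwards the
receiver as-is and leaves any coercion to the callee's own calling convention.
Filter and map also box their InternalArray into a real array before
returning. The receiver behaviour, as observable from JS:

    // Strict callbacks see the thisArg exactly as passed...
    [1].forEach(function () { "use strict"; console.log(this); });         // undefined
    [1].forEach(function () { "use strict"; console.log(this); }, "abc");  // "abc"
    // ...while sloppy callbacks still get the usual wrapping of primitives:
    [1].forEach(function () { console.log(typeof this); }, "abc");         // "object"
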
@@ -1235,21 +1209,12 @@ function ArrayFilter(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
- var length = TO_UINT32(array.length);
- var accumulator = InnerArrayFilter(f, receiver, array, length);
- var result = new GlobalArray();
- %MoveArrayContents(accumulator, result);
- return result;
+ var length = TO_LENGTH_OR_UINT32(array.length);
+ return InnerArrayFilter(f, receiver, array, length);
}
function InnerArrayForEach(f, receiver, array, length) {
- if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
- var needs_wrapper = false;
- if (IS_NULL(receiver)) {
- if (%IsSloppyModeFunction(f)) receiver = UNDEFINED;
- } else if (!IS_UNDEFINED(receiver)) {
- needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
- }
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
@@ -1258,8 +1223,7 @@ function InnerArrayForEach(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
- %_CallFunction(new_receiver, element, i, array, f);
+ %_Call(f, receiver, element, i, array);
}
}
}
@@ -1270,19 +1234,13 @@ function ArrayForEach(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
- var length = TO_UINT32(array.length);
+ var length = TO_LENGTH_OR_UINT32(array.length);
InnerArrayForEach(f, receiver, array, length);
}
function InnerArraySome(f, receiver, array, length) {
- if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
- var needs_wrapper = false;
- if (IS_NULL(receiver)) {
- if (%IsSloppyModeFunction(f)) receiver = UNDEFINED;
- } else if (!IS_UNDEFINED(receiver)) {
- needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
- }
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
@@ -1291,8 +1249,7 @@ function InnerArraySome(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
- if (%_CallFunction(new_receiver, element, i, array, f)) return true;
+ if (%_Call(f, receiver, element, i, array)) return true;
}
}
return false;
@@ -1307,19 +1264,13 @@ function ArraySome(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
- var length = TO_UINT32(array.length);
+ var length = TO_LENGTH_OR_UINT32(array.length);
return InnerArraySome(f, receiver, array, length);
}
function InnerArrayEvery(f, receiver, array, length) {
- if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
- var needs_wrapper = false;
- if (IS_NULL(receiver)) {
- if (%IsSloppyModeFunction(f)) receiver = UNDEFINED;
- } else if (!IS_UNDEFINED(receiver)) {
- needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
- }
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var is_array = IS_ARRAY(array);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
@@ -1328,8 +1279,7 @@ function InnerArrayEvery(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
- if (!%_CallFunction(new_receiver, element, i, array, f)) return false;
+ if (!%_Call(f, receiver, element, i, array)) return false;
}
}
return true;
@@ -1341,19 +1291,13 @@ function ArrayEvery(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
- var length = TO_UINT32(array.length);
+ var length = TO_LENGTH_OR_UINT32(array.length);
return InnerArrayEvery(f, receiver, array, length);
}
function InnerArrayMap(f, receiver, array, length) {
- if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
- var needs_wrapper = false;
- if (IS_NULL(receiver)) {
- if (%IsSloppyModeFunction(f)) receiver = UNDEFINED;
- } else if (!IS_UNDEFINED(receiver)) {
- needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
- }
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var accumulator = new InternalArray(length);
var is_array = IS_ARRAY(array);
@@ -1363,11 +1307,12 @@ function InnerArrayMap(f, receiver, array, length) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
- accumulator[i] = %_CallFunction(new_receiver, element, i, array, f);
+ accumulator[i] = %_Call(f, receiver, element, i, array);
}
}
- return accumulator;
+ var result = new GlobalArray();
+ %MoveArrayContents(accumulator, result);
+ return result;
}
@@ -1377,11 +1322,8 @@ function ArrayMap(f, receiver) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
- var length = TO_UINT32(array.length);
- var accumulator = InnerArrayMap(f, receiver, array, length);
- var result = new GlobalArray();
- %MoveArrayContents(accumulator, result);
- return result;
+ var length = TO_LENGTH_OR_UINT32(array.length);
+ return InnerArrayMap(f, receiver, array, length);
}
@@ -1389,7 +1331,7 @@ function ArrayMap(f, receiver) {
// at the callsite since ToInteger(undefined) == 0; however, for
// .lastIndexOf, we need to pass it, since the behavior for passing
// undefined is 0 but for not including the argument is length-1.
-function InnerArrayIndexOf(element, index, length) {
+function InnerArrayIndexOf(array, element, index, length) {
if (length == 0) return -1;
if (IS_UNDEFINED(index)) {
index = 0;
@@ -1404,9 +1346,9 @@ function InnerArrayIndexOf(element, index, length) {
}
var min = index;
var max = length;
- if (UseSparseVariant(this, length, IS_ARRAY(this), max - min)) {
- %NormalizeElements(this);
- var indices = %GetArrayKeys(this, length);
+ if (UseSparseVariant(array, length, IS_ARRAY(array), max - min)) {
+ %NormalizeElements(array);
+ var indices = %GetArrayKeys(array, length);
if (IS_NUMBER(indices)) {
// It's an interval.
max = indices; // Capped by length already.
@@ -1414,13 +1356,13 @@ function InnerArrayIndexOf(element, index, length) {
} else {
if (indices.length == 0) return -1;
// Get all the keys in sorted order.
- var sortedKeys = GetSortedArrayKeys(this, indices);
+ var sortedKeys = GetSortedArrayKeys(array, indices);
var n = sortedKeys.length;
var i = 0;
while (i < n && sortedKeys[i] < index) i++;
while (i < n) {
var key = sortedKeys[i];
- if (!IS_UNDEFINED(key) && this[key] === element) return key;
+ if (!IS_UNDEFINED(key) && array[key] === element) return key;
i++;
}
return -1;
@@ -1429,13 +1371,13 @@ function InnerArrayIndexOf(element, index, length) {
// Lookup through the array.
if (!IS_UNDEFINED(element)) {
for (var i = min; i < max; i++) {
- if (this[i] === element) return i;
+ if (array[i] === element) return i;
}
return -1;
}
// Lookup through the array.
for (var i = min; i < max; i++) {
- if (IS_UNDEFINED(this[i]) && i in this) {
+ if (IS_UNDEFINED(array[i]) && i in array) {
return i;
}
}
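The refactor keeps indexOf's hole handling: when the search value is undefined
it additionally requires "i in array", so holes are skipped rather than
reported as matches. For example:

    var a = [, undefined, , undefined];  // indices 0 and 2 are holes
    console.log(a.indexOf(undefined));   // 1, not 0
    console.log(0 in a, 1 in a);         // false true
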
@@ -1446,12 +1388,12 @@ function InnerArrayIndexOf(element, index, length) {
function ArrayIndexOf(element, index) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.indexOf");
- var length = TO_UINT32(this.length);
- return %_CallFunction(this, element, index, length, InnerArrayIndexOf);
+ var length = TO_LENGTH_OR_UINT32(this.length);
+ return InnerArrayIndexOf(this, element, index, length);
}
-function InnerArrayLastIndexOf(element, index, length, argumentsLength) {
+function InnerArrayLastIndexOf(array, element, index, length, argumentsLength) {
if (length == 0) return -1;
if (argumentsLength < 2) {
index = length - 1;
@@ -1465,9 +1407,9 @@ function InnerArrayLastIndexOf(element, index, length, argumentsLength) {
}
var min = 0;
var max = index;
- if (UseSparseVariant(this, length, IS_ARRAY(this), index)) {
- %NormalizeElements(this);
- var indices = %GetArrayKeys(this, index + 1);
+ if (UseSparseVariant(array, length, IS_ARRAY(array), index)) {
+ %NormalizeElements(array);
+ var indices = %GetArrayKeys(array, index + 1);
if (IS_NUMBER(indices)) {
// It's an interval.
max = indices; // Capped by index already.
@@ -1475,11 +1417,11 @@ function InnerArrayLastIndexOf(element, index, length, argumentsLength) {
} else {
if (indices.length == 0) return -1;
// Get all the keys in sorted order.
- var sortedKeys = GetSortedArrayKeys(this, indices);
+ var sortedKeys = GetSortedArrayKeys(array, indices);
var i = sortedKeys.length - 1;
while (i >= 0) {
var key = sortedKeys[i];
- if (!IS_UNDEFINED(key) && this[key] === element) return key;
+ if (!IS_UNDEFINED(key) && array[key] === element) return key;
i--;
}
return -1;
@@ -1488,12 +1430,12 @@ function InnerArrayLastIndexOf(element, index, length, argumentsLength) {
// Lookup through the array.
if (!IS_UNDEFINED(element)) {
for (var i = max; i >= min; i--) {
- if (this[i] === element) return i;
+ if (array[i] === element) return i;
}
return -1;
}
for (var i = max; i >= min; i--) {
- if (IS_UNDEFINED(this[i]) && i in this) {
+ if (IS_UNDEFINED(array[i]) && i in array) {
return i;
}
}
@@ -1504,14 +1446,14 @@ function InnerArrayLastIndexOf(element, index, length, argumentsLength) {
function ArrayLastIndexOf(element, index) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
- var length = TO_UINT32(this.length);
- return %_CallFunction(this, element, index, length,
- %_ArgumentsLength(), InnerArrayLastIndexOf);
+ var length = TO_LENGTH_OR_UINT32(this.length);
+ return InnerArrayLastIndexOf(this, element, index, length,
+ %_ArgumentsLength());
}
function InnerArrayReduce(callback, current, array, length, argumentsLength) {
- if (!IS_SPEC_FUNCTION(callback)) {
+ if (!IS_CALLABLE(callback)) {
throw MakeTypeError(kCalledNonCallable, callback);
}
@@ -1533,7 +1475,7 @@ function InnerArrayReduce(callback, current, array, length, argumentsLength) {
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(callback);
- current = %_CallFunction(UNDEFINED, current, element, i, array, callback);
+ current = callback(current, element, i, array);
}
}
return current;
@@ -1546,7 +1488,7 @@ function ArrayReduce(callback, current) {
// Pull out the length so that modifications to the length in the
// loop will not affect the looping and side effects are visible.
var array = TO_OBJECT(this);
- var length = TO_UINT32(array.length);
+ var length = TO_LENGTH_OR_UINT32(array.length);
return InnerArrayReduce(callback, current, array, length,
%_ArgumentsLength());
}
@@ -1554,7 +1496,7 @@ function ArrayReduce(callback, current) {
function InnerArrayReduceRight(callback, current, array, length,
argumentsLength) {
- if (!IS_SPEC_FUNCTION(callback)) {
+ if (!IS_CALLABLE(callback)) {
throw MakeTypeError(kCalledNonCallable, callback);
}
@@ -1576,7 +1518,7 @@ function InnerArrayReduceRight(callback, current, array, length,
var element = array[i];
// Prepare break slots for debugger step in.
if (stepping) %DebugPrepareStepInIfStepping(callback);
- current = %_CallFunction(UNDEFINED, current, element, i, array, callback);
+ current = callback(current, element, i, array);
}
}
return current;
@@ -1589,7 +1531,7 @@ function ArrayReduceRight(callback, current) {
// Pull out the length so that side effects are visible before the
// callback function is checked.
var array = TO_OBJECT(this);
- var length = TO_UINT32(array.length);
+ var length = TO_LENGTH_OR_UINT32(array.length);
return InnerArrayReduceRight(callback, current, array, length,
%_ArgumentsLength());
}
@@ -1618,7 +1560,7 @@ var unscopables = {
keys: true,
};
-%AddNamedProperty(GlobalArray.prototype, symbolUnscopables, unscopables,
+%AddNamedProperty(GlobalArray.prototype, unscopablesSymbol, unscopables,
DONT_ENUM | READ_ONLY);
// Set up non-enumerable functions on the Array object.
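unscopablesSymbol here is Symbol.unscopables; listing a name in this object
hides the corresponding Array.prototype method from with() scopes, per ES6. In
a sloppy-mode script:

    var keys = "outer";
    with ([]) {
      console.log(keys);  // "outer", not Array.prototype.keys
    }
    console.log([][Symbol.unscopables].keys);  // true
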
@@ -1649,7 +1591,6 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush, 1),
- "concat", getFunction("concat", ArrayConcatJS, 1),
"reverse", getFunction("reverse", ArrayReverse),
"shift", getFunction("shift", ArrayShift),
"unshift", getFunction("unshift", ArrayUnshift, 1),
@@ -1673,12 +1614,12 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
// exposed to user code.
// Adding only the functions that are actually used.
utils.SetUpLockedPrototype(InternalArray, GlobalArray(), [
- "concat", getFunction("concat", ArrayConcatJS),
"indexOf", getFunction("indexOf", ArrayIndexOf),
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
"push", getFunction("push", ArrayPush),
"shift", getFunction("shift", ArrayShift),
+ "sort", getFunction("sort", ArraySort),
"splice", getFunction("splice", ArraySplice)
]);
@@ -1695,6 +1636,7 @@ utils.SetUpLockedPrototype(InternalPackedArray, GlobalArray(), [
utils.Export(function(to) {
to.ArrayIndexOf = ArrayIndexOf;
to.ArrayJoin = ArrayJoin;
+ to.ArrayPush = ArrayPush;
to.ArrayToString = ArrayToString;
to.InnerArrayEvery = InnerArrayEvery;
to.InnerArrayFilter = InnerArrayFilter;
@@ -1711,12 +1653,13 @@ utils.Export(function(to) {
to.PackedArrayReverse = PackedArrayReverse;
});
-$arrayConcat = ArrayConcatJS;
-$arrayPush = ArrayPush;
-$arrayPop = ArrayPop;
-$arrayShift = ArrayShift;
-$arraySlice = ArraySlice;
-$arraySplice = ArraySplice;
-$arrayUnshift = ArrayUnshift;
+%InstallToContext([
+ "array_pop", ArrayPop,
+ "array_push", ArrayPush,
+ "array_shift", ArrayShift,
+ "array_splice", ArraySplice,
+ "array_slice", ArraySlice,
+ "array_unshift", ArrayUnshift,
+]);
});
diff --git a/deps/v8/src/arraybuffer.js b/deps/v8/src/arraybuffer.js
index 2edcd12cad..0db0c2bf04 100644
--- a/deps/v8/src/arraybuffer.js
+++ b/deps/v8/src/arraybuffer.js
@@ -16,6 +16,7 @@ var GlobalObject = global.Object;
var MathMax;
var MathMin;
var ToNumber;
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
MathMax = from.MathMax;
@@ -93,9 +94,10 @@ function ArrayBufferIsViewJS(obj) {
GlobalArrayBuffer.prototype, "constructor", GlobalArrayBuffer, DONT_ENUM);
%AddNamedProperty(GlobalArrayBuffer.prototype,
- symbolToStringTag, "ArrayBuffer", DONT_ENUM | READ_ONLY);
+ toStringTagSymbol, "ArrayBuffer", DONT_ENUM | READ_ONLY);
-utils.InstallGetter(GlobalArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLen);
+utils.InstallGetter(GlobalArrayBuffer.prototype, "byteLength",
+ ArrayBufferGetByteLen);
utils.InstallFunctions(GlobalArrayBuffer, DONT_ENUM, [
"isView", ArrayBufferIsViewJS
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index b7550bb795..dd05a07750 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -44,16 +44,17 @@
#include "src/builtins.h"
#include "src/codegen.h"
#include "src/counters.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime/runtime.h"
+#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/serialize.h"
#include "src/token.h"
@@ -155,6 +156,31 @@ AssemblerBase::~AssemblerBase() {
}
+void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
+ if (size == 0) return;
+ if (CpuFeatures::IsSupported(COHERENT_CACHE)) return;
+
+#if defined(USE_SIMULATOR)
+ Simulator::FlushICache(isolate->simulator_i_cache(), start, size);
+#else
+ CpuFeatures::FlushICache(start, size);
+#endif // USE_SIMULATOR
+}
+
+
+void AssemblerBase::FlushICacheWithoutIsolate(void* start, size_t size) {
+ // Ideally we would just call Isolate::Current() here. However, this flushes
+ // out issues because we usually only need the isolate when in the simulator.
+ Isolate* isolate;
+#if defined(USE_SIMULATOR)
+ isolate = Isolate::Current();
+#else
+ isolate = nullptr;
+#endif // USE_SIMULATOR
+ FlushICache(isolate, start, size);
+}
+
+
// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope
@@ -980,14 +1006,13 @@ ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
: address_(isolate->builtins()->builtin_address(name)) {}
-ExternalReference::ExternalReference(Runtime::FunctionId id,
- Isolate* isolate)
- : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}
+ExternalReference::ExternalReference(Runtime::FunctionId id, Isolate* isolate)
+ : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}
ExternalReference::ExternalReference(const Runtime::Function* f,
Isolate* isolate)
- : address_(Redirect(isolate, f->entry)) {}
+ : address_(Redirect(isolate, f->entry)) {}
ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
@@ -1023,12 +1048,6 @@ ExternalReference ExternalReference::
}
-ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(CpuFeatures::FlushICache)));
-}
-
-
ExternalReference ExternalReference::delete_handle_scope_extensions(
Isolate* isolate) {
return ExternalReference(Redirect(
@@ -1141,7 +1160,7 @@ ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
- return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
+ return ExternalReference(isolate->heap()->store_buffer_top_address());
}
@@ -1388,6 +1407,12 @@ ExternalReference
}
+ExternalReference ExternalReference::vector_store_virtual_register(
+ Isolate* isolate) {
+ return ExternalReference(isolate->vector_store_virtual_register_address());
+}
+
+
double power_helper(double x, double y) {
int y_int = static_cast<int>(y);
if (y == y_int) {
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index a4c19e6b02..433b9b8456 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -101,6 +101,11 @@ class AssemblerBase: public Malloced {
static const int kMinimalBufferSize = 4*KB;
+ static void FlushICache(Isolate* isolate, void* start, size_t size);
+
+ // TODO(all): Help get rid of this one.
+ static void FlushICacheWithoutIsolate(void* start, size_t size);
+
protected:
// The buffer into which code and relocation info are generated. It could
// either be owned by the assembler or be provided externally.
@@ -891,7 +896,6 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference store_buffer_overflow_function(
Isolate* isolate);
- static ExternalReference flush_icache_function(Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
static ExternalReference get_date_field_function(Isolate* isolate);
@@ -986,6 +990,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference invoke_function_callback(Isolate* isolate);
static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
+ static ExternalReference vector_store_virtual_register(Isolate* isolate);
+
Address address() const { return reinterpret_cast<Address>(address_); }
// Used to check if single stepping is enabled in generated code.
diff --git a/deps/v8/src/assert-scope.cc b/deps/v8/src/assert-scope.cc
index 6cc2e5a081..3852709966 100644
--- a/deps/v8/src/assert-scope.cc
+++ b/deps/v8/src/assert-scope.cc
@@ -145,8 +145,6 @@ template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>;
-template class PerIsolateAssertScope<ALLOCATION_FAILURE_ASSERT, false>;
-template class PerIsolateAssertScope<ALLOCATION_FAILURE_ASSERT, true>;
template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, false>;
template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, true>;
template class PerIsolateAssertScope<COMPILATION_ASSERT, false>;
diff --git a/deps/v8/src/assert-scope.h b/deps/v8/src/assert-scope.h
index 0f1e056fba..8757a32910 100644
--- a/deps/v8/src/assert-scope.h
+++ b/deps/v8/src/assert-scope.h
@@ -29,7 +29,6 @@ enum PerThreadAssertType {
enum PerIsolateAssertType {
JAVASCRIPT_EXECUTION_ASSERT,
JAVASCRIPT_EXECUTION_THROWS,
- ALLOCATION_FAILURE_ASSERT,
DEOPTIMIZATION_ASSERT,
COMPILATION_ASSERT
};
@@ -156,14 +155,6 @@ typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>
typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>
NoThrowOnJavascriptExecution;
-// Scope to document where we do not expect an allocation failure.
-typedef PerIsolateAssertScopeDebugOnly<ALLOCATION_FAILURE_ASSERT, false>
- DisallowAllocationFailure;
-
-// Scope to introduce an exception to DisallowAllocationFailure.
-typedef PerIsolateAssertScopeDebugOnly<ALLOCATION_FAILURE_ASSERT, true>
- AllowAllocationFailure;
-
// Scope to document where we do not expect deoptimization.
typedef PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, false>
DisallowDeoptimization;
diff --git a/deps/v8/src/ast-expression-visitor.cc b/deps/v8/src/ast-expression-visitor.cc
new file mode 100644
index 0000000000..782d4bbca6
--- /dev/null
+++ b/deps/v8/src/ast-expression-visitor.cc
@@ -0,0 +1,356 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ast-expression-visitor.h"
+
+#include "src/ast.h"
+#include "src/codegen.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define RECURSE(call) \
+ do { \
+ DCHECK(!HasStackOverflow()); \
+ call; \
+ if (HasStackOverflow()) return; \
+ } while (false)
+
+
+#define RECURSE_EXPRESSION(call) \
+ do { \
+ DCHECK(!HasStackOverflow()); \
+ ++depth_; \
+ call; \
+ --depth_; \
+ if (HasStackOverflow()) return; \
+ } while (false)
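+
+// Both macros guard the recursive AST walk: if visiting `call` overflowed the
+// stack, they return early instead of crashing. RECURSE_EXPRESSION also
+// maintains depth_ so subclasses can query the current nesting via depth().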
+
+
+AstExpressionVisitor::AstExpressionVisitor(Isolate* isolate, Zone* zone,
+ FunctionLiteral* root)
+ : root_(root), depth_(0) {
+ InitializeAstVisitor(isolate, zone);
+}
+
+
+void AstExpressionVisitor::Run() { RECURSE(VisitFunctionLiteral(root_)); }
+
+
+void AstExpressionVisitor::VisitVariableDeclaration(VariableDeclaration* decl) {
+}
+
+
+void AstExpressionVisitor::VisitFunctionDeclaration(FunctionDeclaration* decl) {
+ RECURSE(Visit(decl->fun()));
+}
+
+
+void AstExpressionVisitor::VisitImportDeclaration(ImportDeclaration* decl) {}
+
+
+void AstExpressionVisitor::VisitExportDeclaration(ExportDeclaration* decl) {}
+
+
+void AstExpressionVisitor::VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0; i < stmts->length(); ++i) {
+ Statement* stmt = stmts->at(i);
+ RECURSE(Visit(stmt));
+ if (stmt->IsJump()) break;
+ }
+}
+
+
+void AstExpressionVisitor::VisitBlock(Block* stmt) {
+ RECURSE(VisitStatements(stmt->statements()));
+}
+
+
+void AstExpressionVisitor::VisitExpressionStatement(ExpressionStatement* stmt) {
+ RECURSE(Visit(stmt->expression()));
+}
+
+
+void AstExpressionVisitor::VisitEmptyStatement(EmptyStatement* stmt) {}
+
+
+void AstExpressionVisitor::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* stmt) {
+ RECURSE(Visit(stmt->statement()));
+}
+
+
+void AstExpressionVisitor::VisitIfStatement(IfStatement* stmt) {
+ RECURSE(Visit(stmt->condition()));
+ RECURSE(Visit(stmt->then_statement()));
+ RECURSE(Visit(stmt->else_statement()));
+}
+
+
+void AstExpressionVisitor::VisitContinueStatement(ContinueStatement* stmt) {}
+
+
+void AstExpressionVisitor::VisitBreakStatement(BreakStatement* stmt) {}
+
+
+void AstExpressionVisitor::VisitReturnStatement(ReturnStatement* stmt) {
+ RECURSE(Visit(stmt->expression()));
+}
+
+
+void AstExpressionVisitor::VisitWithStatement(WithStatement* stmt) {
+ RECURSE(Visit(stmt->expression()));
+ RECURSE(Visit(stmt->statement()));
+}
+
+
+void AstExpressionVisitor::VisitSwitchStatement(SwitchStatement* stmt) {
+ RECURSE(Visit(stmt->tag()));
+
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+
+ for (int i = 0; i < clauses->length(); ++i) {
+ CaseClause* clause = clauses->at(i);
+ if (!clause->is_default()) {
+ Expression* label = clause->label();
+ RECURSE(Visit(label));
+ }
+ ZoneList<Statement*>* stmts = clause->statements();
+ RECURSE(VisitStatements(stmts));
+ }
+}
+
+
+void AstExpressionVisitor::VisitCaseClause(CaseClause* clause) {
+ UNREACHABLE();
+}
+
+
+void AstExpressionVisitor::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ RECURSE(Visit(stmt->body()));
+ RECURSE(Visit(stmt->cond()));
+}
+
+
+void AstExpressionVisitor::VisitWhileStatement(WhileStatement* stmt) {
+ RECURSE(Visit(stmt->cond()));
+ RECURSE(Visit(stmt->body()));
+}
+
+
+void AstExpressionVisitor::VisitForStatement(ForStatement* stmt) {
+ if (stmt->init() != NULL) {
+ RECURSE(Visit(stmt->init()));
+ }
+ if (stmt->cond() != NULL) {
+ RECURSE(Visit(stmt->cond()));
+ }
+ if (stmt->next() != NULL) {
+ RECURSE(Visit(stmt->next()));
+ }
+ RECURSE(Visit(stmt->body()));
+}
+
+
+void AstExpressionVisitor::VisitForInStatement(ForInStatement* stmt) {
+ RECURSE(Visit(stmt->enumerable()));
+ RECURSE(Visit(stmt->body()));
+}
+
+
+void AstExpressionVisitor::VisitForOfStatement(ForOfStatement* stmt) {
+ RECURSE(Visit(stmt->iterable()));
+ RECURSE(Visit(stmt->body()));
+}
+
+
+void AstExpressionVisitor::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ RECURSE(Visit(stmt->try_block()));
+ RECURSE(Visit(stmt->catch_block()));
+}
+
+
+void AstExpressionVisitor::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ RECURSE(Visit(stmt->try_block()));
+ RECURSE(Visit(stmt->finally_block()));
+}
+
+
+void AstExpressionVisitor::VisitDebuggerStatement(DebuggerStatement* stmt) {}
+
+
+void AstExpressionVisitor::VisitFunctionLiteral(FunctionLiteral* expr) {
+ Scope* scope = expr->scope();
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(VisitDeclarations(scope->declarations()));
+ RECURSE_EXPRESSION(VisitStatements(expr->body()));
+}
+
+
+void AstExpressionVisitor::VisitNativeFunctionLiteral(
+ NativeFunctionLiteral* expr) {}
+
+
+void AstExpressionVisitor::VisitConditional(Conditional* expr) {
+ RECURSE(Visit(expr->condition()));
+ RECURSE(Visit(expr->then_expression()));
+ RECURSE(Visit(expr->else_expression()));
+}
+
+
+void AstExpressionVisitor::VisitVariableProxy(VariableProxy* expr) {
+ VisitExpression(expr);
+}
+
+
+void AstExpressionVisitor::VisitLiteral(Literal* expr) {
+ VisitExpression(expr);
+}
+
+
+void AstExpressionVisitor::VisitRegExpLiteral(RegExpLiteral* expr) {
+ VisitExpression(expr);
+}
+
+
+void AstExpressionVisitor::VisitObjectLiteral(ObjectLiteral* expr) {
+ VisitExpression(expr);
+ ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+ for (int i = 0; i < props->length(); ++i) {
+ ObjectLiteralProperty* prop = props->at(i);
+ RECURSE_EXPRESSION(Visit(prop->value()));
+ }
+}
+
+
+void AstExpressionVisitor::VisitArrayLiteral(ArrayLiteral* expr) {
+ VisitExpression(expr);
+ ZoneList<Expression*>* values = expr->values();
+ for (int i = 0; i < values->length(); ++i) {
+ Expression* value = values->at(i);
+ RECURSE_EXPRESSION(Visit(value));
+ }
+}
+
+
+void AstExpressionVisitor::VisitAssignment(Assignment* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->target()));
+ RECURSE_EXPRESSION(Visit(expr->value()));
+}
+
+
+void AstExpressionVisitor::VisitYield(Yield* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->generator_object()));
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+}
+
+
+void AstExpressionVisitor::VisitThrow(Throw* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->exception()));
+}
+
+
+void AstExpressionVisitor::VisitProperty(Property* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->obj()));
+ RECURSE_EXPRESSION(Visit(expr->key()));
+}
+
+
+void AstExpressionVisitor::VisitCall(Call* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE_EXPRESSION(Visit(arg));
+ }
+}
+
+
+void AstExpressionVisitor::VisitCallNew(CallNew* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE_EXPRESSION(Visit(arg));
+ }
+}
+
+
+void AstExpressionVisitor::VisitCallRuntime(CallRuntime* expr) {
+ VisitExpression(expr);
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE_EXPRESSION(Visit(arg));
+ }
+}
+
+
+void AstExpressionVisitor::VisitUnaryOperation(UnaryOperation* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+}
+
+
+void AstExpressionVisitor::VisitCountOperation(CountOperation* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->expression()));
+}
+
+
+void AstExpressionVisitor::VisitBinaryOperation(BinaryOperation* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->left()));
+ RECURSE_EXPRESSION(Visit(expr->right()));
+}
+
+
+void AstExpressionVisitor::VisitCompareOperation(CompareOperation* expr) {
+ VisitExpression(expr);
+ RECURSE_EXPRESSION(Visit(expr->left()));
+ RECURSE_EXPRESSION(Visit(expr->right()));
+}
+
+
+void AstExpressionVisitor::VisitThisFunction(ThisFunction* expr) {
+ VisitExpression(expr);
+}
+
+
+void AstExpressionVisitor::VisitDeclarations(ZoneList<Declaration*>* decls) {
+ for (int i = 0; i < decls->length(); ++i) {
+ Declaration* decl = decls->at(i);
+ RECURSE(Visit(decl));
+ }
+}
+
+
+void AstExpressionVisitor::VisitClassLiteral(ClassLiteral* expr) {}
+
+
+void AstExpressionVisitor::VisitSpread(Spread* expr) {}
+
+
+void AstExpressionVisitor::VisitEmptyParentheses(EmptyParentheses* expr) {}
+
+
+void AstExpressionVisitor::VisitSuperPropertyReference(
+ SuperPropertyReference* expr) {}
+
+
+void AstExpressionVisitor::VisitSuperCallReference(SuperCallReference* expr) {}
+}
+
+
+} // namespace v8::internal
diff --git a/deps/v8/src/ast-expression-visitor.h b/deps/v8/src/ast-expression-visitor.h
new file mode 100644
index 0000000000..43b34bac79
--- /dev/null
+++ b/deps/v8/src/ast-expression-visitor.h
@@ -0,0 +1,49 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_EXPRESSION_VISITOR_H_
+#define V8_AST_EXPRESSION_VISITOR_H_
+
+#include "src/allocation.h"
+#include "src/ast.h"
+#include "src/effects.h"
+#include "src/scopes.h"
+#include "src/type-info.h"
+#include "src/types.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// A Visitor over a CompilationInfo's AST that invokes
+// VisitExpression on each expression node.
+
+class AstExpressionVisitor : public AstVisitor {
+ public:
+ AstExpressionVisitor(Isolate* isolate, Zone* zone, FunctionLiteral* root);
+ void Run();
+
+ protected:
+ virtual void VisitExpression(Expression* expression) = 0;
+ int depth() { return depth_; }
+
+ private:
+ void VisitDeclarations(ZoneList<Declaration*>* d) override;
+ void VisitStatements(ZoneList<Statement*>* s) override;
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ FunctionLiteral* root_;
+ int depth_;
+
+ DISALLOW_COPY_AND_ASSIGN(AstExpressionVisitor);
+};
+}
+} // namespace v8::internal
+
+#endif // V8_AST_EXPRESSION_VISITOR_H_
diff --git a/deps/v8/src/ast-literal-reindexer.cc b/deps/v8/src/ast-literal-reindexer.cc
index 860a3961f0..e5729c7818 100644
--- a/deps/v8/src/ast-literal-reindexer.cc
+++ b/deps/v8/src/ast-literal-reindexer.cc
@@ -24,6 +24,12 @@ void AstLiteralReindexer::VisitExportDeclaration(ExportDeclaration* node) {
void AstLiteralReindexer::VisitEmptyStatement(EmptyStatement* node) {}
+void AstLiteralReindexer::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* node) {
+ Visit(node->statement());
+}
+
+
void AstLiteralReindexer::VisitContinueStatement(ContinueStatement* node) {}
@@ -174,6 +180,9 @@ void AstLiteralReindexer::VisitSpread(Spread* node) {
}
+void AstLiteralReindexer::VisitEmptyParentheses(EmptyParentheses* node) {}
+
+
void AstLiteralReindexer::VisitForInStatement(ForInStatement* node) {
Visit(node->each());
Visit(node->enumerable());
diff --git a/deps/v8/src/ast-literal-reindexer.h b/deps/v8/src/ast-literal-reindexer.h
index 9e445129a5..2fe920b7c4 100644
--- a/deps/v8/src/ast-literal-reindexer.h
+++ b/deps/v8/src/ast-literal-reindexer.h
@@ -17,6 +17,7 @@ class AstLiteralReindexer final : public AstVisitor {
int count() const { return next_index_; }
void Reindex(Expression* pattern);
+ int NextIndex() { return next_index_++; }
private:
#define DEFINE_VISIT(type) virtual void Visit##type(type* node) override;
diff --git a/deps/v8/src/ast-numbering.cc b/deps/v8/src/ast-numbering.cc
index 4becade296..55eaacd1f5 100644
--- a/deps/v8/src/ast-numbering.cc
+++ b/deps/v8/src/ast-numbering.cc
@@ -65,21 +65,8 @@ class AstNumberingVisitor final : public AstVisitor {
template <typename Node>
void ReserveFeedbackSlots(Node* node) {
- FeedbackVectorRequirements reqs =
- node->ComputeFeedbackRequirements(isolate(), &ic_slot_cache_);
- if (reqs.slots() > 0) {
- node->SetFirstFeedbackSlot(FeedbackVectorSlot(properties_.slots()));
- properties_.increase_slots(reqs.slots());
- }
- if (reqs.ic_slots() > 0) {
- int ic_slots = properties_.ic_slots();
- node->SetFirstFeedbackICSlot(FeedbackVectorICSlot(ic_slots),
- &ic_slot_cache_);
- properties_.increase_ic_slots(reqs.ic_slots());
- for (int i = 0; i < reqs.ic_slots(); i++) {
- properties_.SetKind(ic_slots + i, node->FeedbackICSlotKind(i));
- }
- }
+ node->AssignFeedbackVectorSlots(isolate(), properties_.get_spec(),
+ &ic_slot_cache_);
}
BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
@@ -113,6 +100,13 @@ void AstNumberingVisitor::VisitEmptyStatement(EmptyStatement* node) {
}
+void AstNumberingVisitor::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* node) {
+ IncrementNodeCount();
+ Visit(node->statement());
+}
+
+
void AstNumberingVisitor::VisitContinueStatement(ContinueStatement* node) {
IncrementNodeCount();
}
@@ -371,6 +365,11 @@ void AstNumberingVisitor::VisitSpread(Spread* node) {
}
+void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
+ UNREACHABLE();
+}
+
+
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
diff --git a/deps/v8/src/ast-value-factory.h b/deps/v8/src/ast-value-factory.h
index ca36ac8ea1..69fc6cc2f4 100644
--- a/deps/v8/src/ast-value-factory.h
+++ b/deps/v8/src/ast-value-factory.h
@@ -241,48 +241,35 @@ class AstValue : public ZoneObject {
// For generating constants.
-#define STRING_CONSTANTS(F) \
- F(anonymous_function, "(anonymous function)") \
- F(arguments, "arguments") \
- F(concat_iterable_to_array, "$concatIterableToArray") \
- F(constructor, "constructor") \
- F(default, "default") \
- F(done, "done") \
- F(dot, ".") \
- F(dot_for, ".for") \
- F(dot_generator, ".generator") \
- F(dot_generator_object, ".generator_object") \
- F(dot_iterator, ".iterator") \
- F(dot_module, ".module") \
- F(dot_result, ".result") \
- F(empty, "") \
- F(eval, "eval") \
- F(get_template_callsite, "$getTemplateCallSite") \
- F(initialize_const_global, "initializeConstGlobal") \
- F(initialize_var_global, "initializeVarGlobal") \
- F(is_construct_call, "_IsConstructCall") \
- F(is_spec_object, "_IsSpecObject") \
- F(let, "let") \
- F(make_reference_error, "MakeReferenceError") \
- F(make_syntax_error, "MakeSyntaxError") \
- F(make_type_error, "MakeTypeError") \
- F(native, "native") \
- F(new_target, ".new.target") \
- F(next, "next") \
- F(proto, "__proto__") \
- F(prototype, "prototype") \
- F(reflect_apply, "$reflectApply") \
- F(reflect_construct, "$reflectConstruct") \
- F(spread_arguments, "$spreadArguments") \
- F(spread_iterable, "$spreadIterable") \
- F(this, "this") \
- F(this_function, ".this_function") \
- F(throw_iterator_result_not_an_object, "ThrowIteratorResultNotAnObject") \
- F(to_string, "$toString") \
- F(undefined, "undefined") \
- F(use_asm, "use asm") \
- F(use_strong, "use strong") \
- F(use_strict, "use strict") \
+#define STRING_CONSTANTS(F) \
+ F(anonymous_function, "(anonymous function)") \
+ F(arguments, "arguments") \
+ F(constructor, "constructor") \
+ F(default, "default") \
+ F(done, "done") \
+ F(dot, ".") \
+ F(dot_for, ".for") \
+ F(dot_generator, ".generator") \
+ F(dot_generator_object, ".generator_object") \
+ F(dot_iterator, ".iterator") \
+ F(dot_module, ".module") \
+ F(dot_result, ".result") \
+ F(dot_switch_tag, ".switch_tag") \
+ F(empty, "") \
+ F(eval, "eval") \
+ F(let, "let") \
+ F(native, "native") \
+ F(new_target, ".new.target") \
+ F(next, "next") \
+ F(proto, "__proto__") \
+ F(prototype, "prototype") \
+ F(rest_parameter, ".rest_parameter") \
+ F(this, "this") \
+ F(this_function, ".this_function") \
+ F(undefined, "undefined") \
+ F(use_asm, "use asm") \
+ F(use_strong, "use strong") \
+ F(use_strict, "use strict") \
F(value, "value")
#define OTHER_CONSTANTS(F) \
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 71a34b65fa..3292b1d50b 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -98,17 +98,9 @@ void VariableProxy::BindTo(Variable* var) {
}
-void VariableProxy::SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) {
- variable_feedback_slot_ = slot;
- if (var()->IsUnallocated()) {
- cache->Put(var(), slot);
- }
-}
-
-
-FeedbackVectorRequirements VariableProxy::ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) {
+void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
+ FeedbackVectorSpec* spec,
+ ICSlotCache* cache) {
if (UsesVariableFeedbackSlot()) {
// VariableProxies that point to the same Variable within a function can
// make their loads from the same IC slot.
@@ -117,45 +109,39 @@ FeedbackVectorRequirements VariableProxy::ComputeFeedbackRequirements(
if (entry != NULL) {
variable_feedback_slot_ = FeedbackVectorICSlot(
static_cast<int>(reinterpret_cast<intptr_t>(entry->value)));
- return FeedbackVectorRequirements(0, 0);
+ return;
}
}
- return FeedbackVectorRequirements(0, 1);
+ variable_feedback_slot_ = spec->AddLoadICSlot();
+ if (var()->IsUnallocated()) {
+ cache->Put(var(), variable_feedback_slot_);
+ }
}
- return FeedbackVectorRequirements(0, 0);
}
-static int GetStoreICSlots(Expression* expr) {
- int ic_slots = 0;
+static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
+ FeedbackVectorICSlot* out_slot) {
if (FLAG_vector_stores) {
Property* property = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
if ((assign_type == VARIABLE &&
expr->AsVariableProxy()->var()->IsUnallocated()) ||
assign_type == NAMED_PROPERTY || assign_type == KEYED_PROPERTY) {
- ic_slots++;
+ // TODO(ishell): consider using ICSlotCache for variables here.
+ FeedbackVectorSlotKind kind = assign_type == KEYED_PROPERTY
+ ? FeedbackVectorSlotKind::KEYED_STORE_IC
+ : FeedbackVectorSlotKind::STORE_IC;
+ *out_slot = spec->AddSlot(kind);
}
}
- return ic_slots;
}
-static Code::Kind GetStoreICKind(Expression* expr) {
- LhsKind assign_type = Property::GetAssignType(expr->AsProperty());
- return assign_type == KEYED_PROPERTY ? Code::KEYED_STORE_IC : Code::STORE_IC;
-}
-
-
-FeedbackVectorRequirements ForEachStatement::ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) {
- int ic_slots = GetStoreICSlots(each());
- return FeedbackVectorRequirements(0, ic_slots);
-}
-
-
-Code::Kind ForEachStatement::FeedbackICSlotKind(int index) {
- return GetStoreICKind(each());
+void ForEachStatement::AssignFeedbackVectorSlots(Isolate* isolate,
+ FeedbackVectorSpec* spec,
+ ICSlotCache* cache) {
+ AssignVectorSlots(each(), spec, &each_slot_);
}
@@ -171,27 +157,17 @@ Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
slot_(FeedbackVectorICSlot::Invalid()) {}
-FeedbackVectorRequirements Assignment::ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) {
- int ic_slots = GetStoreICSlots(target());
- return FeedbackVectorRequirements(0, ic_slots);
-}
-
-
-Code::Kind Assignment::FeedbackICSlotKind(int index) {
- return GetStoreICKind(target());
-}
-
-
-FeedbackVectorRequirements CountOperation::ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) {
- int ic_slots = GetStoreICSlots(expression());
- return FeedbackVectorRequirements(0, ic_slots);
+void Assignment::AssignFeedbackVectorSlots(Isolate* isolate,
+ FeedbackVectorSpec* spec,
+ ICSlotCache* cache) {
+ AssignVectorSlots(target(), spec, &slot_);
}
-Code::Kind CountOperation::FeedbackICSlotKind(int index) {
- return GetStoreICKind(expression());
+void CountOperation::AssignFeedbackVectorSlots(Isolate* isolate,
+ FeedbackVectorSpec* spec,
+ ICSlotCache* cache) {
+ AssignVectorSlots(expression(), spec, &slot_);
}
@@ -251,6 +227,7 @@ ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value,
bool is_computed_name)
: key_(key),
value_(value),
+ slot_(FeedbackVectorICSlot::Invalid()),
kind_(kind),
emit_store_(true),
is_static_(is_static),
@@ -263,6 +240,7 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
bool is_computed_name)
: key_(key),
value_(value),
+ slot_(FeedbackVectorICSlot::Invalid()),
emit_store_(true),
is_static_(is_static),
is_computed_name_(is_computed_name) {
@@ -280,42 +258,24 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
}
-FeedbackVectorRequirements ClassLiteral::ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) {
- if (!FLAG_vector_stores) return FeedbackVectorRequirements(0, 0);
+void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
+ FeedbackVectorSpec* spec,
+ ICSlotCache* cache) {
+ if (!FLAG_vector_stores) return;
// This logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitClassLiteral.
- int ic_slots = 0;
+ if (NeedsProxySlot()) {
+ slot_ = spec->AddStoreICSlot();
+ }
+
for (int i = 0; i < properties()->length(); i++) {
ObjectLiteral::Property* property = properties()->at(i);
-
Expression* value = property->value();
- if (FunctionLiteral::NeedsHomeObject(value)) ic_slots++;
- }
-
- if (scope() != NULL && class_variable_proxy()->var()->IsUnallocated()) {
- ic_slots++;
- }
-
-#ifdef DEBUG
- // FullCodeGenerator::VisitClassLiteral verifies that it consumes slot_count_
- // slots.
- slot_count_ = ic_slots;
-#endif
- return FeedbackVectorRequirements(0, ic_slots);
-}
-
-
-FeedbackVectorICSlot ClassLiteral::SlotForHomeObject(Expression* value,
- int* slot_index) const {
- if (FLAG_vector_stores && FunctionLiteral::NeedsHomeObject(value)) {
- DCHECK(slot_index != NULL && *slot_index >= 0 && *slot_index < slot_count_);
- FeedbackVectorICSlot slot = GetNthSlot(*slot_index);
- *slot_index += 1;
- return slot;
+ if (FunctionLiteral::NeedsHomeObject(value)) {
+ property->set_slot(spec->AddStoreICSlot());
+ }
}
- return FeedbackVectorICSlot::Invalid();
}
@@ -336,53 +296,67 @@ bool ObjectLiteral::Property::emit_store() {
}
-FeedbackVectorRequirements ObjectLiteral::ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) {
- if (!FLAG_vector_stores) return FeedbackVectorRequirements(0, 0);
+void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
+ FeedbackVectorSpec* spec,
+ ICSlotCache* cache) {
+ if (!FLAG_vector_stores) return;
// This logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitObjectLiteral.
- int ic_slots = 0;
- for (int i = 0; i < properties()->length(); i++) {
- ObjectLiteral::Property* property = properties()->at(i);
+ int property_index = 0;
+ for (; property_index < properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = properties()->at(property_index);
+ if (property->is_computed_name()) break;
if (property->IsCompileTimeValue()) continue;
+ Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
- if (property->is_computed_name() &&
- property->kind() != ObjectLiteral::Property::PROTOTYPE) {
- if (FunctionLiteral::NeedsHomeObject(value)) ic_slots++;
- } else if (property->emit_store()) {
- if (property->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL ||
- property->kind() == ObjectLiteral::Property::COMPUTED) {
- Literal* key = property->key()->AsLiteral();
- if (key->value()->IsInternalizedString()) ic_slots++;
- if (FunctionLiteral::NeedsHomeObject(value)) ic_slots++;
- } else if (property->kind() == ObjectLiteral::Property::GETTER ||
- property->kind() == ObjectLiteral::Property::SETTER) {
- // We might need a slot for the home object.
- if (FunctionLiteral::NeedsHomeObject(value)) ic_slots++;
- }
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ UNREACHABLE();
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ // Fall through.
+ case ObjectLiteral::Property::COMPUTED:
+ // It is safe to use [[Put]] here because the boilerplate already
+ // contains computed properties with an uninitialized value.
+ if (key->value()->IsInternalizedString()) {
+ if (property->emit_store()) {
+ property->set_slot(spec->AddStoreICSlot());
+ if (FunctionLiteral::NeedsHomeObject(value)) {
+ spec->AddStoreICSlot();
+ }
+ }
+ break;
+ }
+ if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
+ property->set_slot(spec->AddStoreICSlot());
+ }
+ break;
+ case ObjectLiteral::Property::PROTOTYPE:
+ break;
+ case ObjectLiteral::Property::GETTER:
+ if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
+ property->set_slot(spec->AddStoreICSlot());
+ }
+ break;
+ case ObjectLiteral::Property::SETTER:
+ if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
+ property->set_slot(spec->AddStoreICSlot());
+ }
+ break;
}
}
-#ifdef DEBUG
- // FullCodeGenerator::VisitObjectLiteral verifies that it consumes slot_count_
- // slots.
- slot_count_ = ic_slots;
-#endif
- return FeedbackVectorRequirements(0, ic_slots);
-}
-
+ for (; property_index < properties()->length(); property_index++) {
+ ObjectLiteral::Property* property = properties()->at(property_index);
-FeedbackVectorICSlot ObjectLiteral::SlotForHomeObject(Expression* value,
- int* slot_index) const {
- if (FLAG_vector_stores && FunctionLiteral::NeedsHomeObject(value)) {
- DCHECK(slot_index != NULL && *slot_index >= 0 && *slot_index < slot_count_);
- FeedbackVectorICSlot slot = GetNthSlot(*slot_index);
- *slot_index += 1;
- return slot;
+ Expression* value = property->value();
+ if (property->kind() != ObjectLiteral::Property::PROTOTYPE) {
+ if (FunctionLiteral::NeedsHomeObject(value)) {
+ property->set_slot(spec->AddStoreICSlot());
+ }
+ }
}
- return FeedbackVectorICSlot::Invalid();
}
@@ -745,11 +719,14 @@ bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const {
}
-FeedbackVectorRequirements Call::ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) {
- int ic_slots = IsUsingCallFeedbackICSlot(isolate) ? 1 : 0;
- int slots = IsUsingCallFeedbackSlot(isolate) ? 1 : 0;
- return FeedbackVectorRequirements(slots, ic_slots);
+void Call::AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ ICSlotCache* cache) {
+ if (IsUsingCallFeedbackICSlot(isolate)) {
+ ic_slot_ = spec->AddCallICSlot();
+ }
+ if (IsUsingCallFeedbackSlot(isolate)) {
+ slot_ = spec->AddStubSlot();
+ }
}
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 10dbdb9109..4764918849 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -44,23 +44,24 @@ namespace internal {
V(ImportDeclaration) \
V(ExportDeclaration)
-#define STATEMENT_NODE_LIST(V) \
- V(Block) \
- V(ExpressionStatement) \
- V(EmptyStatement) \
- V(IfStatement) \
- V(ContinueStatement) \
- V(BreakStatement) \
- V(ReturnStatement) \
- V(WithStatement) \
- V(SwitchStatement) \
- V(DoWhileStatement) \
- V(WhileStatement) \
- V(ForStatement) \
- V(ForInStatement) \
- V(ForOfStatement) \
- V(TryCatchStatement) \
- V(TryFinallyStatement) \
+#define STATEMENT_NODE_LIST(V) \
+ V(Block) \
+ V(ExpressionStatement) \
+ V(EmptyStatement) \
+ V(SloppyBlockFunctionStatement) \
+ V(IfStatement) \
+ V(ContinueStatement) \
+ V(BreakStatement) \
+ V(ReturnStatement) \
+ V(WithStatement) \
+ V(SwitchStatement) \
+ V(DoWhileStatement) \
+ V(WhileStatement) \
+ V(ForStatement) \
+ V(ForInStatement) \
+ V(ForOfStatement) \
+ V(TryCatchStatement) \
+ V(TryFinallyStatement) \
V(DebuggerStatement)
#define EXPRESSION_NODE_LIST(V) \
@@ -88,7 +89,8 @@ namespace internal {
V(ThisFunction) \
V(SuperPropertyReference) \
V(SuperCallReference) \
- V(CaseClause)
+ V(CaseClause) \
+ V(EmptyParentheses)
#define AST_NODE_LIST(V) \
DECLARATION_NODE_LIST(V) \
@@ -136,20 +138,6 @@ typedef ZoneList<Handle<Object>> ZoneObjectList;
friend class AstNodeFactory;
-class FeedbackVectorRequirements {
- public:
- FeedbackVectorRequirements(int slots, int ic_slots)
- : slots_(slots), ic_slots_(ic_slots) {}
-
- int slots() const { return slots_; }
- int ic_slots() const { return ic_slots_; }
-
- private:
- int slots_;
- int ic_slots_;
-};
-
-
class ICSlotCache {
public:
explicit ICSlotCache(Zone* zone)
@@ -190,18 +178,13 @@ class AstProperties final BASE_EMBEDDED {
int node_count() { return node_count_; }
void add_node_count(int count) { node_count_ += count; }
- int slots() const { return spec_.slots(); }
- void increase_slots(int count) { spec_.increase_slots(count); }
-
- int ic_slots() const { return spec_.ic_slots(); }
- void increase_ic_slots(int count) { spec_.increase_ic_slots(count); }
- void SetKind(int ic_slot, Code::Kind kind) { spec_.SetKind(ic_slot, kind); }
- const ZoneFeedbackVectorSpec* get_spec() const { return &spec_; }
+ const FeedbackVectorSpec* get_spec() const { return &spec_; }
+ FeedbackVectorSpec* get_spec() { return &spec_; }
private:
Flags flags_;
int node_count_;
- ZoneFeedbackVectorSpec spec_;
+ FeedbackVectorSpec spec_;
};
DEFINE_OPERATORS_FOR_FLAGS(AstProperties::Flags)
@@ -245,20 +228,9 @@ class AstNode: public ZoneObject {
// node types which don't actually have this. Note that this is conceptually
// not really nice, but multiple inheritance would introduce yet another
// vtable entry per node, something we don't want for space reasons.
- virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) {
- return FeedbackVectorRequirements(0, 0);
- }
- virtual void SetFirstFeedbackSlot(FeedbackVectorSlot slot) { UNREACHABLE(); }
- virtual void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) {
- UNREACHABLE();
- }
- // Each ICSlot stores a kind of IC which the participating node should know.
- virtual Code::Kind FeedbackICSlotKind(int index) {
- UNREACHABLE();
- return Code::NUMBER_OF_KINDS;
- }
+ virtual void AssignFeedbackVectorSlots(Isolate* isolate,
+ FeedbackVectorSpec* spec,
+ ICSlotCache* cache) {}
private:
// Hidden to prevent accidental usage. It would have to load the
@@ -405,7 +377,7 @@ class Expression : public AstNode {
Expression(Zone* zone, int pos)
: AstNode(pos),
base_id_(BailoutId::None().ToInt()),
- bounds_(Bounds::Unbounded(zone)),
+ bounds_(Bounds::Unbounded()),
bit_field_(0) {}
static int parent_num_ids() { return 0; }
void set_to_boolean_types(uint16_t types) {
@@ -804,13 +776,8 @@ class ForEachStatement : public IterationStatement {
Expression* each() const { return each_; }
Expression* subject() const { return subject_; }
- FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override;
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) override {
- each_slot_ = slot;
- }
- Code::Kind FeedbackICSlotKind(int index) override;
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ ICSlotCache* cache) override;
FeedbackVectorICSlot EachFeedbackSlot() const { return each_slot_; }
protected:
@@ -836,15 +803,10 @@ class ForInStatement final : public ForEachStatement {
}
// Type feedback information.
- FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override {
- FeedbackVectorRequirements base =
- ForEachStatement::ComputeFeedbackRequirements(isolate, cache);
- DCHECK(base.slots() == 0 && base.ic_slots() <= 1);
- return FeedbackVectorRequirements(1, base.ic_slots());
- }
- void SetFirstFeedbackSlot(FeedbackVectorSlot slot) override {
- for_in_feedback_slot_ = slot;
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ ICSlotCache* cache) override {
+ ForEachStatement::AssignFeedbackVectorSlots(isolate, spec, cache);
+ for_in_feedback_slot_ = spec->AddStubSlot();
}
FeedbackVectorSlot ForInFeedbackSlot() {
@@ -1267,6 +1229,29 @@ class EmptyStatement final : public Statement {
};
+// Delegates to another statement, which may be overwritten.
+// This was introduced to implement ES2015 Annex B3.3: whether a sloppy-mode
+// block-scoped function also gets a var binding is only decided during
+// parsing, so the delegated statement is changed from one form to another.
+class SloppyBlockFunctionStatement final : public Statement {
+ public:
+ DECLARE_NODE_TYPE(SloppyBlockFunctionStatement)
+
+ Statement* statement() const { return statement_; }
+ void set_statement(Statement* statement) { statement_ = statement; }
+ Scope* scope() const { return scope_; }
+
+ private:
+ SloppyBlockFunctionStatement(Zone* zone, Statement* statement, Scope* scope)
+ : Statement(zone, RelocInfo::kNoPosition),
+ statement_(statement),
+ scope_(scope) {}
+
+ Statement* statement_;
+ Scope* const scope_;
+};
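+
+// Illustrative JS (assumption, not part of this patch):
+//
+//   { function f() {} }  // sloppy mode
+//   f();                 // legal: Annex B3.3 gives f a function-scoped var
+//
+// While parsing, the declaration is wrapped in a SloppyBlockFunctionStatement
+// so that statement_ can be replaced once the hoisting decision is made.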
+
+
class Literal final : public Expression {
public:
DECLARE_NODE_TYPE(Literal)
@@ -1398,6 +1383,14 @@ class ObjectLiteralProperty final : public ZoneObject {
bool is_static() const { return is_static_; }
bool is_computed_name() const { return is_computed_name_; }
+ FeedbackVectorICSlot GetSlot(int offset = 0) const {
+ if (slot_.IsInvalid()) return slot_;
+ int slot = slot_.ToInt();
+ return FeedbackVectorICSlot(slot + offset);
+ }
+ FeedbackVectorICSlot slot() const { return slot_; }
+ void set_slot(FeedbackVectorICSlot slot) { slot_ = slot; }
+
void set_receiver_type(Handle<Map> map) { receiver_type_ = map; }
protected:
@@ -1412,6 +1405,7 @@ class ObjectLiteralProperty final : public ZoneObject {
private:
Expression* key_;
Expression* value_;
+ FeedbackVectorICSlot slot_;
Kind kind_;
bool emit_store_;
bool is_static_;
@@ -1476,8 +1470,8 @@ class ObjectLiteral final : public MaterializedLiteral {
struct Accessors: public ZoneObject {
Accessors() : getter(NULL), setter(NULL) {}
- Expression* getter;
- Expression* setter;
+ ObjectLiteralProperty* getter;
+ ObjectLiteralProperty* setter;
};
BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
@@ -1491,25 +1485,8 @@ class ObjectLiteral final : public MaterializedLiteral {
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
- FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override;
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) override {
- slot_ = slot;
- }
- Code::Kind FeedbackICSlotKind(int index) override { return Code::STORE_IC; }
- FeedbackVectorICSlot GetNthSlot(int n) const {
- return FeedbackVectorICSlot(slot_.ToInt() + n);
- }
-
- // If value needs a home object, returns a valid feedback vector ic slot
- // given by slot_index, and increments slot_index.
- FeedbackVectorICSlot SlotForHomeObject(Expression* value,
- int* slot_index) const;
-
-#ifdef DEBUG
- int slot_count() const { return slot_count_; }
-#endif
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ ICSlotCache* cache) override;
protected:
ObjectLiteral(Zone* zone, ZoneList<Property*>* properties, int literal_index,
@@ -1522,9 +1499,6 @@ class ObjectLiteral final : public MaterializedLiteral {
has_elements_(false),
may_store_doubles_(false),
has_function_(has_function),
-#ifdef DEBUG
- slot_count_(0),
-#endif
slot_(FeedbackVectorICSlot::Invalid()) {
}
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
@@ -1538,11 +1512,6 @@ class ObjectLiteral final : public MaterializedLiteral {
bool has_elements_;
bool may_store_doubles_;
bool has_function_;
-#ifdef DEBUG
- // slot_count_ helps validate that the logic to allocate ic slots and the
- // logic to use them are in sync.
- int slot_count_;
-#endif
FeedbackVectorICSlot slot_;
};
@@ -1686,12 +1655,9 @@ class VariableProxy final : public Expression {
return var()->IsUnallocated() || var()->IsLookupSlot();
}
- virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override;
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ ICSlotCache* cache) override;
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) override;
- Code::Kind FeedbackICSlotKind(int index) override { return Code::LOAD_IC; }
FeedbackVectorICSlot VariableFeedbackSlot() {
return variable_feedback_slot_;
}
@@ -1788,16 +1754,12 @@ class Property final : public Expression {
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
- virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override {
- return FeedbackVectorRequirements(0, 1);
- }
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) override {
- property_feedback_slot_ = slot;
- }
- Code::Kind FeedbackICSlotKind(int index) override {
- return key()->IsPropertyName() ? Code::LOAD_IC : Code::KEYED_LOAD_IC;
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ ICSlotCache* cache) override {
+ FeedbackVectorSlotKind kind = key()->IsPropertyName()
+ ? FeedbackVectorSlotKind::LOAD_IC
+ : FeedbackVectorSlotKind::KEYED_LOAD_IC;
+ property_feedback_slot_ = spec->AddSlot(kind);
}
FeedbackVectorICSlot PropertyFeedbackSlot() const {
@@ -1846,14 +1808,8 @@ class Call final : public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
- virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override;
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) override {
- ic_slot_ = slot;
- }
- void SetFirstFeedbackSlot(FeedbackVectorSlot slot) override { slot_ = slot; }
- Code::Kind FeedbackICSlotKind(int index) override { return Code::CALL_IC; }
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ ICSlotCache* cache) override;
FeedbackVectorSlot CallFeedbackSlot() const { return slot_; }
@@ -1963,22 +1919,15 @@ class CallNew final : public Expression {
ZoneList<Expression*>* arguments() const { return arguments_; }
// Type feedback information.
- virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override {
- return FeedbackVectorRequirements(FLAG_pretenuring_call_new ? 2 : 1, 0);
- }
- void SetFirstFeedbackSlot(FeedbackVectorSlot slot) override {
- callnew_feedback_slot_ = slot;
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ ICSlotCache* cache) override {
+ callnew_feedback_slot_ = spec->AddStubSlot();
}
FeedbackVectorSlot CallNewFeedbackSlot() {
DCHECK(!callnew_feedback_slot_.IsInvalid());
return callnew_feedback_slot_;
}
- FeedbackVectorSlot AllocationSiteFeedbackSlot() {
- DCHECK(FLAG_pretenuring_call_new);
- return CallNewFeedbackSlot().next();
- }
bool IsMonomorphic() override { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
@@ -2031,51 +1980,45 @@ class CallRuntime final : public Expression {
public:
DECLARE_NODE_TYPE(CallRuntime)
- Handle<String> name() const { return raw_name_->string(); }
- const AstRawString* raw_name() const { return raw_name_; }
- const Runtime::Function* function() const { return function_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
bool is_jsruntime() const { return function_ == NULL; }
- // Type feedback information.
- bool HasCallRuntimeFeedbackSlot() const { return is_jsruntime(); }
- virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override {
- return FeedbackVectorRequirements(0, HasCallRuntimeFeedbackSlot() ? 1 : 0);
+ int context_index() const {
+ DCHECK(is_jsruntime());
+ return context_index_;
}
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) override {
- callruntime_feedback_slot_ = slot;
- }
- Code::Kind FeedbackICSlotKind(int index) override { return Code::LOAD_IC; }
-
- FeedbackVectorICSlot CallRuntimeFeedbackSlot() {
- DCHECK(!HasCallRuntimeFeedbackSlot() ||
- !callruntime_feedback_slot_.IsInvalid());
- return callruntime_feedback_slot_;
+ const Runtime::Function* function() const {
+ DCHECK(!is_jsruntime());
+ return function_;
}
static int num_ids() { return parent_num_ids() + 1; }
BailoutId CallId() { return BailoutId(local_id(0)); }
+ const char* debug_name() {
+ return is_jsruntime() ? "(context function)" : function_->name;
+ }
+
protected:
- CallRuntime(Zone* zone, const AstRawString* name,
- const Runtime::Function* function,
+ CallRuntime(Zone* zone, const Runtime::Function* function,
ZoneList<Expression*>* arguments, int pos)
+ : Expression(zone, pos), function_(function), arguments_(arguments) {}
+
+ CallRuntime(Zone* zone, int context_index, ZoneList<Expression*>* arguments,
+ int pos)
: Expression(zone, pos),
- raw_name_(name),
- function_(function),
- arguments_(arguments),
- callruntime_feedback_slot_(FeedbackVectorICSlot::Invalid()) {}
+ function_(NULL),
+ context_index_(context_index),
+ arguments_(arguments) {}
+
static int parent_num_ids() { return Expression::num_ids(); }
private:
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
- const AstRawString* raw_name_;
const Runtime::Function* function_;
+ int context_index_;
ZoneList<Expression*>* arguments_;
- FeedbackVectorICSlot callruntime_feedback_slot_;
};
@@ -2207,13 +2150,8 @@ class CountOperation final : public Expression {
return TypeFeedbackId(local_id(3));
}
- FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override;
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) override {
- slot_ = slot;
- }
- Code::Kind FeedbackICSlotKind(int index) override;
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ ICSlotCache* cache) override;
FeedbackVectorICSlot CountSlot() const { return slot_; }
protected:
@@ -2233,8 +2171,8 @@ class CountOperation final : public Expression {
class IsPrefixField : public BitField16<bool, 0, 1> {};
class KeyTypeField : public BitField16<IcCheckType, 1, 1> {};
- class StoreModeField : public BitField16<KeyedAccessStoreMode, 2, 4> {};
- class TokenField : public BitField16<Token::Value, 6, 8> {};
+ class StoreModeField : public BitField16<KeyedAccessStoreMode, 2, 3> {};
+ class TokenField : public BitField16<Token::Value, 5, 8> {};
// Starts with 16-bit field, which should get packed together with
// Expression's trailing 16-bit field.
@@ -2385,13 +2323,8 @@ class Assignment final : public Expression {
bit_field_ = StoreModeField::update(bit_field_, mode);
}
- FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override;
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) override {
- slot_ = slot;
- }
- Code::Kind FeedbackICSlotKind(int index) override;
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ ICSlotCache* cache) override;
FeedbackVectorICSlot AssignmentSlot() const { return slot_; }
protected:
@@ -2404,8 +2337,8 @@ class Assignment final : public Expression {
class IsUninitializedField : public BitField16<bool, 0, 1> {};
class KeyTypeField : public BitField16<IcCheckType, 1, 1> {};
- class StoreModeField : public BitField16<KeyedAccessStoreMode, 2, 4> {};
- class TokenField : public BitField16<Token::Value, 6, 8> {};
+ class StoreModeField : public BitField16<KeyedAccessStoreMode, 2, 3> {};
+ class TokenField : public BitField16<Token::Value, 5, 8> {};
// Starts with 16-bit field, which should get packed together with
// Expression's trailing 16-bit field.
@@ -2435,16 +2368,12 @@ class Yield final : public Expression {
// Type feedback information.
bool HasFeedbackSlots() const { return yield_kind() == kDelegating; }
- virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override {
- return FeedbackVectorRequirements(0, HasFeedbackSlots() ? 3 : 0);
- }
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) override {
- yield_first_feedback_slot_ = slot;
- }
- Code::Kind FeedbackICSlotKind(int index) override {
- return index == 0 ? Code::KEYED_LOAD_IC : Code::LOAD_IC;
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ ICSlotCache* cache) override {
+ if (HasFeedbackSlots()) {
+ yield_first_feedback_slot_ = spec->AddKeyedLoadICSlot();
+ spec->AddLoadICSlots(2);
+ }
}
FeedbackVectorICSlot KeyedLoadFeedbackSlot() {
@@ -2613,7 +2542,7 @@ class FunctionLiteral final : public Expression {
void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties;
}
- const ZoneFeedbackVectorSpec* feedback_vector_spec() const {
+ const FeedbackVectorSpec* feedback_vector_spec() const {
return ast_properties_.get_spec();
}
bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
@@ -2712,25 +2641,15 @@ class ClassLiteral final : public Expression {
// Class literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
- FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate, const ICSlotCache* cache) override;
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
- ICSlotCache* cache) override {
- slot_ = slot;
- }
- Code::Kind FeedbackICSlotKind(int index) override { return Code::STORE_IC; }
- FeedbackVectorICSlot GetNthSlot(int n) const {
- return FeedbackVectorICSlot(slot_.ToInt() + n);
- }
+ void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+ ICSlotCache* cache) override;
- // If value needs a home object, returns a valid feedback vector ic slot
- // given by slot_index, and increments slot_index.
- FeedbackVectorICSlot SlotForHomeObject(Expression* value,
- int* slot_index) const;
+ bool NeedsProxySlot() const {
+ return FLAG_vector_stores && scope() != NULL &&
+ class_variable_proxy()->var()->IsUnallocated();
+ }
-#ifdef DEBUG
- int slot_count() const { return slot_count_; }
-#endif
+ FeedbackVectorICSlot ProxySlot() const { return slot_; }
protected:
ClassLiteral(Zone* zone, const AstRawString* name, Scope* scope,
@@ -2745,9 +2664,6 @@ class ClassLiteral final : public Expression {
constructor_(constructor),
properties_(properties),
end_position_(end_position),
-#ifdef DEBUG
- slot_count_(0),
-#endif
slot_(FeedbackVectorICSlot::Invalid()) {
}
@@ -2763,11 +2679,6 @@ class ClassLiteral final : public Expression {
FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
int end_position_;
-#ifdef DEBUG
- // slot_count_ helps validate that the logic to allocate ic slots and the
- // logic to use them are in sync.
- int slot_count_;
-#endif
FeedbackVectorICSlot slot_;
};
@@ -2848,6 +2759,17 @@ class SuperCallReference final : public Expression {
};
+// This node is produced when parsing the () of an arrow function with no
+// arguments; it is not itself a valid expression.
+class EmptyParentheses final : public Expression {
+ public:
+ DECLARE_NODE_TYPE(EmptyParentheses)
+
+ private:
+ EmptyParentheses(Zone* zone, int pos) : Expression(zone, pos) {}
+};
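+
+// Illustrative JS (assumption, not part of this patch): in `() => 0` the
+// parser first parses `()` as an expression and produces an EmptyParentheses
+// node, which is only legal if arrow-function parsing immediately consumes it
+// as an empty parameter list.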
+
+
#undef DECLARE_NODE_TYPE
@@ -3391,6 +3313,12 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (local_zone_) EmptyStatement(local_zone_, pos);
}
+ SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(
+ Statement* statement, Scope* scope) {
+ return new (local_zone_)
+ SloppyBlockFunctionStatement(local_zone_, statement, scope);
+ }
+
CaseClause* NewCaseClause(
Expression* label, ZoneList<Statement*>* statements, int pos) {
return new (local_zone_) CaseClause(local_zone_, label, statements, pos);
@@ -3520,12 +3448,21 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (local_zone_) CallNew(local_zone_, expression, arguments, pos);
}
- CallRuntime* NewCallRuntime(const AstRawString* name,
- const Runtime::Function* function,
- ZoneList<Expression*>* arguments,
- int pos) {
+ CallRuntime* NewCallRuntime(Runtime::FunctionId id,
+ ZoneList<Expression*>* arguments, int pos) {
return new (local_zone_)
- CallRuntime(local_zone_, name, function, arguments, pos);
+ CallRuntime(local_zone_, Runtime::FunctionForId(id), arguments, pos);
+ }
+
+ CallRuntime* NewCallRuntime(const Runtime::Function* function,
+ ZoneList<Expression*>* arguments, int pos) {
+ return new (local_zone_) CallRuntime(local_zone_, function, arguments, pos);
+ }
+
+ CallRuntime* NewCallRuntime(int context_index,
+ ZoneList<Expression*>* arguments, int pos) {
+ return new (local_zone_)
+ CallRuntime(local_zone_, context_index, arguments, pos);
}
UnaryOperation* NewUnaryOperation(Token::Value op,
@@ -3649,6 +3586,10 @@ class AstNodeFactory final BASE_EMBEDDED {
parser_zone_, this_var, new_target_var, this_function_var, pos);
}
+ EmptyParentheses* NewEmptyParentheses(int pos) {
+ return new (local_zone_) EmptyParentheses(local_zone_, pos);
+ }
+
Zone* zone() const { return local_zone_; }
// Handles use of temporary zones when parsing inner function bodies.
diff --git a/deps/v8/src/atomic-utils.h b/deps/v8/src/atomic-utils.h
new file mode 100644
index 0000000000..2aa78f8b5e
--- /dev/null
+++ b/deps/v8/src/atomic-utils.h
@@ -0,0 +1,174 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ATOMIC_UTILS_H_
+#define V8_ATOMIC_UTILS_H_
+
+#include <limits.h>
+
+#include "src/base/atomicops.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+template <class T>
+class AtomicNumber {
+ public:
+ AtomicNumber() : value_(0) {}
+ explicit AtomicNumber(T initial) : value_(initial) {}
+
+ V8_INLINE void Increment(T increment) {
+ base::Barrier_AtomicIncrement(&value_,
+ static_cast<base::AtomicWord>(increment));
+ }
+
+ V8_INLINE T Value() { return static_cast<T>(base::Acquire_Load(&value_)); }
+
+ V8_INLINE void SetValue(T new_value) {
+ base::Release_Store(&value_, static_cast<base::AtomicWord>(new_value));
+ }
+
+ V8_INLINE T operator=(T value) {
+ SetValue(value);
+ return value;
+ }
+
+ private:
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
+
+ base::AtomicWord value_;
+};
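+
+// Usage sketch (illustrative only):
+//
+//   AtomicNumber<size_t> counter;
+//   counter.Increment(16);             // lock-free add
+//   size_t current = counter.Value();  // acquire load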
+
+
+// Flag using T atomically. Also accepts void* as T.
+template <typename T>
+class AtomicValue {
+ public:
+ AtomicValue() : value_(0) {}
+
+ explicit AtomicValue(T initial)
+ : value_(cast_helper<T>::to_storage_type(initial)) {}
+
+ V8_INLINE T Value() {
+ return cast_helper<T>::to_return_type(base::Acquire_Load(&value_));
+ }
+
+ V8_INLINE bool TrySetValue(T old_value, T new_value) {
+ return base::Release_CompareAndSwap(
+ &value_, cast_helper<T>::to_storage_type(old_value),
+ cast_helper<T>::to_storage_type(new_value)) ==
+ cast_helper<T>::to_storage_type(old_value);
+ }
+
+ V8_INLINE void SetValue(T new_value) {
+ base::Release_Store(&value_, cast_helper<T>::to_storage_type(new_value));
+ }
+
+ private:
+ STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
+
+ template <typename S>
+ struct cast_helper {
+ static base::AtomicWord to_storage_type(S value) {
+ return static_cast<base::AtomicWord>(value);
+ }
+ static S to_return_type(base::AtomicWord value) {
+ return static_cast<S>(value);
+ }
+ };
+
+ template <typename S>
+ struct cast_helper<S*> {
+ static base::AtomicWord to_storage_type(S* value) {
+ return reinterpret_cast<base::AtomicWord>(value);
+ }
+ static S* to_return_type(base::AtomicWord value) {
+ return reinterpret_cast<S*>(value);
+ }
+ };
+
+ base::AtomicWord value_;
+};
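+
+// Usage sketch (illustrative only); publish a pointer at most once:
+//
+//   AtomicValue<int*> slot(nullptr);
+//   int* candidate = new int(42);
+//   if (!slot.TrySetValue(nullptr, candidate)) delete candidate;  // lost race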
+
+
+// See utils.h for EnumSet. Storage is always base::AtomicWord.
+// Requirements on E:
+// - No explicit values.
+// - E::kLastValue defined to be the last actually used value.
+//
+// Example:
+// enum E { kA, kB, kC, kLastValue = kC };
+template <class E>
+class AtomicEnumSet {
+ public:
+ explicit AtomicEnumSet(base::AtomicWord bits = 0) : bits_(bits) {}
+
+ bool IsEmpty() const { return ToIntegral() == 0; }
+
+ bool Contains(E element) const { return (ToIntegral() & Mask(element)) != 0; }
+ bool ContainsAnyOf(const AtomicEnumSet& set) const {
+ return (ToIntegral() & set.ToIntegral()) != 0;
+ }
+
+ void RemoveAll() { base::Release_Store(&bits_, 0); }
+
+ bool operator==(const AtomicEnumSet& set) const {
+ return ToIntegral() == set.ToIntegral();
+ }
+
+ bool operator!=(const AtomicEnumSet& set) const {
+ return ToIntegral() != set.ToIntegral();
+ }
+
+ AtomicEnumSet<E> operator|(const AtomicEnumSet& set) const {
+ return AtomicEnumSet<E>(ToIntegral() | set.ToIntegral());
+ }
+
+// The following operations modify the underlying storage.
+
+#define ATOMIC_SET_WRITE(OP, NEW_VAL) \
+ do { \
+ base::AtomicWord old; \
+ do { \
+ old = base::Acquire_Load(&bits_); \
+ } while (base::Release_CompareAndSwap(&bits_, old, old OP NEW_VAL) != \
+ old); \
+ } while (false)
+
+ void Add(E element) { ATOMIC_SET_WRITE(|, Mask(element)); }
+
+ void Add(const AtomicEnumSet& set) { ATOMIC_SET_WRITE(|, set.ToIntegral()); }
+
+ void Remove(E element) { ATOMIC_SET_WRITE(&, ~Mask(element)); }
+
+ void Remove(const AtomicEnumSet& set) {
+ ATOMIC_SET_WRITE(&, ~set.ToIntegral());
+ }
+
+ void Intersect(const AtomicEnumSet& set) {
+ ATOMIC_SET_WRITE(&, set.ToIntegral());
+ }
+
+#undef ATOMIC_SET_WRITE
+
+ private:
+ // Check whether there's enough storage to hold E.
+ STATIC_ASSERT(E::kLastValue < (sizeof(base::AtomicWord) * CHAR_BIT));
+
+ V8_INLINE base::AtomicWord ToIntegral() const {
+ return base::Acquire_Load(&bits_);
+ }
+
+ V8_INLINE base::AtomicWord Mask(E element) const {
+ return static_cast<base::AtomicWord>(1) << element;
+ }
+
+ base::AtomicWord bits_;
+};
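+
+// Usage sketch (illustrative only), given enum E { kA, kB, kLastValue = kB }:
+//
+//   AtomicEnumSet<E> flags;
+//   flags.Add(kA);                    // CAS retry loop internally
+//   bool has_a = flags.Contains(kA);  // acquire load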
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ATOMIC_UTILS_H_
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 05b8e427c6..b63c5fbfba 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -154,10 +154,12 @@ namespace internal {
V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
V(kOffsetOutOfRange, "Offset out of range") \
+ V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsNotADate, "Operand is not a date") \
+ V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAName, "Operand is not a name") \
V(kOperandIsNotANumber, "Operand is not a number") \
V(kOperandIsNotASmi, "Operand is not a smi") \
@@ -180,7 +182,6 @@ namespace internal {
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
- V(kRestParameter, "Rest parameters") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
V(kScriptContext, "Allocation of script context") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
@@ -197,8 +198,6 @@ namespace internal {
V(kTheInstructionShouldBeAnOris, "The instruction should be an oris") \
V(kTheInstructionShouldBeALi, "The instruction should be a li") \
V(kTheInstructionShouldBeASldi, "The instruction should be a sldi") \
- V(kTheInstructionToPatchShouldBeALoadFromConstantPool, \
- "The instruction to patch should be a load from the constant pool") \
V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
"The instruction to patch should be a ldr literal") \
V(kTheInstructionToPatchShouldBeALis, \
diff --git a/deps/v8/src/base.isolate b/deps/v8/src/base.isolate
new file mode 100644
index 0000000000..dceb413339
--- /dev/null
+++ b/deps/v8/src/base.isolate
@@ -0,0 +1,15 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'conditions': [
+ ['v8_use_external_startup_data==1', {
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/natives_blob.bin',
+ '<(PRODUCT_DIR)/snapshot_blob.bin',
+ ],
+ },
+ }],
+ ],
+} \ No newline at end of file
diff --git a/deps/v8/src/base/atomicops_internals_x86_gcc.cc b/deps/v8/src/base/atomicops_internals_x86_gcc.cc
index 969f2371b0..ab7dd8d091 100644
--- a/deps/v8/src/base/atomicops_internals_x86_gcc.cc
+++ b/deps/v8/src/base/atomicops_internals_x86_gcc.cc
@@ -5,7 +5,7 @@
// This module gets enough CPU information to optimize the
// atomicops module on x86.
-#include <string.h>
+#include <string.h> // NOLINT(build/include)
#include "src/base/atomicops.h"
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index abcfd9a9ed..f5710dfb5c 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -105,8 +105,8 @@ inline unsigned CountTrailingZeros32(uint32_t value) {
#else
if (value == 0) return 32;
unsigned count = 0;
- for (value ^= value - 1; value >>= 1; ++count)
- ;
+ for (value ^= value - 1; value >>= 1; ++count) {
+ }
return count;
#endif
}
@@ -121,8 +121,8 @@ inline unsigned CountTrailingZeros64(uint64_t value) {
#else
if (value == 0) return 64;
unsigned count = 0;
- for (value ^= value - 1; value >>= 1; ++count)
- ;
+ for (value ^= value - 1; value >>= 1; ++count) {
+ }
return count;
#endif
}
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index b8ba4eb8d2..8016218e5c 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -164,7 +164,11 @@
#define V8_TARGET_LITTLE_ENDIAN 1
#endif
#elif V8_TARGET_ARCH_MIPS64
+#if defined(__MIPSEB__) || defined(V8_TARGET_ARCH_MIPS64_BE)
+#define V8_TARGET_BIG_ENDIAN 1
+#else
#define V8_TARGET_LITTLE_ENDIAN 1
+#endif
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_PPC_LE
diff --git a/deps/v8/src/base/platform/condition-variable.cc b/deps/v8/src/base/platform/condition-variable.cc
index 982497fea4..e5b9bd0810 100644
--- a/deps/v8/src/base/platform/condition-variable.cc
+++ b/deps/v8/src/base/platform/condition-variable.cc
@@ -264,8 +264,8 @@ void ConditionVariable::Wait(Mutex* mutex) {
mutex->Unlock();
// Wait on the wait event.
- while (!event->WaitFor(INFINITE))
- ;
+ while (!event->WaitFor(INFINITE)) {
+ }
// Reacquire the user mutex.
mutex->Lock();
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index 513f5c8eca..03e9aa3717 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -46,7 +46,7 @@ static inline void* mmapHelper(size_t len, int prot, int flags, int fildes,
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(floor(time / msPerSecond));
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
if (NULL == t) return "";
return tzname[0]; // The location of the timezone string on AIX.
}
@@ -56,7 +56,7 @@ double OS::LocalTimeOffset(TimezoneCache* cache) {
// On AIX, struct tm does not contain a tm_gmtoff field.
time_t utc = time(NULL);
DCHECK(utc != -1);
- struct tm* loc = localtime(&utc);
+ struct tm* loc = localtime(&utc); // NOLINT(runtime/threadsafe_fn)
DCHECK(loc != NULL);
return static_cast<double>((mktime(loc) - utc) * msPerSecond);
}
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index e09fc68395..18f151ac29 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -29,7 +29,7 @@ namespace base {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
if (NULL == t) return "";
return tzname[0]; // The location of the timezone string on Cygwin.
}
@@ -39,7 +39,7 @@ double OS::LocalTimeOffset(TimezoneCache* cache) {
// On Cygwin, struct tm does not contain a tm_gmtoff field.
time_t utc = time(NULL);
DCHECK(utc != -1);
- struct tm* loc = localtime(&utc);
+ struct tm* loc = localtime(&utc); // NOLINT(runtime/threadsafe_fn)
DCHECK(loc != NULL);
// time - localtime includes any daylight savings offset, so subtract it.
return static_cast<double>((mktime(loc) - utc) * msPerSecond -
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index 8fe908c2df..b279e0c926 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -17,7 +17,6 @@
#include <sys/fcntl.h> // open
#include <sys/mman.h> // mmap & munmap
#include <sys/stat.h> // open
-#include <sys/types.h> // mmap & munmap
#include <unistd.h> // getpagesize
// If you don't have execinfo.h then you need devel/libexecinfo from ports.
#include <errno.h>
@@ -40,7 +39,7 @@ namespace base {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
if (NULL == t) return "";
return t->tm_zone;
}
@@ -48,7 +47,7 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index 874c6dbc31..2a2abfeb25 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -108,7 +108,7 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
#else
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
if (!t || !t->tm_zone) return "";
return t->tm_zone;
#endif
@@ -121,7 +121,7 @@ double OS::LocalTimeOffset(TimezoneCache* cache) {
return 0;
#else
time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
@@ -310,16 +310,19 @@ void VirtualMemory::Reset() {
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ CHECK(InVM(address, size));
return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
+ CHECK(InVM(address, size));
return UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
+ CHECK(InVM(address, OS::CommitPageSize()));
OS::Guard(address, OS::CommitPageSize());
return true;
}
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index 4b760c3b86..f16f329fc3 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -102,7 +102,7 @@ void OS::SignalCodeMovingGC() {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
if (NULL == t) return "";
return t->tm_zone;
}
@@ -110,7 +110,7 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc
index 32b1af1ca8..369dd8e1a6 100644
--- a/deps/v8/src/base/platform/platform-openbsd.cc
+++ b/deps/v8/src/base/platform/platform-openbsd.cc
@@ -20,7 +20,6 @@
#include <strings.h> // index
#include <sys/mman.h> // mmap & munmap
#include <sys/stat.h> // open
-#include <sys/types.h> // mmap & munmap
#include <unistd.h> // sysconf
#include <cmath>
@@ -38,7 +37,7 @@ namespace base {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
if (NULL == t) return "";
return t->tm_zone;
}
@@ -46,7 +45,7 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 8a037c02d3..046dbb69c3 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -384,7 +384,7 @@ void OS::ClearTimezoneCache(TimezoneCache* cache) {
double OS::DaylightSavingsOffset(double time, TimezoneCache*) {
if (std::isnan(time)) return std::numeric_limits<double>::quiet_NaN();
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
if (NULL == t) return std::numeric_limits<double>::quiet_NaN();
return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
}
diff --git a/deps/v8/src/base/platform/platform-qnx.cc b/deps/v8/src/base/platform/platform-qnx.cc
index 9f22db5d1e..b16652886e 100644
--- a/deps/v8/src/base/platform/platform-qnx.cc
+++ b/deps/v8/src/base/platform/platform-qnx.cc
@@ -24,7 +24,6 @@
#include <sys/mman.h> // mmap & munmap
#include <sys/procfs.h>
#include <sys/stat.h> // open
-#include <sys/types.h> // mmap & munmap
#include <unistd.h> // sysconf
#include <cmath>
@@ -89,7 +88,7 @@ bool OS::ArmUsingHardFloat() {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
if (NULL == t) return "";
return t->tm_zone;
}
@@ -97,7 +96,7 @@ const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
double OS::LocalTimeOffset(TimezoneCache* cache) {
time_t tv = time(NULL);
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
// tm_gmtoff includes any daylight savings offset, so subtract it.
return static_cast<double>(t->tm_gmtoff * msPerSecond -
(t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index e844aa12b4..7e07f1a1e2 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -38,7 +38,7 @@ namespace base {
const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
if (std::isnan(time)) return "";
time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
- struct tm* t = localtime(&tv);
+ struct tm* t = localtime(&tv); // NOLINT(runtime/threadsafe_fn)
if (NULL == t) return "";
return tzname[0]; // The location of the timezone string on Solaris.
}
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index afd3061aa7..a73dc523c4 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -46,7 +46,7 @@ inline void MemoryBarrier() {
int localtime_s(tm* out_tm, const time_t* time) {
- tm* posix_local_time_struct = localtime(time);
+ tm* posix_local_time_struct = localtime(time); // NOLINT
if (posix_local_time_struct == NULL) return 1;
*out_tm = *posix_local_time_struct;
return 0;
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 23ed123e66..2d08ecbd7f 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -272,6 +272,7 @@ class OS {
DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
};
+
// Represents and controls an area of reserved memory.
// Control of the reserved memory can be assigned to another VirtualMemory
// object by assignment or copy construction. This removes the reserved memory
@@ -329,6 +330,7 @@ class VirtualMemory {
// inside the allocated region.
void* address = address_;
size_t size = size_;
+ CHECK(InVM(address, size));
Reset();
bool result = ReleaseRegion(address, size);
USE(result);
@@ -360,6 +362,13 @@ class VirtualMemory {
static bool HasLazyCommits();
private:
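+  // Returns true iff [address, address + size) lies entirely within the
+  // reserved region [address_, address_ + size_).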
+ bool InVM(void* address, size_t size) {
+ return (reinterpret_cast<uintptr_t>(address_) <=
+ reinterpret_cast<uintptr_t>(address)) &&
+ ((reinterpret_cast<uintptr_t>(address_) + size_) >=
+ (reinterpret_cast<uintptr_t>(address) + size));
+ }
+
void* address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
};
diff --git a/deps/v8/src/base/smart-pointers.h b/deps/v8/src/base/smart-pointers.h
index 6528fca92d..df3fcac662 100644
--- a/deps/v8/src/base/smart-pointers.h
+++ b/deps/v8/src/base/smart-pointers.h
@@ -5,6 +5,8 @@
#ifndef V8_BASE_SMART_POINTERS_H_
#define V8_BASE_SMART_POINTERS_H_
+#include "src/base/logging.h"
+
namespace v8 {
namespace base {
diff --git a/deps/v8/src/bit-vector.cc b/deps/v8/src/bit-vector.cc
index 198b24273c..cdd00f89c4 100644
--- a/deps/v8/src/bit-vector.cc
+++ b/deps/v8/src/bit-vector.cc
@@ -21,7 +21,7 @@ void BitVector::Print() {
PrintF("%d", i);
}
}
- PrintF("}");
+ PrintF("}\n");
}
#endif
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 790a80b239..a8a5f97680 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -13,10 +13,16 @@
#include "src/extensions/gc-extension.h"
#include "src/extensions/statistics-extension.h"
#include "src/extensions/trigger-failure-extension.h"
+#include "src/heap/heap.h"
+#include "src/isolate-inl.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/snapshot.h"
#include "third_party/fdlibm/fdlibm.h"
+#if defined(V8_WASM)
+#include "src/wasm/wasm-js.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -51,6 +57,8 @@ Handle<String> Bootstrapper::SourceLookup(int index) {
template Handle<String> Bootstrapper::SourceLookup<Natives>(int index);
template Handle<String> Bootstrapper::SourceLookup<ExperimentalNatives>(
int index);
+template Handle<String> Bootstrapper::SourceLookup<ExperimentalExtraNatives>(
+ int index);
template Handle<String> Bootstrapper::SourceLookup<ExtraNatives>(int index);
template Handle<String> Bootstrapper::SourceLookup<CodeStubNatives>(int index);
@@ -121,7 +129,10 @@ void Bootstrapper::TearDown() {
DeleteNativeSources(Natives::GetSourceCache(isolate_->heap()));
DeleteNativeSources(ExperimentalNatives::GetSourceCache(isolate_->heap()));
DeleteNativeSources(ExtraNatives::GetSourceCache(isolate_->heap()));
+ DeleteNativeSources(
+ ExperimentalExtraNatives::GetSourceCache(isolate_->heap()));
DeleteNativeSources(CodeStubNatives::GetSourceCache(isolate_->heap()));
+
extensions_cache_.Initialize(isolate_, false); // Yes, symmetrical
}
@@ -206,6 +217,7 @@ class Genesis BASE_EMBEDDED {
Handle<JSFunction>* fun);
bool InstallExperimentalNatives();
bool InstallExtraNatives();
+ bool InstallExperimentalExtraNatives();
bool InstallDebuggerNatives();
void InstallBuiltinFunctionIds();
void InstallExperimentalBuiltinFunctionIds();
@@ -337,13 +349,13 @@ bool Bootstrapper::CreateCodeStubContext(Isolate* isolate) {
Handle<Context> native_context = CreateEnvironment(
MaybeHandle<JSGlobalProxy>(), v8::Local<v8::ObjectTemplate>(),
&no_extensions, THIN_CONTEXT);
- isolate->heap()->set_code_stub_context(*native_context);
+ isolate->heap()->SetRootCodeStubContext(*native_context);
isolate->set_context(*native_context);
Handle<JSObject> code_stub_exports =
isolate->factory()->NewJSObject(isolate->object_function());
JSObject::NormalizeProperties(code_stub_exports, CLEAR_INOBJECT_PROPERTIES, 2,
"container to export to extra natives");
- isolate->heap()->set_code_stub_exports_object(*code_stub_exports);
+ isolate->heap()->SetRootCodeStubExportsObject(*code_stub_exports);
return InstallCodeStubNatives(isolate);
}
@@ -369,26 +381,43 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
}
-static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
- const char* name, InstanceType type,
- int instance_size,
- MaybeHandle<JSObject> maybe_prototype,
- Builtins::Name call,
- bool strict_function_map = false) {
+namespace {
+
+Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<Name> name,
+ InstanceType type, int instance_size,
+ MaybeHandle<JSObject> maybe_prototype,
+ Builtins::Name call,
+ PropertyAttributes attributes,
+ bool strict_function_map = false) {
Isolate* isolate = target->GetIsolate();
Factory* factory = isolate->factory();
- Handle<String> internalized_name = factory->InternalizeUtf8String(name);
+ Handle<String> name_string = Name::ToFunctionName(name).ToHandleChecked();
Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
Handle<JSObject> prototype;
static const bool kReadOnlyPrototype = false;
static const bool kInstallConstructor = false;
Handle<JSFunction> function =
maybe_prototype.ToHandle(&prototype)
- ? factory->NewFunction(internalized_name, call_code, prototype, type,
+ ? factory->NewFunction(name_string, call_code, prototype, type,
instance_size, kReadOnlyPrototype,
kInstallConstructor, strict_function_map)
- : factory->NewFunctionWithoutPrototype(internalized_name, call_code,
+ : factory->NewFunctionWithoutPrototype(name_string, call_code,
strict_function_map);
+ JSObject::AddProperty(target, name, function, attributes);
+ if (target->IsJSGlobalObject()) {
+ function->shared()->set_instance_class_name(*name_string);
+ }
+ function->shared()->set_native(true);
+ return function;
+}
+
+
+Handle<JSFunction> InstallFunction(Handle<JSObject> target, const char* name,
+ InstanceType type, int instance_size,
+ MaybeHandle<JSObject> maybe_prototype,
+ Builtins::Name call,
+ bool strict_function_map = false) {
+ Factory* const factory = target->GetIsolate()->factory();
PropertyAttributes attributes;
if (target->IsJSBuiltinsObject()) {
attributes =
@@ -396,14 +425,13 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target,
} else {
attributes = DONT_ENUM;
}
- JSObject::AddProperty(target, internalized_name, function, attributes);
- if (target->IsJSGlobalObject()) {
- function->shared()->set_instance_class_name(*internalized_name);
- }
- function->shared()->set_native(true);
- return function;
+ return InstallFunction(target, factory->InternalizeUtf8String(name), type,
+ instance_size, maybe_prototype, call, attributes,
+ strict_function_map);
}
+} // namespace
+
void Genesis::SetFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode) {
@@ -459,7 +487,8 @@ void Genesis::SetFunctionInstanceDescriptor(Handle<Map> map,
Handle<Map> Genesis::CreateSloppyFunctionMap(FunctionMode function_mode) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetFunctionInstanceDescriptor(map, function_mode);
- map->set_function_with_prototype(IsFunctionModeWithPrototype(function_mode));
+ map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
+ map->set_is_callable();
return map;
}
@@ -547,7 +576,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// --- E m p t y ---
Handle<String> source = factory->NewStringFromStaticChars("() {}");
Handle<Script> script = factory->NewScript(source);
- script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
+ script->set_type(Script::TYPE_NATIVE);
empty_function->shared()->set_start_position(0);
empty_function->shared()->set_end_position(source->length());
empty_function->shared()->DontAdaptArguments();
@@ -698,7 +727,8 @@ Handle<Map> Genesis::CreateStrictFunctionMap(
FunctionMode function_mode, Handle<JSFunction> empty_function) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetStrictFunctionInstanceDescriptor(map, function_mode);
- map->set_function_with_prototype(IsFunctionModeWithPrototype(function_mode));
+ map->set_is_constructor(IsFunctionModeWithPrototype(function_mode));
+ map->set_is_callable();
Map::SetPrototype(map, empty_function);
return map;
}
@@ -708,8 +738,9 @@ Handle<Map> Genesis::CreateStrongFunctionMap(
Handle<JSFunction> empty_function, bool is_constructor) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetStrongFunctionInstanceDescriptor(map);
- map->set_function_with_prototype(is_constructor);
+ map->set_is_constructor(is_constructor);
Map::SetPrototype(map, empty_function);
+ map->set_is_callable();
map->set_is_extensible(is_constructor);
map->set_is_strong();
return map;
@@ -735,10 +766,20 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
strict_function_map_writable_prototype_ =
CreateStrictFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE, empty);
- // Special map for bound functions.
- Handle<Map> bound_function_map =
+ // Special map for non-constructor bound functions.
+ // TODO(bmeurer): Bound functions should not be represented as JSFunctions.
+ Handle<Map> bound_function_without_constructor_map =
CreateStrictFunctionMap(BOUND_FUNCTION, empty);
- native_context()->set_bound_function_map(*bound_function_map);
+ native_context()->set_bound_function_without_constructor_map(
+ *bound_function_without_constructor_map);
+
+ // Special map for constructor bound functions.
+ // TODO(bmeurer): Bound functions should not be represented as JSFunctions.
+ Handle<Map> bound_function_with_constructor_map =
+ Map::Copy(bound_function_without_constructor_map, "IsConstructor");
+ bound_function_with_constructor_map->set_is_constructor(true);
+ native_context()->set_bound_function_with_constructor_map(
+ *bound_function_with_constructor_map);
}
@@ -836,7 +877,7 @@ void Genesis::HookUpGlobalThisBinding(Handle<FixedArray> outdated_contexts) {
for (int i = 0; i < outdated_contexts->length(); ++i) {
Context* context = Context::cast(outdated_contexts->get(i));
if (context->IsScriptContext()) {
- ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
+ ScopeInfo* scope_info = context->scope_info();
int slot = scope_info->ReceiverContextSlotIndex();
if (slot >= 0) {
DCHECK_EQ(slot, Context::MIN_CONTEXT_SLOTS);
@@ -1016,8 +1057,10 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
Handle<JSObject> global(native_context()->global_object());
// Install global Function object
- InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
- empty_function, Builtins::kIllegal);
+ Handle<JSFunction> function_function =
+ InstallFunction(global, "Function", JS_FUNCTION_TYPE, JSFunction::kSize,
+ empty_function, Builtins::kIllegal);
+ function_function->initial_map()->set_is_callable();
{ // --- A r r a y ---
Handle<JSFunction> array_function =
@@ -1085,12 +1128,13 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
}
{ // --- S t r i n g ---
- Handle<JSFunction> string_fun =
- InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(),
- Builtins::kIllegal);
- string_fun->shared()->set_construct_stub(
- isolate->builtins()->builtin(Builtins::kStringConstructCode));
+ Handle<JSFunction> string_fun = InstallFunction(
+ global, "String", JS_VALUE_TYPE, JSValue::kSize,
+ isolate->initial_object_prototype(), Builtins::kStringConstructor);
+ string_fun->shared()->set_construct_stub(isolate->builtins()->builtin(
+ Builtins::kStringConstructor_ConstructStub));
+ string_fun->shared()->DontAdaptArguments();
+ string_fun->shared()->set_length(1);
native_context()->set_string_function(*string_fun);
Handle<Map> string_map =
@@ -1113,7 +1157,11 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
// --- S y m b o l ---
Handle<JSFunction> symbol_fun = InstallFunction(
global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
- isolate->initial_object_prototype(), Builtins::kIllegal);
+ isolate->initial_object_prototype(), Builtins::kSymbolConstructor);
+ symbol_fun->shared()->set_construct_stub(isolate->builtins()->builtin(
+ Builtins::kSymbolConstructor_ConstructStub));
+ symbol_fun->shared()->set_internal_formal_parameter_count(1);
+ symbol_fun->shared()->set_length(1);
native_context()->set_symbol_function(*symbol_fun);
}
@@ -1279,32 +1327,26 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
native_context()->set_js_set_fun(*js_set_fun);
}
- { // Set up the iterator result object
- STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2);
- Handle<JSFunction> object_function(native_context()->object_function());
- Handle<Map> iterator_result_map =
- Map::Create(isolate, JSGeneratorObject::kResultPropertyCount);
- DCHECK_EQ(JSGeneratorObject::kResultSize,
- iterator_result_map->instance_size());
- DCHECK_EQ(JSGeneratorObject::kResultPropertyCount,
- iterator_result_map->GetInObjectProperties());
- Map::EnsureDescriptorSlack(iterator_result_map,
- JSGeneratorObject::kResultPropertyCount);
-
- DataDescriptor value_descr(factory->value_string(),
- JSGeneratorObject::kResultValuePropertyIndex,
- NONE, Representation::Tagged());
- iterator_result_map->AppendDescriptor(&value_descr);
-
- DataDescriptor done_descr(factory->done_string(),
- JSGeneratorObject::kResultDonePropertyIndex, NONE,
- Representation::Tagged());
- iterator_result_map->AppendDescriptor(&done_descr);
-
- iterator_result_map->set_unused_property_fields(0);
- DCHECK_EQ(JSGeneratorObject::kResultSize,
- iterator_result_map->instance_size());
- native_context()->set_iterator_result_map(*iterator_result_map);
+ { // -- I t e r a t o r R e s u l t
+ Handle<Map> map =
+ factory->NewMap(JS_ITERATOR_RESULT_TYPE, JSIteratorResult::kSize);
+ Map::SetPrototype(map, isolate->initial_object_prototype());
+ Map::EnsureDescriptorSlack(map, 2);
+
+ { // value
+ DataDescriptor d(factory->value_string(), JSIteratorResult::kValueIndex,
+ NONE, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ { // done
+ DataDescriptor d(factory->done_string(), JSIteratorResult::kDoneIndex,
+ NONE, Representation::Tagged());
+ map->AppendDescriptor(&d);
+ }
+
+ map->SetInObjectProperties(2);
+ native_context()->set_iterator_result_map(*map);
}
// -- W e a k M a p
@@ -1341,7 +1383,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
}
// @@iterator method is added later.
- map->set_function_with_prototype(true);
map->SetInObjectProperties(2);
native_context()->set_sloppy_arguments_map(*map);
@@ -1407,7 +1448,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
}
// @@iterator method is added later.
- map->set_function_with_prototype(true);
DCHECK_EQ(native_context()->object_function()->prototype(),
*isolate->initial_object_prototype());
Map::SetPrototype(map, isolate->initial_object_prototype());
@@ -1494,9 +1534,13 @@ bool Bootstrapper::CompileBuiltin(Isolate* isolate, int index) {
Vector<const char> name = Natives::GetScriptName(index);
Handle<String> source_code =
isolate->bootstrapper()->SourceLookup<Natives>(index);
+
+ // We pass in extras_utils so that builtin code can set it up for later use
+ // by actual extras code, compiled with CompileExtraBuiltin.
Handle<Object> global = isolate->global_object();
Handle<Object> utils = isolate->natives_utils_object();
- Handle<Object> args[] = {global, utils};
+ Handle<Object> extras_utils = isolate->extras_utils_object();
+ Handle<Object> args[] = {global, utils, extras_utils};
return Bootstrapper::CompileNative(
isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
@@ -1525,7 +1569,24 @@ bool Bootstrapper::CompileExtraBuiltin(Isolate* isolate, int index) {
isolate->bootstrapper()->SourceLookup<ExtraNatives>(index);
Handle<Object> global = isolate->global_object();
Handle<Object> binding = isolate->extras_binding_object();
- Handle<Object> args[] = {global, binding};
+ Handle<Object> extras_utils = isolate->extras_utils_object();
+ Handle<Object> args[] = {global, binding, extras_utils};
+ return Bootstrapper::CompileNative(
+ isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
+ source_code, arraysize(args), args);
+}
+
+
+bool Bootstrapper::CompileExperimentalExtraBuiltin(Isolate* isolate,
+ int index) {
+ HandleScope scope(isolate);
+ Vector<const char> name = ExperimentalExtraNatives::GetScriptName(index);
+ Handle<String> source_code =
+ isolate->bootstrapper()->SourceLookup<ExperimentalExtraNatives>(index);
+ Handle<Object> global = isolate->global_object();
+ Handle<Object> binding = isolate->extras_binding_object();
+ Handle<Object> extras_utils = isolate->extras_utils_object();
+ Handle<Object> args[] = {global, binding, extras_utils};
return Bootstrapper::CompileNative(
isolate, name, Handle<JSObject>(isolate->native_context()->builtins()),
source_code, arraysize(args), args);
@@ -1677,18 +1738,21 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
template <typename Data>
Data* SetBuiltinTypedArray(Isolate* isolate, Handle<JSBuiltinsObject> builtins,
ExternalArrayType type, Data* data,
- size_t num_elements, const char* name) {
+ size_t num_elements, const char* name,
+ const SharedFlag shared = SharedFlag::kNotShared,
+ const PretenureFlag pretenure = TENURED) {
size_t byte_length = num_elements * sizeof(*data);
- Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
+ Handle<JSArrayBuffer> buffer =
+ isolate->factory()->NewJSArrayBuffer(shared, pretenure);
bool is_external = data != nullptr;
if (!is_external) {
data = reinterpret_cast<Data*>(
isolate->array_buffer_allocator()->Allocate(byte_length));
}
- Runtime::SetupArrayBuffer(isolate, buffer, is_external, data, byte_length);
+ JSArrayBuffer::Setup(buffer, isolate, is_external, data, byte_length, shared);
- Handle<JSTypedArray> typed_array =
- isolate->factory()->NewJSTypedArray(type, buffer, 0, num_elements);
+ Handle<JSTypedArray> typed_array = isolate->factory()->NewJSTypedArray(
+ type, buffer, 0, num_elements, pretenure);
Handle<String> name_string = isolate->factory()->InternalizeUtf8String(name);
// Reset property cell type before (re)initializing.
JSBuiltinsObject::InvalidatePropertyCell(builtins, name_string);
@@ -1730,159 +1794,68 @@ void Genesis::InitializeBuiltinTypedArrays() {
}
-#define INSTALL_NATIVE(Type, name, var) \
- Handle<Object> var##_native = \
- Object::GetProperty(isolate, container, name, STRICT).ToHandleChecked(); \
- DCHECK(var##_native->Is##Type()); \
- native_context->set_##var(Type::cast(*var##_native));
-
-
-void Bootstrapper::ImportNatives(Isolate* isolate, Handle<JSObject> container) {
- HandleScope scope(isolate);
- Handle<Context> native_context = isolate->native_context();
- INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
- INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
- INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
- INSTALL_NATIVE(JSFunction, "ToDetailString", to_detail_string_fun);
- INSTALL_NATIVE(JSFunction, "NoSideEffectToString",
- no_side_effect_to_string_fun);
- INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
- INSTALL_NATIVE(JSFunction, "ToLength", to_length_fun);
-
- INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
- INSTALL_NATIVE(JSFunction, "GetStackTraceLine", get_stack_trace_line_fun);
- INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
- to_complete_property_descriptor);
- INSTALL_NATIVE(JSFunction, "ObjectDefineOwnProperty",
- object_define_own_property);
- INSTALL_NATIVE(JSFunction, "ObjectGetOwnPropertyDescriptor",
- object_get_own_property_descriptor);
- INSTALL_NATIVE(JSFunction, "MessageGetLineNumber", message_get_line_number);
- INSTALL_NATIVE(JSFunction, "MessageGetColumnNumber",
- message_get_column_number);
- INSTALL_NATIVE(JSFunction, "MessageGetSourceLine", message_get_source_line);
- INSTALL_NATIVE(JSObject, "StackOverflowBoilerplate",
- stack_overflow_boilerplate);
- INSTALL_NATIVE(JSFunction, "JsonSerializeAdapter", json_serialize_adapter);
-
- INSTALL_NATIVE(JSFunction, "Error", error_function);
- INSTALL_NATIVE(JSFunction, "EvalError", eval_error_function);
- INSTALL_NATIVE(JSFunction, "RangeError", range_error_function);
- INSTALL_NATIVE(JSFunction, "ReferenceError", reference_error_function);
- INSTALL_NATIVE(JSFunction, "SyntaxError", syntax_error_function);
- INSTALL_NATIVE(JSFunction, "TypeError", type_error_function);
- INSTALL_NATIVE(JSFunction, "URIError", uri_error_function);
- INSTALL_NATIVE(JSFunction, "MakeError", make_error_function);
-
- INSTALL_NATIVE(Symbol, "promiseStatus", promise_status);
- INSTALL_NATIVE(Symbol, "promiseValue", promise_value);
- INSTALL_NATIVE(JSFunction, "PromiseCreate", promise_create);
- INSTALL_NATIVE(JSFunction, "PromiseResolve", promise_resolve);
- INSTALL_NATIVE(JSFunction, "PromiseReject", promise_reject);
- INSTALL_NATIVE(JSFunction, "PromiseChain", promise_chain);
- INSTALL_NATIVE(JSFunction, "PromiseCatch", promise_catch);
- INSTALL_NATIVE(JSFunction, "PromiseThen", promise_then);
- INSTALL_NATIVE(JSFunction, "PromiseHasUserDefinedRejectHandler",
- promise_has_user_defined_reject_handler);
-
- INSTALL_NATIVE(JSFunction, "ObserveNotifyChange", observers_notify_change);
- INSTALL_NATIVE(JSFunction, "ObserveEnqueueSpliceRecord",
- observers_enqueue_splice);
- INSTALL_NATIVE(JSFunction, "ObserveBeginPerformSplice",
- observers_begin_perform_splice);
- INSTALL_NATIVE(JSFunction, "ObserveEndPerformSplice",
- observers_end_perform_splice);
- INSTALL_NATIVE(JSFunction, "ObserveNativeObjectObserve",
- native_object_observe);
- INSTALL_NATIVE(JSFunction, "ObserveNativeObjectGetNotifier",
- native_object_get_notifier);
- INSTALL_NATIVE(JSFunction, "ObserveNativeObjectNotifierPerformChange",
- native_object_notifier_perform_change);
-
- INSTALL_NATIVE(JSFunction, "ArrayValues", array_values_iterator);
- INSTALL_NATIVE(JSFunction, "MapGet", map_get);
- INSTALL_NATIVE(JSFunction, "MapSet", map_set);
- INSTALL_NATIVE(JSFunction, "MapHas", map_has);
- INSTALL_NATIVE(JSFunction, "MapDelete", map_delete);
- INSTALL_NATIVE(JSFunction, "SetAdd", set_add);
- INSTALL_NATIVE(JSFunction, "SetHas", set_has);
- INSTALL_NATIVE(JSFunction, "SetDelete", set_delete);
- INSTALL_NATIVE(JSFunction, "MapFromArray", map_from_array);
- INSTALL_NATIVE(JSFunction, "SetFromArray", set_from_array);
-}
-
-
-#define EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(id) \
- static void InstallExperimentalNatives_##id(Isolate* isolate, \
- Handle<Context> native_context, \
- Handle<JSObject> container) {}
-
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_modules)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_array_includes)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_regexps)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrow_functions)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_tostring)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sloppy)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sloppy_function)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sloppy_let)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_unicode_regexps)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_rest_parameters)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_default_parameters)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_reflect)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_spreadcalls)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_destructuring)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_object)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_object_observe)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_spread_arrays)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sharedarraybuffer)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_atomics)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_new_target)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_concat_spreadable)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_simd)
-
-
-static void InstallExperimentalNatives_harmony_proxies(
- Isolate* isolate, Handle<Context> native_context,
- Handle<JSObject> container) {
- if (FLAG_harmony_proxies) {
- INSTALL_NATIVE(JSFunction, "ProxyDerivedGetTrap", derived_get_trap);
- INSTALL_NATIVE(JSFunction, "ProxyDerivedHasTrap", derived_has_trap);
- INSTALL_NATIVE(JSFunction, "ProxyDerivedSetTrap", derived_set_trap);
- INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
- }
-}
-
-
-void Bootstrapper::ImportExperimentalNatives(Isolate* isolate,
- Handle<JSObject> container) {
+void Bootstrapper::ExportFromRuntime(Isolate* isolate,
+ Handle<JSObject> container) {
HandleScope scope(isolate);
- Handle<Context> native_context = isolate->native_context();
-#define INSTALL_NATIVE_FUNCTIONS_FOR(id, descr) \
- InstallExperimentalNatives_##id(isolate, native_context, container);
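+// Expose each private symbol on |container| under its C++ name so that
+// native scripts can import it.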
+#define EXPORT_PRIVATE_SYMBOL(NAME) \
+ Handle<String> NAME##_name = \
+ isolate->factory()->NewStringFromAsciiChecked(#NAME); \
+ JSObject::AddProperty(container, NAME##_name, isolate->factory()->NAME(), \
+ NONE);
+ PRIVATE_SYMBOL_LIST(EXPORT_PRIVATE_SYMBOL)
+#undef EXPORT_PRIVATE_SYMBOL
+
+#define EXPORT_PUBLIC_SYMBOL(NAME, DESCRIPTION) \
+ Handle<String> NAME##_name = \
+ isolate->factory()->NewStringFromAsciiChecked(#NAME); \
+ JSObject::AddProperty(container, NAME##_name, isolate->factory()->NAME(), \
+ NONE);
+ PUBLIC_SYMBOL_LIST(EXPORT_PUBLIC_SYMBOL)
+#undef EXPORT_PUBLIC_SYMBOL
- HARMONY_INPROGRESS(INSTALL_NATIVE_FUNCTIONS_FOR)
- HARMONY_STAGED(INSTALL_NATIVE_FUNCTIONS_FOR)
- HARMONY_SHIPPING(INSTALL_NATIVE_FUNCTIONS_FOR)
-#undef INSTALL_NATIVE_FUNCTIONS_FOR
-}
+ {
+ Handle<JSFunction> apply = InstallFunction(
+ container, "reflect_apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), Builtins::kReflectApply);
+ apply->shared()->set_internal_formal_parameter_count(3);
+ apply->shared()->set_length(3);
+ Handle<TypeFeedbackVector> feedback_vector =
+ TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate);
+ apply->shared()->set_feedback_vector(*feedback_vector);
+ isolate->native_context()->set_reflect_apply(*apply);
+ }
-#undef INSTALL_NATIVE
+ {
+ Handle<JSFunction> construct = InstallFunction(
+ container, "reflect_construct", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), Builtins::kReflectConstruct);
+ construct->shared()->set_internal_formal_parameter_count(3);
+ construct->shared()->set_length(2);
+ Handle<TypeFeedbackVector> feedback_vector =
+ TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate);
+ construct->shared()->set_feedback_vector(*feedback_vector);
+ isolate->native_context()->set_reflect_construct(*construct);
+ }
+}
-bool Bootstrapper::InstallJSBuiltins(Isolate* isolate,
- Handle<JSObject> container) {
+void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
+ Handle<JSObject> container) {
HandleScope scope(isolate);
- Handle<JSBuiltinsObject> builtins = isolate->js_builtins_object();
- for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
- Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
- Handle<Object> function_object =
- Object::GetProperty(isolate, container, Builtins::GetName(id))
- .ToHandleChecked();
- DCHECK(function_object->IsJSFunction());
- Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- builtins->set_javascript_builtin(id, *function);
+
+#define INITIALIZE_FLAG(FLAG) \
+ { \
+ Handle<String> name = \
+ isolate->factory()->NewStringFromAsciiChecked(#FLAG); \
+ JSObject::AddProperty(container, name, \
+ isolate->factory()->ToBoolean(FLAG), NONE); \
}
- return true;
+
+ INITIALIZE_FLAG(FLAG_harmony_regexps)
+ INITIALIZE_FLAG(FLAG_harmony_unicode_regexps)
+ INITIALIZE_FLAG(FLAG_harmony_tostring)
+
+#undef INITIALIZE_FLAG
}
@@ -1898,58 +1871,27 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_function)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy_let)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_rest_parameters)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_default_parameters)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_spreadcalls)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_spread_calls)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_destructuring)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_observe)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_spread_arrays)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_atomics)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_new_target)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_concat_spreadable)
-
-void Genesis::InitializeGlobal_harmony_regexps() {
- Handle<JSObject> builtins(native_context()->builtins());
-
- Handle<HeapObject> flag(FLAG_harmony_regexps ? heap()->true_value()
- : heap()->false_value());
- Runtime::SetObjectProperty(isolate(), builtins,
- factory()->harmony_regexps_string(), flag,
- STRICT).Assert();
-}
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexps)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode_regexps)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tostring)
-void Genesis::InitializeGlobal_harmony_unicode_regexps() {
+void Genesis::InitializeGlobal_harmony_tolength() {
Handle<JSObject> builtins(native_context()->builtins());
-
- Handle<HeapObject> flag(FLAG_harmony_unicode_regexps ? heap()->true_value()
- : heap()->false_value());
+ Handle<Object> flag(factory()->ToBoolean(FLAG_harmony_tolength));
Runtime::SetObjectProperty(isolate(), builtins,
- factory()->harmony_unicode_regexps_string(), flag,
+ factory()->harmony_tolength_string(), flag,
STRICT).Assert();
}
void Genesis::InitializeGlobal_harmony_reflect() {
- Handle<JSObject> builtins(native_context()->builtins());
-
- Handle<JSFunction> apply = InstallFunction(
- builtins, "$reflectApply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kReflectApply);
- apply->shared()->set_internal_formal_parameter_count(3);
- apply->shared()->set_length(3);
- Handle<TypeFeedbackVector> apply_feedback_vector =
- TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate());
- apply->shared()->set_feedback_vector(*apply_feedback_vector);
-
- Handle<JSFunction> construct = InstallFunction(
- builtins, "$reflectConstruct", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kReflectConstruct);
- construct->shared()->set_internal_formal_parameter_count(3);
- construct->shared()->set_length(2);
- Handle<TypeFeedbackVector> construct_feedback_vector =
- TypeFeedbackVector::CreatePushAppliedArgumentsVector(isolate());
- construct->shared()->set_feedback_vector(*construct_feedback_vector);
-
if (!FLAG_harmony_reflect) return;
Handle<JSGlobalObject> global(JSGlobalObject::cast(
@@ -1962,16 +1904,6 @@ void Genesis::InitializeGlobal_harmony_reflect() {
}
-void Genesis::InitializeGlobal_harmony_tostring() {
- Handle<JSObject> builtins(native_context()->builtins());
-
- Handle<HeapObject> flag(FLAG_harmony_tostring ? heap()->true_value()
- : heap()->false_value());
- Runtime::SetObjectProperty(isolate(), builtins,
- factory()->harmony_tostring_string(), flag,
- STRICT).Assert();
-}
-
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
@@ -2115,6 +2047,14 @@ bool Genesis::InstallNatives(ContextType context_type) {
"utils container for native scripts");
native_context()->set_natives_utils_object(*utils);
+ // Set up the extras utils object as a shared container between native
+ // scripts and extras. (Extras consume things added there by native scripts.)
+ Handle<JSObject> extras_utils =
+ factory()->NewJSObject(isolate()->object_function());
+ native_context()->set_extras_utils_object(*extras_utils);
+
+ InstallInternalArray(extras_utils, "InternalPackedArray", FAST_ELEMENTS);
+
int builtin_index = Natives::GetDebuggerCount();
// Only run prologue.js and runtime.js at this point.
DCHECK_EQ(builtin_index, Natives::GetIndex("prologue"));
@@ -2277,11 +2217,6 @@ bool Genesis::InstallNatives(ContextType context_type) {
script_is_embedder_debug_script, attribs);
script_map->AppendDescriptor(&d);
}
-
- // Allocate the empty script.
- Handle<Script> script = factory()->NewScript(factory()->empty_string());
- script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- heap()->public_set_empty_script(*script);
}
{
// Builtin function for OpaqueReference -- a JSValue-based object,
@@ -2354,9 +2289,11 @@ bool Genesis::InstallNatives(ContextType context_type) {
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
static const bool kUseStrictFunctionMap = true;
- InstallFunction(builtins, "GeneratorFunction", JS_FUNCTION_TYPE,
- JSFunction::kSize, generator_function_prototype,
- Builtins::kIllegal, kUseStrictFunctionMap);
+ Handle<JSFunction> generator_function_function =
+ InstallFunction(builtins, "GeneratorFunction", JS_FUNCTION_TYPE,
+ JSFunction::kSize, generator_function_prototype,
+ Builtins::kIllegal, kUseStrictFunctionMap);
+ generator_function_function->initial_map()->set_is_callable();
// Create maps for generator functions and their prototypes. Store those
// maps in the native context. The "prototype" property descriptor is
@@ -2399,17 +2336,6 @@ bool Genesis::InstallNatives(ContextType context_type) {
return true;
}
- // Install public symbols.
- {
- static const PropertyAttributes attributes =
- static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
-#define INSTALL_PUBLIC_SYMBOL(name, varname, description) \
- Handle<String> varname = factory()->NewStringFromStaticChars(#varname); \
- JSObject::AddProperty(builtins, varname, factory()->name(), attributes);
- PUBLIC_SYMBOL_LIST(INSTALL_PUBLIC_SYMBOL)
-#undef INSTALL_PUBLIC_SYMBOL
- }
-
// Run the rest of the native scripts.
while (builtin_index < Natives::GetBuiltinsCount()) {
if (!Bootstrapper::CompileBuiltin(isolate(), builtin_index++)) return false;
@@ -2422,7 +2348,15 @@ bool Genesis::InstallNatives(ContextType context_type) {
USE_CUSTOM_MINIMUM_CAPACITY);
native_context()->set_function_cache(*function_cache);
- // Store the map for the string prototype after the natives has been compiled
+ // Store the map for the %ObjectPrototype% after the natives have been compiled
+ // and the Object function has been set up.
+ Handle<JSFunction> object_function(native_context()->object_function());
+ DCHECK(JSObject::cast(object_function->initial_map()->prototype())
+ ->HasFastProperties());
+ native_context()->set_object_function_prototype_map(
+ HeapObject::cast(object_function->initial_map()->prototype())->map());
+
+ // Store the map for the %StringPrototype% after the natives have been compiled
// and the String function has been set up.
Handle<JSFunction> string_function(native_context()->string_function());
DCHECK(JSObject::cast(
@@ -2430,6 +2364,63 @@ bool Genesis::InstallNatives(ContextType context_type) {
native_context()->set_string_function_prototype_map(
HeapObject::cast(string_function->initial_map()->prototype())->map());
+ // Install Date.prototype[@@toPrimitive].
+ {
+ Handle<String> key = factory()->Date_string();
+ Handle<JSFunction> date = Handle<JSFunction>::cast(
+ Object::GetProperty(handle(native_context()->global_object()), key)
+ .ToHandleChecked());
+ Handle<JSObject> proto =
+ Handle<JSObject>(JSObject::cast(date->instance_prototype()));
+
+ // Install the @@toPrimitive function.
+ Handle<JSFunction> to_primitive =
+ InstallFunction(proto, factory()->to_primitive_symbol(), JS_OBJECT_TYPE,
+ JSObject::kHeaderSize, MaybeHandle<JSObject>(),
+ Builtins::kDateToPrimitive,
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+
+ // Set the expected parameters for @@toPrimitive to 1; required by builtin.
+ to_primitive->shared()->set_internal_formal_parameter_count(1);
+
+ // Set the length for the function to satisfy ECMA-262.
+ to_primitive->shared()->set_length(1);
+ }
+
+ // Install Array.prototype.concat
+ {
+ Handle<JSFunction> array_constructor(native_context()->array_function());
+ Handle<JSObject> proto(JSObject::cast(array_constructor->prototype()));
+ Handle<JSFunction> concat =
+ InstallFunction(proto, "concat", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), Builtins::kArrayConcat);
+
+ // Make sure that Array.prototype.concat appears to be compiled.
+ // The code will never be called, but inline caching for call will
+ // only work if it appears to be compiled.
+ concat->shared()->DontAdaptArguments();
+ DCHECK(concat->is_compiled());
+ // Set the length for the function to satisfy ECMA-262.
+ concat->shared()->set_length(1);
+ }
+
+ // Install InternalArray.prototype.concat
+ {
+ Handle<JSFunction> array_constructor(
+ native_context()->internal_array_function());
+ Handle<JSObject> proto(JSObject::cast(array_constructor->prototype()));
+ Handle<JSFunction> concat =
+ InstallFunction(proto, "concat", JS_OBJECT_TYPE, JSObject::kHeaderSize,
+ MaybeHandle<JSObject>(), Builtins::kArrayConcat);
+
+ // Make sure that InternalArray.prototype.concat appears to be compiled.
+ // The code will never be called, but inline caching for call will
+ // only work if it appears to be compiled.
+ concat->shared()->DontAdaptArguments();
+ DCHECK(concat->is_compiled());
+ // Set the length for the function to satisfy ECMA-262.
+ concat->shared()->set_length(1);
+ }
// Install Function.prototype.call and apply.
{
Handle<String> key = factory()->Function_string();
@@ -2584,23 +2575,20 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_default_parameters_natives[] = {nullptr};
static const char* harmony_reflect_natives[] = {"native harmony-reflect.js",
nullptr};
- static const char* harmony_spreadcalls_natives[] = {
+ static const char* harmony_spread_calls_natives[] = {
"native harmony-spread.js", nullptr};
static const char* harmony_destructuring_natives[] = {nullptr};
- static const char* harmony_object_natives[] = {"native harmony-object.js",
- NULL};
static const char* harmony_object_observe_natives[] = {
"native harmony-object-observe.js", nullptr};
static const char* harmony_spread_arrays_natives[] = {nullptr};
static const char* harmony_sharedarraybuffer_natives[] = {
- "native harmony-sharedarraybuffer.js", NULL};
- static const char* harmony_atomics_natives[] = {"native harmony-atomics.js",
- nullptr};
+ "native harmony-sharedarraybuffer.js", "native harmony-atomics.js", NULL};
static const char* harmony_new_target_natives[] = {nullptr};
static const char* harmony_concat_spreadable_natives[] = {
"native harmony-concat-spreadable.js", nullptr};
static const char* harmony_simd_natives[] = {"native harmony-simd.js",
nullptr};
+ static const char* harmony_tolength_natives[] = {nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -2634,8 +2622,6 @@ bool Genesis::InstallExtraNatives() {
Handle<JSObject> extras_binding =
factory()->NewJSObject(isolate()->object_function());
- JSObject::NormalizeProperties(extras_binding, CLEAR_INOBJECT_PROPERTIES, 2,
- "container for binding to/from extra natives");
native_context()->set_extras_binding_object(*extras_binding);
for (int i = ExtraNatives::GetDebuggerCount();
@@ -2647,6 +2633,17 @@ bool Genesis::InstallExtraNatives() {
}
+bool Genesis::InstallExperimentalExtraNatives() {
+ for (int i = ExperimentalExtraNatives::GetDebuggerCount();
+ i < ExperimentalExtraNatives::GetBuiltinsCount(); i++) {
+ if (!Bootstrapper::CompileExperimentalExtraBuiltin(isolate(), i))
+ return false;
+ }
+
+ return true;
+}
+
+
bool Genesis::InstallDebuggerNatives() {
for (int i = 0; i < Natives::GetDebuggerCount(); ++i) {
if (!Bootstrapper::CompileBuiltin(isolate(), i)) return false;
@@ -2701,7 +2698,7 @@ void Genesis::InstallBuiltinFunctionIds() {
void Genesis::InstallExperimentalBuiltinFunctionIds() {
- if (FLAG_harmony_atomics) {
+ if (FLAG_harmony_sharedarraybuffer) {
struct BuiltinFunctionIds {
const char* holder_expr;
const char* fun_name;
@@ -2766,24 +2763,6 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
JSObject::AddProperty(global, natives_key, natives, DONT_ENUM);
}
- // Expose the stack trace symbol to native JS.
- RETURN_ON_EXCEPTION_VALUE(isolate,
- JSObject::SetOwnPropertyIgnoreAttributes(
- handle(native_context->builtins(), isolate),
- factory->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("$stackTraceSymbol")),
- factory->stack_trace_symbol(), NONE),
- false);
-
- // Expose the internal error symbol to native JS
- RETURN_ON_EXCEPTION_VALUE(isolate,
- JSObject::SetOwnPropertyIgnoreAttributes(
- handle(native_context->builtins(), isolate),
- factory->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("$internalErrorSymbol")),
- factory->internal_error_symbol(), NONE),
- false);
-
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
// If loading fails we just bail out without installing the
@@ -2802,6 +2781,11 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
Handle<Object> global_proxy(debug_context->global_proxy(), isolate);
JSObject::AddProperty(global, debug_string, global_proxy, DONT_ENUM);
}
+
+#if defined(V8_WASM)
+ WasmJs::Install(isolate, global);
+#endif
+
return true;
}
@@ -3246,6 +3230,11 @@ Genesis::Genesis(Isolate* isolate,
if (!isolate->serializer_enabled()) {
InitializeExperimentalGlobal();
if (!InstallExperimentalNatives()) return;
+
+ if (FLAG_experimental_extras) {
+ if (!InstallExperimentalExtraNatives()) return;
+ }
+
// By now the utils object is useless and can be removed.
native_context()->set_natives_utils_object(
isolate->heap()->undefined_value());
@@ -3261,7 +3250,9 @@ Genesis::Genesis(Isolate* isolate,
// Check that the script context table is empty except for the 'this' binding.
// We do not need script contexts for native scripts.
- DCHECK_EQ(1, native_context()->script_context_table()->used());
+ if (!FLAG_global_var_shortcuts) {
+ DCHECK_EQ(1, native_context()->script_context_table()->used());
+ }
result_ = native_context();
}
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index e478681431..659d74aad2 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -52,7 +52,7 @@ class SourceCodeCache final BASE_EMBEDDED {
DCHECK(!str.is_null());
cache_->set(length, *str);
cache_->set(length + 1, *shared);
- Script::cast(shared->script())->set_type(Smi::FromInt(type_));
+ Script::cast(shared->script())->set_type(type_);
}
private:
@@ -115,13 +115,13 @@ class Bootstrapper final {
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
static bool CompileExtraBuiltin(Isolate* isolate, int index);
+ static bool CompileExperimentalExtraBuiltin(Isolate* isolate, int index);
static bool CompileCodeStubBuiltin(Isolate* isolate, int index);
static bool InstallCodeStubNatives(Isolate* isolate);
- static void ImportNatives(Isolate* isolate, Handle<JSObject> container);
- static void ImportExperimentalNatives(Isolate* isolate,
- Handle<JSObject> container);
- static bool InstallJSBuiltins(Isolate* isolate, Handle<JSObject> container);
+ static void ExportFromRuntime(Isolate* isolate, Handle<JSObject> container);
+ static void ExportExperimentalFromRuntime(Isolate* isolate,
+ Handle<JSObject> container);
private:
Isolate* isolate_;
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 31649d0fc2..13225d2065 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -2,22 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/builtins.h"
#include "src/api.h"
#include "src/api-natives.h"
#include "src/arguments.h"
#include "src/base/once.h"
#include "src/bootstrapper.h"
-#include "src/builtins.h"
-#include "src/cpu-profiler.h"
#include "src/elements.h"
#include "src/frames-inl.h"
#include "src/gdb-jit.h"
-#include "src/heap-profiler.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/prototype.h"
#include "src/vm-state-inl.h"
@@ -91,7 +90,6 @@ void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
BUILTIN_LIST_C(DEF_ARG_TYPE)
#undef DEF_ARG_TYPE
-} // namespace
// ----------------------------------------------------------------------------
// Support macro for defining builtins in C++.
@@ -136,7 +134,7 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
#ifdef DEBUG
-static inline bool CalledAsConstructor(Isolate* isolate) {
+inline bool CalledAsConstructor(Isolate* isolate) {
// Calculate the result using a full stack frame iterator and check
// that the state of the stack is as we assume it to be in the
// code below.
@@ -166,30 +164,57 @@ static inline bool CalledAsConstructor(Isolate* isolate) {
// ----------------------------------------------------------------------------
-BUILTIN(Illegal) {
- UNREACHABLE();
- return isolate->heap()->undefined_value(); // Make compiler happy.
-}
-
-BUILTIN(EmptyFunction) {
- return isolate->heap()->undefined_value();
+inline bool ClampedToInteger(Object* object, int* out) {
+ // This is an extended version of ECMA-262 7.1.11, handling signed values.
+ // Try to convert the object to a number and clamp values to [kMinInt, kMaxInt].
+ if (object->IsSmi()) {
+ *out = Smi::cast(object)->value();
+ return true;
+ } else if (object->IsHeapNumber()) {
+ double value = HeapNumber::cast(object)->value();
+ if (std::isnan(value)) {
+ *out = 0;
+ } else if (value > kMaxInt) {
+ *out = kMaxInt;
+ } else if (value < kMinInt) {
+ *out = kMinInt;
+ } else {
+ *out = static_cast<int>(value);
+ }
+ return true;
+ } else if (object->IsUndefined() || object->IsNull()) {
+ *out = 0;
+ return true;
+ } else if (object->IsBoolean()) {
+ *out = object->IsTrue();
+ return true;
+ }
+ return false;
}
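ClampedToInteger folds the Smi, HeapNumber, undefined/null, and boolean cases that the old call sites handled inline into one clamping rule. A standalone restatement of the numeric part, with INT_MIN/INT_MAX standing in for V8's kMinInt/kMaxInt:

#include <climits>
#include <cmath>

// NaN maps to 0, out-of-range values saturate at the int bounds, and
// in-range values truncate toward zero.
inline int ClampDoubleToInt(double value) {
  if (std::isnan(value)) return 0;
  if (value > INT_MAX) return INT_MAX;
  if (value < INT_MIN) return INT_MIN;
  return static_cast<int>(value);
}

For example, ClampDoubleToInt(1e12) saturates to INT_MAX, which is the behavior the relative-index handling in the array builtins below relies on.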
-static void MoveDoubleElements(FixedDoubleArray* dst, int dst_index,
- FixedDoubleArray* src, int src_index, int len) {
- if (len == 0) return;
- MemMove(dst->data_start() + dst_index, src->data_start() + src_index,
- len * kDoubleSize);
+inline bool GetSloppyArgumentsLength(Isolate* isolate, Handle<JSObject> object,
+ int* out) {
+ Map* arguments_map =
+ isolate->context()->native_context()->sloppy_arguments_map();
+ if (object->map() != arguments_map || !object->HasFastElements()) {
+ return false;
+ }
+ Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
+ if (!len_obj->IsSmi()) {
+ return false;
+ }
+ *out = Smi::cast(len_obj)->value();
+ return *out <= object->elements()->length();
}
-static bool ArrayPrototypeHasNoElements(PrototypeIterator* iter) {
+inline bool PrototypeHasNoElements(PrototypeIterator* iter) {
DisallowHeapAllocation no_gc;
for (; !iter->IsAtEnd(); iter->Advance()) {
if (iter->GetCurrent()->IsJSProxy()) return false;
- JSObject* current = JSObject::cast(iter->GetCurrent());
+ JSObject* current = iter->GetCurrent<JSObject>();
if (current->IsAccessCheckNeeded()) return false;
if (current->HasIndexedInterceptor()) return false;
if (current->elements()->length() != 0) return false;
@@ -198,8 +223,8 @@ static bool ArrayPrototypeHasNoElements(PrototypeIterator* iter) {
}
-static inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
- JSArray* receiver) {
+inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
+ JSArray* receiver) {
DisallowHeapAllocation no_gc;
// If the array prototype chain is intact (and free of elements), and if the
// receiver's prototype is the array prototype, then we are done.
@@ -212,16 +237,14 @@ static inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
// Slow case.
PrototypeIterator iter(isolate, receiver);
- return ArrayPrototypeHasNoElements(&iter);
+ return PrototypeHasNoElements(&iter);
}
// Returns empty handle if not applicable.
MUST_USE_RESULT
-static inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
- Isolate* isolate,
- Handle<Object> receiver,
- Arguments* args,
+inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
+ Isolate* isolate, Handle<Object> receiver, Arguments* args,
int first_added_arg) {
if (!receiver->IsJSArray()) return MaybeHandle<FixedArrayBase>();
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -284,17 +307,10 @@ static inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
}
-MUST_USE_RESULT static Object* CallJsBuiltin(
- Isolate* isolate,
- const char* name,
+MUST_USE_RESULT static Object* CallJsIntrinsic(
+ Isolate* isolate, Handle<JSFunction> function,
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
HandleScope handleScope(isolate);
-
- Handle<Object> js_builtin = Object::GetProperty(
- isolate,
- handle(isolate->native_context()->builtins(), isolate),
- name).ToHandleChecked();
- Handle<JSFunction> function = Handle<JSFunction>::cast(js_builtin);
int argc = args.length() - 1;
ScopedVector<Handle<Object> > argv(argc);
for (int i = 0; i < argc; ++i) {
@@ -312,6 +328,18 @@ MUST_USE_RESULT static Object* CallJsBuiltin(
}
+} // namespace
+
+
+BUILTIN(Illegal) {
+ UNREACHABLE();
+ return isolate->heap()->undefined_value(); // Make compiler happy.
+}
+
+
+BUILTIN(EmptyFunction) { return isolate->heap()->undefined_value(); }
+
+
BUILTIN(ArrayPush) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
@@ -319,7 +347,7 @@ BUILTIN(ArrayPush) {
EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
Handle<FixedArrayBase> elms_obj;
if (!maybe_elms_obj.ToHandle(&elms_obj)) {
- return CallJsBuiltin(isolate, "$arrayPush", args);
+ return CallJsIntrinsic(isolate, isolate->array_push(), args);
}
// Fast Elements Path
int push_size = args.length() - 1;
@@ -330,12 +358,11 @@ BUILTIN(ArrayPush) {
}
if (push_size > 0 &&
JSArray::WouldChangeReadOnlyLength(array, len + push_size)) {
- return CallJsBuiltin(isolate, "$arrayPush", args);
+ return CallJsIntrinsic(isolate, isolate->array_push(), args);
}
DCHECK(!array->map()->is_observed());
ElementsAccessor* accessor = array->GetElementsAccessor();
- int new_length = accessor->Push(array, elms_obj, &args[1], push_size,
- ElementsAccessor::kDirectionReverse);
+ int new_length = accessor->Push(array, elms_obj, &args, push_size);
return Smi::FromInt(new_length);
}
@@ -347,7 +374,7 @@ BUILTIN(ArrayPop) {
EnsureJSArrayWithWritableFastElements(isolate, receiver, NULL, 0);
Handle<FixedArrayBase> elms_obj;
if (!maybe_elms_obj.ToHandle(&elms_obj)) {
- return CallJsBuiltin(isolate, "$arrayPop", args);
+ return CallJsIntrinsic(isolate, isolate->array_pop(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
@@ -357,16 +384,21 @@ BUILTIN(ArrayPop) {
if (len == 0) return isolate->heap()->undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
- return CallJsBuiltin(isolate, "$arrayPop", args);
+ return CallJsIntrinsic(isolate, isolate->array_pop(), args);
}
- uint32_t new_length = len - 1;
- Handle<Object> element;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, element, Object::GetElement(isolate, array, new_length));
-
- JSArray::SetLength(array, new_length);
- return *element;
+ Handle<Object> result;
+ if (IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
+ // Fast Elements Path
+ result = array->GetElementsAccessor()->Pop(array, elms_obj);
+ } else {
+ // Use Slow Lookup otherwise
+ uint32_t new_length = len - 1;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::GetElement(isolate, array, new_length));
+ JSArray::SetLength(array, new_length);
+ }
+ return *result;
}
@@ -379,7 +411,7 @@ BUILTIN(ArrayShift) {
Handle<FixedArrayBase> elms_obj;
if (!maybe_elms_obj.ToHandle(&elms_obj) ||
!IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) {
- return CallJsBuiltin(isolate, "$arrayShift", args);
+ return CallJsIntrinsic(isolate, isolate->array_shift(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
DCHECK(!array->map()->is_observed());
@@ -388,33 +420,10 @@ BUILTIN(ArrayShift) {
if (len == 0) return heap->undefined_value();
if (JSArray::HasReadOnlyLength(array)) {
- return CallJsBuiltin(isolate, "$arrayShift", args);
- }
-
- // Get first element
- Handle<Object> first;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, first,
- Object::GetElement(isolate, array, 0));
-
- if (heap->CanMoveObjectStart(*elms_obj)) {
- array->set_elements(heap->LeftTrimFixedArray(*elms_obj, 1));
- } else {
- // Shift the elements.
- if (elms_obj->IsFixedArray()) {
- Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
- DisallowHeapAllocation no_gc;
- heap->MoveElements(*elms, 0, 1, len - 1);
- elms->set(len - 1, heap->the_hole_value());
- } else {
- Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj);
- MoveDoubleElements(*elms, 0, *elms, 1, len - 1);
- elms->set_the_hole(len - 1);
- }
+ return CallJsIntrinsic(isolate, isolate->array_shift(), args);
}
- // Set the length.
- array->set_length(Smi::FromInt(len - 1));
-
+ Handle<Object> first = array->GetElementsAccessor()->Shift(array, elms_obj);
return *first;
}
@@ -426,55 +435,24 @@ BUILTIN(ArrayUnshift) {
EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 1);
Handle<FixedArrayBase> elms_obj;
if (!maybe_elms_obj.ToHandle(&elms_obj)) {
- return CallJsBuiltin(isolate, "$arrayUnshift", args);
+ return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
DCHECK(!array->map()->is_observed());
- if (!array->HasFastSmiOrObjectElements()) {
- return CallJsBuiltin(isolate, "$arrayUnshift", args);
- }
- int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
- int new_length = len + to_add;
+ if (to_add == 0) {
+ return array->length();
+ }
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
- DCHECK(to_add <= (Smi::kMaxValue - len));
-
- if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
- return CallJsBuiltin(isolate, "$arrayUnshift", args);
- }
+ DCHECK(to_add <= (Smi::kMaxValue - Smi::cast(array->length())->value()));
- Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
-
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- Handle<FixedArray> new_elms =
- isolate->factory()->NewUninitializedFixedArray(capacity);
-
- ElementsKind kind = array->GetElementsKind();
- ElementsAccessor* accessor = array->GetElementsAccessor();
- accessor->CopyElements(
- elms, 0, kind, new_elms, to_add,
- ElementsAccessor::kCopyToEndAndInitializeToHole);
-
- elms = new_elms;
- array->set_elements(*elms);
- } else {
- DisallowHeapAllocation no_gc;
- Heap* heap = isolate->heap();
- heap->MoveElements(*elms, to_add, 0, len);
+ if (to_add > 0 && JSArray::HasReadOnlyLength(array)) {
+ return CallJsIntrinsic(isolate, isolate->array_unshift(), args);
}
- // Add the provided values.
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < to_add; i++) {
- elms->set(i, args[i + 1], mode);
- }
-
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ int new_length = accessor->Unshift(array, elms_obj, &args, to_add);
return Smi::FromInt(new_length);
}
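The backing-store growth rule visible in the removed code, new_length + (new_length >> 1) + 16, now lives behind ElementsAccessor::Unshift and Push. As a standalone sketch, assuming the rule is unchanged inside the accessors:

// Grow to 1.5x the required length plus 16 slots of slack, so repeated
// push/unshift calls amortize the element copies.
inline int NewBackingStoreCapacity(int new_length) {
  return new_length + (new_length >> 1) + 16;
}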
@@ -482,393 +460,948 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
+ Handle<JSObject> object;
+ Handle<FixedArrayBase> elms_obj;
int len = -1;
int relative_start = 0;
int relative_end = 0;
- {
- DisallowHeapAllocation no_gc;
- if (receiver->IsJSArray()) {
- JSArray* array = JSArray::cast(*receiver);
- if (!IsJSArrayFastElementMovingAllowed(isolate, array)) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySlice", args);
- }
-
- if (!array->HasFastElements()) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySlice", args);
- }
+ bool is_sloppy_arguments = false;
- len = Smi::cast(array->length())->value();
- } else {
- // Array.slice(arguments, ...) is quite a common idiom (notably more
- // than 50% of invocations in Web apps). Treat it in C++ as well.
- Map* arguments_map =
- isolate->context()->native_context()->sloppy_arguments_map();
-
- bool is_arguments_object_with_fast_elements =
- receiver->IsJSObject() &&
- JSObject::cast(*receiver)->map() == arguments_map;
- if (!is_arguments_object_with_fast_elements) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySlice", args);
- }
- JSObject* object = JSObject::cast(*receiver);
-
- if (!object->HasFastElements()) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySlice", args);
- }
-
- Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
- if (!len_obj->IsSmi()) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySlice", args);
- }
- len = Smi::cast(len_obj)->value();
- if (len > object->elements()->length()) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySlice", args);
- }
+ if (receiver->IsJSArray()) {
+ DisallowHeapAllocation no_gc;
+ JSArray* array = JSArray::cast(*receiver);
+ if (!array->HasFastElements() ||
+ !IsJSArrayFastElementMovingAllowed(isolate, array)) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsIntrinsic(isolate, isolate->array_slice(), args);
}
-
- DCHECK(len >= 0);
- int n_arguments = args.length() - 1;
-
- // Note carefully choosen defaults---if argument is missing,
- // it's undefined which gets converted to 0 for relative_start
- // and to len for relative_end.
- relative_start = 0;
- relative_end = len;
- if (n_arguments > 0) {
- Object* arg1 = args[1];
- if (arg1->IsSmi()) {
- relative_start = Smi::cast(arg1)->value();
- } else if (arg1->IsHeapNumber()) {
- double start = HeapNumber::cast(arg1)->value();
- if (start < kMinInt || start > kMaxInt) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySlice", args);
- }
- relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
- } else if (!arg1->IsUndefined()) {
+ len = Smi::cast(array->length())->value();
+ object = Handle<JSObject>::cast(receiver);
+ elms_obj = handle(array->elements(), isolate);
+ } else if (receiver->IsJSObject() &&
+ GetSloppyArgumentsLength(isolate, Handle<JSObject>::cast(receiver),
+ &len)) {
+ // Array.prototype.slice(arguments, ...) is quite a common idiom
+ // (notably more than 50% of invocations in Web apps).
+ // Treat it in C++ as well.
+ is_sloppy_arguments = true;
+ object = Handle<JSObject>::cast(receiver);
+ elms_obj = handle(object->elements(), isolate);
+ } else {
+ AllowHeapAllocation allow_allocation;
+ return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+ }
+ DCHECK(len >= 0);
+ int argument_count = args.length() - 1;
+ // Note carefully chosen defaults---if argument is missing,
+ // it's undefined which gets converted to 0 for relative_start
+ // and to len for relative_end.
+ relative_start = 0;
+ relative_end = len;
+ if (argument_count > 0) {
+ DisallowHeapAllocation no_gc;
+ if (!ClampedToInteger(args[1], &relative_start)) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+ }
+ if (argument_count > 1) {
+ Object* end_arg = args[2];
+ // slice handles the end_arg specially
+ if (end_arg->IsUndefined()) {
+ relative_end = len;
+ } else if (!ClampedToInteger(end_arg, &relative_end)) {
AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySlice", args);
- }
- if (n_arguments > 1) {
- Object* arg2 = args[2];
- if (arg2->IsSmi()) {
- relative_end = Smi::cast(arg2)->value();
- } else if (arg2->IsHeapNumber()) {
- double end = HeapNumber::cast(arg2)->value();
- if (end < kMinInt || end > kMaxInt) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySlice", args);
- }
- relative_end = std::isnan(end) ? 0 : static_cast<int>(end);
- } else if (!arg2->IsUndefined()) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySlice", args);
- }
+ return CallJsIntrinsic(isolate, isolate->array_slice(), args);
}
}
}
// ECMAScript 262, 3rd Edition, Section 15.4.4.10, step 6.
- int k = (relative_start < 0) ? Max(len + relative_start, 0)
- : Min(relative_start, len);
+ uint32_t actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
+ : Min(relative_start, len);
// ECMAScript 262, 3rd Edition, Section 15.4.4.10, step 8.
- int final = (relative_end < 0) ? Max(len + relative_end, 0)
- : Min(relative_end, len);
-
- // Calculate the length of result array.
- int result_len = Max(final - k, 0);
+ uint32_t actual_end =
+ (relative_end < 0) ? Max(len + relative_end, 0) : Min(relative_end, len);
- Handle<JSObject> object = Handle<JSObject>::cast(receiver);
- Handle<FixedArrayBase> elms(object->elements(), isolate);
-
- ElementsKind kind = object->GetElementsKind();
- if (IsHoleyElementsKind(kind)) {
- DisallowHeapAllocation no_gc;
- bool packed = true;
- ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
- for (int i = k; i < final; i++) {
- if (!accessor->HasElement(object, i, elms)) {
- packed = false;
- break;
- }
- }
- if (packed) {
- kind = GetPackedElementsKind(kind);
- } else if (!receiver->IsJSArray()) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySlice", args);
- }
+ if (actual_end <= actual_start) {
+ Handle<JSArray> result_array = isolate->factory()->NewJSArray(
+ GetPackedElementsKind(object->GetElementsKind()), 0, 0);
+ return *result_array;
}
- Handle<JSArray> result_array =
- isolate->factory()->NewJSArray(kind, result_len, result_len);
-
- DisallowHeapAllocation no_gc;
- if (result_len == 0) return *result_array;
-
ElementsAccessor* accessor = object->GetElementsAccessor();
- accessor->CopyElements(
- elms, k, kind, handle(result_array->elements(), isolate), 0, result_len);
+ if (is_sloppy_arguments &&
+ !accessor->IsPacked(object, elms_obj, actual_start, actual_end)) {
+ // Don't deal with arguments with holes in C++
+ AllowHeapAllocation allow_allocation;
+ return CallJsIntrinsic(isolate, isolate->array_slice(), args);
+ }
+ Handle<JSArray> result_array =
+ accessor->Slice(object, elms_obj, actual_start, actual_end);
return *result_array;
}
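The actual_start/actual_end computation is the standard relative-index normalization from ECMA-262 3rd ed., Section 15.4.4.10, steps 6 and 8. Restated as a standalone helper (hypothetical name):

#include <algorithm>

// Negative indices count back from the end; results clamp to [0, len].
inline int NormalizeSliceIndex(int relative, int len) {
  return relative < 0 ? std::max(len + relative, 0) : std::min(relative, len);
}

E.g. NormalizeSliceIndex(-2, 5) == 3 and NormalizeSliceIndex(9, 5) == 5, so slice(-2) on a five-element array copies indices 3 and 4.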
BUILTIN(ArraySplice) {
HandleScope scope(isolate);
- Heap* heap = isolate->heap();
Handle<Object> receiver = args.receiver();
MaybeHandle<FixedArrayBase> maybe_elms_obj =
EnsureJSArrayWithWritableFastElements(isolate, receiver, &args, 3);
Handle<FixedArrayBase> elms_obj;
if (!maybe_elms_obj.ToHandle(&elms_obj)) {
- return CallJsBuiltin(isolate, "$arraySplice", args);
+ return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
Handle<JSArray> array = Handle<JSArray>::cast(receiver);
DCHECK(!array->map()->is_observed());
- int len = Smi::cast(array->length())->value();
-
- int n_arguments = args.length() - 1;
-
+ int argument_count = args.length() - 1;
int relative_start = 0;
- if (n_arguments > 0) {
+ if (argument_count > 0) {
DisallowHeapAllocation no_gc;
- Object* arg1 = args[1];
- if (arg1->IsSmi()) {
- relative_start = Smi::cast(arg1)->value();
- } else if (arg1->IsHeapNumber()) {
- double start = HeapNumber::cast(arg1)->value();
- if (start < kMinInt || start > kMaxInt) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySplice", args);
- }
- relative_start = std::isnan(start) ? 0 : static_cast<int>(start);
- } else if (!arg1->IsUndefined()) {
+ if (!ClampedToInteger(args[1], &relative_start)) {
AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySplice", args);
+ return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
}
+ int len = Smi::cast(array->length())->value();
+ // Clip the relative start to [0, len].
int actual_start = (relative_start < 0) ? Max(len + relative_start, 0)
: Min(relative_start, len);
- // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
- // given as a request to delete all the elements from the start.
- // And it differs from the case of undefined delete count.
- // This does not follow ECMA-262, but we do the same for
- // compatibility.
int actual_delete_count;
- if (n_arguments == 1) {
+ if (argument_count == 1) {
+ // SpiderMonkey, TraceMonkey and JSC treat the case where no delete count is
+ // given as a request to delete all the elements from the start.
+ // And it differs from the case of undefined delete count.
+ // This does not follow ECMA-262, but we do the same for compatibility.
DCHECK(len - actual_start >= 0);
actual_delete_count = len - actual_start;
} else {
- int value = 0; // ToInteger(undefined) == 0
- if (n_arguments > 1) {
- DisallowHeapAllocation no_gc;
- Object* arg2 = args[2];
- if (arg2->IsSmi()) {
- value = Smi::cast(arg2)->value();
- } else {
+ int delete_count = 0;
+ DisallowHeapAllocation no_gc;
+ if (argument_count > 1) {
+ if (!ClampedToInteger(args[2], &delete_count)) {
AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySplice", args);
+ return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
}
- actual_delete_count = Min(Max(value, 0), len - actual_start);
+ actual_delete_count = Min(Max(delete_count, 0), len - actual_start);
+ }
+
+ int add_count = (argument_count > 1) ? (argument_count - 2) : 0;
+ int new_length = len - actual_delete_count + add_count;
+
+ if (new_length != len && JSArray::HasReadOnlyLength(array)) {
+ AllowHeapAllocation allow_allocation;
+ return CallJsIntrinsic(isolate, isolate->array_splice(), args);
}
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ Handle<JSArray> result_array = accessor->Splice(
+ array, elms_obj, actual_start, actual_delete_count, &args, add_count);
+ return *result_array;
+}
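The splice bookkeeping above reduces to three numbers: the clamped start, the delete count (everything to the end when only one argument is given, for SpiderMonkey/JSC compatibility), and the resulting length. A minimal sketch of those rules (hypothetical helper):

#include <algorithm>

struct SpliceCounts { int start, delete_count, new_length; };

inline SpliceCounts ComputeSpliceCounts(int len, int relative_start,
                                        bool has_delete_count,
                                        int delete_count, int add_count) {
  int start = relative_start < 0 ? std::max(len + relative_start, 0)
                                 : std::min(relative_start, len);
  int actual_delete =
      has_delete_count ? std::min(std::max(delete_count, 0), len - start)
                       : len - start;  // One-argument form deletes to the end.
  return {start, actual_delete, len - actual_delete + add_count};
}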
- ElementsKind elements_kind = array->GetElementsKind();
- int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
- int new_length = len - actual_delete_count + item_count;
+// Array Concat -------------------------------------------------------------
+
+namespace {
+
+/**
+ * A simple visitor that visits every element of an array.
+ * The backing storage can be a fixed array (fast elements case) or a
+ * dictionary (sparse array case). Since Dictionary is a subtype of
+ * FixedArray, the class can be used in both the fast and the slow case.
+ * The second constructor parameter, fast_elements, specifies whether
+ * the storage is a FixedArray or a Dictionary.
+ *
+ * An index limit is used to deal with the situation that a result array
+ * length overflows a 32-bit non-negative integer.
+ */
+class ArrayConcatVisitor {
+ public:
+ ArrayConcatVisitor(Isolate* isolate, Handle<FixedArray> storage,
+ bool fast_elements)
+ : isolate_(isolate),
+ storage_(Handle<FixedArray>::cast(
+ isolate->global_handles()->Create(*storage))),
+ index_offset_(0u),
+ bit_field_(FastElementsField::encode(fast_elements) |
+ ExceedsLimitField::encode(false)) {}
+
+ ~ArrayConcatVisitor() { clear_storage(); }
+
+ void visit(uint32_t i, Handle<Object> elm) {
+ if (i >= JSObject::kMaxElementCount - index_offset_) {
+ set_exceeds_array_limit(true);
+ return;
+ }
+ uint32_t index = index_offset_ + i;
- // For double mode we do not support changing the length.
- if (new_length > len && IsFastDoubleElementsKind(elements_kind)) {
- return CallJsBuiltin(isolate, "$arraySplice", args);
+ if (fast_elements()) {
+ if (index < static_cast<uint32_t>(storage_->length())) {
+ storage_->set(index, *elm);
+ return;
+ }
+ // Our initial estimate of length was foiled, possibly by
+ // getters on the arrays increasing the length of later arrays
+ // during iteration.
+ // This shouldn't happen in anything but pathological cases.
+ SetDictionaryMode();
+ // Fall-through to dictionary mode.
+ }
+ DCHECK(!fast_elements());
+ Handle<SeededNumberDictionary> dict(
+ SeededNumberDictionary::cast(*storage_));
+ // The object holding this backing store has just been allocated, so
+ // it cannot yet be used as a prototype.
+ Handle<SeededNumberDictionary> result =
+ SeededNumberDictionary::AtNumberPut(dict, index, elm, false);
+ if (!result.is_identical_to(dict)) {
+ // Dictionary needed to grow.
+ clear_storage();
+ set_storage(*result);
+ }
}
- if (new_length != len && JSArray::HasReadOnlyLength(array)) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arraySplice", args);
+ void increase_index_offset(uint32_t delta) {
+ if (JSObject::kMaxElementCount - index_offset_ < delta) {
+ index_offset_ = JSObject::kMaxElementCount;
+ } else {
+ index_offset_ += delta;
+ }
+ // If the initial length estimate was off (see special case in visit()),
+ // but the array blowing the limit didn't contain elements beyond the
+ // provided-for index range, go to dictionary mode now.
+ if (fast_elements() &&
+ index_offset_ >
+ static_cast<uint32_t>(FixedArrayBase::cast(*storage_)->length())) {
+ SetDictionaryMode();
+ }
}
- if (new_length == 0) {
- Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(
- elms_obj, elements_kind, actual_delete_count);
- array->set_elements(heap->empty_fixed_array());
- array->set_length(Smi::FromInt(0));
- return *result;
+ bool exceeds_array_limit() const {
+ return ExceedsLimitField::decode(bit_field_);
}
- Handle<JSArray> result_array =
- isolate->factory()->NewJSArray(elements_kind,
- actual_delete_count,
- actual_delete_count);
+ Handle<JSArray> ToArray() {
+ Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
+ Handle<Object> length =
+ isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
+ Handle<Map> map = JSObject::GetElementsTransitionMap(
+ array, fast_elements() ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
+ array->set_map(*map);
+ array->set_length(*length);
+ array->set_elements(*storage_);
+ return array;
+ }
- if (actual_delete_count > 0) {
- DisallowHeapAllocation no_gc;
- ElementsAccessor* accessor = array->GetElementsAccessor();
- accessor->CopyElements(
- elms_obj, actual_start, elements_kind,
- handle(result_array->elements(), isolate), 0, actual_delete_count);
- }
-
- bool elms_changed = false;
- if (item_count < actual_delete_count) {
- // Shrink the array.
- const bool trim_array = !heap->lo_space()->Contains(*elms_obj) &&
- ((actual_start + item_count) <
- (len - actual_delete_count - actual_start));
- if (trim_array) {
- const int delta = actual_delete_count - item_count;
-
- if (elms_obj->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> elms =
- Handle<FixedDoubleArray>::cast(elms_obj);
- MoveDoubleElements(*elms, delta, *elms, 0, actual_start);
- } else {
- Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
- DisallowHeapAllocation no_gc;
- heap->MoveElements(*elms, delta, 0, actual_start);
+ private:
+ // Convert storage to dictionary mode.
+ void SetDictionaryMode() {
+ DCHECK(fast_elements());
+ Handle<FixedArray> current_storage(*storage_);
+ Handle<SeededNumberDictionary> slow_storage(
+ SeededNumberDictionary::New(isolate_, current_storage->length()));
+ uint32_t current_length = static_cast<uint32_t>(current_storage->length());
+ for (uint32_t i = 0; i < current_length; i++) {
+ HandleScope loop_scope(isolate_);
+ Handle<Object> element(current_storage->get(i), isolate_);
+ if (!element->IsTheHole()) {
+ // The object holding this backing store has just been allocated, so
+ // it cannot yet be used as a prototype.
+ Handle<SeededNumberDictionary> new_storage =
+ SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
+ false);
+ if (!new_storage.is_identical_to(slow_storage)) {
+ slow_storage = loop_scope.CloseAndEscape(new_storage);
+ }
}
+ }
+ clear_storage();
+ set_storage(*slow_storage);
+ set_fast_elements(false);
+ }
- if (heap->CanMoveObjectStart(*elms_obj)) {
- // On the fast path we move the start of the object in memory.
- elms_obj = handle(heap->LeftTrimFixedArray(*elms_obj, delta));
- } else {
- // This is the slow path. We are going to move the elements to the left
- // by copying them. For trimmed values we store the hole.
- if (elms_obj->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> elms =
- Handle<FixedDoubleArray>::cast(elms_obj);
- MoveDoubleElements(*elms, 0, *elms, delta, len - delta);
- elms->FillWithHoles(len - delta, len);
- } else {
- Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
- DisallowHeapAllocation no_gc;
- heap->MoveElements(*elms, 0, delta, len - delta);
- elms->FillWithHoles(len - delta, len);
+ inline void clear_storage() {
+ GlobalHandles::Destroy(Handle<Object>::cast(storage_).location());
+ }
+
+ inline void set_storage(FixedArray* storage) {
+ storage_ =
+ Handle<FixedArray>::cast(isolate_->global_handles()->Create(storage));
+ }
+
+ class FastElementsField : public BitField<bool, 0, 1> {};
+ class ExceedsLimitField : public BitField<bool, 1, 1> {};
+
+ bool fast_elements() const { return FastElementsField::decode(bit_field_); }
+ void set_fast_elements(bool fast) {
+ bit_field_ = FastElementsField::update(bit_field_, fast);
+ }
+ void set_exceeds_array_limit(bool exceeds) {
+ bit_field_ = ExceedsLimitField::update(bit_field_, exceeds);
+ }
+
+ Isolate* isolate_;
+ Handle<FixedArray> storage_; // Always a global handle.
+ // Index after last seen index. Always less than or equal to
+ // JSObject::kMaxElementCount.
+ uint32_t index_offset_;
+ uint32_t bit_field_;
+};
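The visitor packs its two booleans into bit_field_ via V8's BitField template, bit 0 for fast_elements and bit 1 for exceeds_limit. A plain-C++ sketch of the same encoding, without the template machinery:

#include <cstdint>

class VisitorFlags {
 public:
  explicit VisitorFlags(bool fast) : bits_(fast ? 1u : 0u) {}
  bool fast_elements() const { return (bits_ & 1u) != 0; }  // bit 0
  bool exceeds_limit() const { return (bits_ & 2u) != 0; }  // bit 1
  void set_fast_elements(bool v) { bits_ = (bits_ & ~1u) | (v ? 1u : 0u); }
  void set_exceeds_limit(bool v) { bits_ = (bits_ & ~2u) | (v ? 2u : 0u); }
 private:
  uint32_t bits_;
};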
+
+
+uint32_t EstimateElementCount(Handle<JSArray> array) {
+ uint32_t length = static_cast<uint32_t>(array->length()->Number());
+ int element_count = 0;
+ switch (array->GetElementsKind()) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ // Fast elements can't have lengths that are not representable by
+ // a 32-bit signed integer.
+ DCHECK(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
+ int fast_length = static_cast<int>(length);
+ Handle<FixedArray> elements(FixedArray::cast(array->elements()));
+ for (int i = 0; i < fast_length; i++) {
+ if (!elements->get(i)->IsTheHole()) element_count++;
+ }
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ // Fast elements can't have lengths that are not representable by
+ // a 32-bit signed integer.
+ DCHECK(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
+ int fast_length = static_cast<int>(length);
+ if (array->elements()->IsFixedArray()) {
+ DCHECK(FixedArray::cast(array->elements())->length() == 0);
+ break;
+ }
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(array->elements()));
+ for (int i = 0; i < fast_length; i++) {
+ if (!elements->is_the_hole(i)) element_count++;
+ }
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ Handle<SeededNumberDictionary> dictionary(
+ SeededNumberDictionary::cast(array->elements()));
+ int capacity = dictionary->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Handle<Object> key(dictionary->KeyAt(i), array->GetIsolate());
+ if (dictionary->IsKey(*key)) {
+ element_count++;
}
}
- elms_changed = true;
+ break;
+ }
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ // External arrays are always dense.
+ return length;
+ }
+ // As an estimate, we assume that the prototype doesn't contain any
+ // inherited elements.
+ return element_count;
+}
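For the fast element kinds the estimate is simply a count of non-hole slots, which is exact unless the prototype chain contributes elements. A standalone sketch, with optional<double> modelling hole-versus-value slots:

#include <cstdint>
#include <optional>
#include <vector>

inline uint32_t CountNonHoleSlots(
    const std::vector<std::optional<double>>& backing_store) {
  uint32_t count = 0;
  for (const auto& slot : backing_store) {
    if (slot.has_value()) count++;  // Holes don't contribute elements.
  }
  return count;
}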
+
+
+template <class ExternalArrayClass, class ElementType>
+void IterateTypedArrayElements(Isolate* isolate, Handle<JSObject> receiver,
+ bool elements_are_ints,
+ bool elements_are_guaranteed_smis,
+ ArrayConcatVisitor* visitor) {
+ Handle<ExternalArrayClass> array(
+ ExternalArrayClass::cast(receiver->elements()));
+ uint32_t len = static_cast<uint32_t>(array->length());
+
+ DCHECK(visitor != NULL);
+ if (elements_are_ints) {
+ if (elements_are_guaranteed_smis) {
+ for (uint32_t j = 0; j < len; j++) {
+ HandleScope loop_scope(isolate);
+ Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get_scalar(j))),
+ isolate);
+ visitor->visit(j, e);
+ }
} else {
- if (elms_obj->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> elms =
- Handle<FixedDoubleArray>::cast(elms_obj);
- MoveDoubleElements(*elms, actual_start + item_count,
- *elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- elms->FillWithHoles(new_length, len);
- } else {
- Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
- DisallowHeapAllocation no_gc;
- heap->MoveElements(*elms, actual_start + item_count,
- actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- elms->FillWithHoles(new_length, len);
+ for (uint32_t j = 0; j < len; j++) {
+ HandleScope loop_scope(isolate);
+ int64_t val = static_cast<int64_t>(array->get_scalar(j));
+ if (Smi::IsValid(static_cast<intptr_t>(val))) {
+ Handle<Smi> e(Smi::FromInt(static_cast<int>(val)), isolate);
+ visitor->visit(j, e);
+ } else {
+ Handle<Object> e =
+ isolate->factory()->NewNumber(static_cast<ElementType>(val));
+ visitor->visit(j, e);
+ }
+ }
+ }
+ } else {
+ for (uint32_t j = 0; j < len; j++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> e = isolate->factory()->NewNumber(array->get_scalar(j));
+ visitor->visit(j, e);
+ }
+ }
+}
+
+
+// Used for sorting indices in a List<uint32_t>.
+int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
+ uint32_t a = *ap;
+ uint32_t b = *bp;
+ return (a == b) ? 0 : (a < b) ? -1 : 1;
+}
+
+
+void CollectElementIndices(Handle<JSObject> object, uint32_t range,
+ List<uint32_t>* indices) {
+ Isolate* isolate = object->GetIsolate();
+ ElementsKind kind = object->GetElementsKind();
+ switch (kind) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ Handle<FixedArray> elements(FixedArray::cast(object->elements()));
+ uint32_t length = static_cast<uint32_t>(elements->length());
+ if (range < length) length = range;
+ for (uint32_t i = 0; i < length; i++) {
+ if (!elements->get(i)->IsTheHole()) {
+ indices->Add(i);
+ }
+ }
+ break;
+ }
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ if (object->elements()->IsFixedArray()) {
+ DCHECK(object->elements()->length() == 0);
+ break;
+ }
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(object->elements()));
+ uint32_t length = static_cast<uint32_t>(elements->length());
+ if (range < length) length = range;
+ for (uint32_t i = 0; i < length; i++) {
+ if (!elements->is_the_hole(i)) {
+ indices->Add(i);
+ }
+ }
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ Handle<SeededNumberDictionary> dict(
+ SeededNumberDictionary::cast(object->elements()));
+ uint32_t capacity = dict->Capacity();
+ for (uint32_t j = 0; j < capacity; j++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> k(dict->KeyAt(j), isolate);
+ if (dict->IsKey(*k)) {
+ DCHECK(k->IsNumber());
+ uint32_t index = static_cast<uint32_t>(k->Number());
+ if (index < range) {
+ indices->Add(index);
+ }
+ }
+ }
+ break;
+ }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) case TYPE##_ELEMENTS:
+
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+ {
+ uint32_t length = static_cast<uint32_t>(
+ FixedArrayBase::cast(object->elements())->length());
+ if (range <= length) {
+ length = range;
+ // We will add all indices, so we might as well clear it first
+ // and avoid duplicates.
+ indices->Clear();
+ }
+ for (uint32_t i = 0; i < length; i++) {
+ indices->Add(i);
+ }
+ if (length == range) return; // All indices accounted for already.
+ break;
+ }
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
+ ElementsAccessor* accessor = object->GetElementsAccessor();
+ for (uint32_t i = 0; i < range; i++) {
+ if (accessor->HasElement(object, i)) {
+ indices->Add(i);
+ }
+ }
+ break;
+ }
+ }
+
+ PrototypeIterator iter(isolate, object);
+ if (!iter.IsAtEnd()) {
+ // The prototype will usually have no inherited element indices,
+ // but we have to check.
+ CollectElementIndices(PrototypeIterator::GetCurrent<JSObject>(iter), range,
+ indices);
+ }
+}
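The dictionary path of IterateElements (below) sorts the collected indices with compareUInt32 and then skips runs of equal values, since CollectElementIndices can report the same index from both the object and its prototypes. A compact sketch of that sort-and-deduplicate walk:

#include <algorithm>
#include <cstdint>
#include <functional>
#include <vector>

inline void VisitSortedUnique(std::vector<uint32_t> indices,
                              const std::function<void(uint32_t)>& visit) {
  std::sort(indices.begin(), indices.end());
  size_t j = 0;
  while (j < indices.size()) {
    uint32_t index = indices[j];
    visit(index);
    do { j++; } while (j < indices.size() && indices[j] == index);  // dedupe
  }
}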
+
+
+bool IterateElementsSlow(Isolate* isolate, Handle<JSObject> receiver,
+ uint32_t length, ArrayConcatVisitor* visitor) {
+ for (uint32_t i = 0; i < length; ++i) {
+ HandleScope loop_scope(isolate);
+ Maybe<bool> maybe = JSReceiver::HasElement(receiver, i);
+ if (!maybe.IsJust()) return false;
+ if (maybe.FromJust()) {
+ Handle<Object> element_value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_value,
+ Object::GetElement(isolate, receiver, i),
+ false);
+ visitor->visit(i, element_value);
+ }
+ }
+ visitor->increase_index_offset(length);
+ return true;
+}
+
+
+/**
+ * A helper function that visits the elements of a JSObject in numerical
+ * order.
+ *
+ * The visitor argument is called for each existing element in the array,
+ * with the element index and the element's value.
+ * Afterwards it increments the base index of the visitor by the array
+ * length.
+ * Returns false if any access threw an exception, otherwise true.
+ */
+bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
+ ArrayConcatVisitor* visitor) {
+ uint32_t length = 0;
+
+ if (receiver->IsJSArray()) {
+ Handle<JSArray> array(Handle<JSArray>::cast(receiver));
+ length = static_cast<uint32_t>(array->length()->Number());
+ } else {
+ Handle<Object> val;
+ Handle<Object> key(isolate->heap()->length_string(), isolate);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, val, Runtime::GetObjectProperty(isolate, receiver, key),
+ false);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, val,
+ Object::ToLength(isolate, val), false);
+ // TODO(caitp): Support larger element indexes (up to 2^53-1).
+ if (!val->ToUint32(&length)) {
+ length = 0;
+ }
+ }
+
+ if (!(receiver->IsJSArray() || receiver->IsJSTypedArray())) {
+ // For classes which are not known to be safe to access via elements alone,
+ // use the slow case.
+ return IterateElementsSlow(isolate, receiver, length, visitor);
+ }
+
+ switch (receiver->GetElementsKind()) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ // Run through the elements FixedArray and use HasElement and GetElement
+ // to check the prototype for missing elements.
+ Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
+ int fast_length = static_cast<int>(length);
+ DCHECK(fast_length <= elements->length());
+ for (int j = 0; j < fast_length; j++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> element_value(elements->get(j), isolate);
+ if (!element_value->IsTheHole()) {
+ visitor->visit(j, element_value);
+ } else {
+ Maybe<bool> maybe = JSReceiver::HasElement(receiver, j);
+ if (!maybe.IsJust()) return false;
+ if (maybe.FromJust()) {
+ // Call GetElement on receiver, not its prototype, or getters won't
+ // have the correct receiver.
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element_value,
+ Object::GetElement(isolate, receiver, j), false);
+ visitor->visit(j, element_value);
+ }
+ }
+ }
+ break;
+ }
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ // Empty array is FixedArray but not FixedDoubleArray.
+ if (length == 0) break;
+ // Run through the elements FixedArray and use HasElement and GetElement
+ // to check the prototype for missing elements.
+ if (receiver->elements()->IsFixedArray()) {
+ DCHECK(receiver->elements()->length() == 0);
+ break;
}
+ Handle<FixedDoubleArray> elements(
+ FixedDoubleArray::cast(receiver->elements()));
+ int fast_length = static_cast<int>(length);
+ DCHECK(fast_length <= elements->length());
+ for (int j = 0; j < fast_length; j++) {
+ HandleScope loop_scope(isolate);
+ if (!elements->is_the_hole(j)) {
+ double double_value = elements->get_scalar(j);
+ Handle<Object> element_value =
+ isolate->factory()->NewNumber(double_value);
+ visitor->visit(j, element_value);
+ } else {
+ Maybe<bool> maybe = JSReceiver::HasElement(receiver, j);
+ if (!maybe.IsJust()) return false;
+ if (maybe.FromJust()) {
+ // Call GetElement on receiver, not its prototype, or getters won't
+ // have the correct receiver.
+ Handle<Object> element_value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element_value,
+ Object::GetElement(isolate, receiver, j), false);
+ visitor->visit(j, element_value);
+ }
+ }
+ }
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ Handle<SeededNumberDictionary> dict(receiver->element_dictionary());
+ List<uint32_t> indices(dict->Capacity() / 2);
+ // Collect all indices in the object and the prototypes less
+ // than length. This might introduce duplicates in the indices list.
+ CollectElementIndices(receiver, length, &indices);
+ indices.Sort(&compareUInt32);
+ int j = 0;
+ int n = indices.length();
+ while (j < n) {
+ HandleScope loop_scope(isolate);
+ uint32_t index = indices[j];
+ Handle<Object> element;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element, Object::GetElement(isolate, receiver, index),
+ false);
+ visitor->visit(index, element);
+ // Skip to next different index (i.e., omit duplicates).
+ do {
+ j++;
+ } while (j < n && indices[j] == index);
+ }
+ break;
+ }
+ case UINT8_CLAMPED_ELEMENTS: {
+ Handle<FixedUint8ClampedArray> pixels(
+ FixedUint8ClampedArray::cast(receiver->elements()));
+ for (uint32_t j = 0; j < length; j++) {
+ Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
+ visitor->visit(j, e);
+ }
+ break;
+ }
+ case INT8_ELEMENTS: {
+ IterateTypedArrayElements<FixedInt8Array, int8_t>(isolate, receiver, true,
+ true, visitor);
+ break;
+ }
+ case UINT8_ELEMENTS: {
+ IterateTypedArrayElements<FixedUint8Array, uint8_t>(isolate, receiver,
+ true, true, visitor);
+ break;
+ }
+ case INT16_ELEMENTS: {
+ IterateTypedArrayElements<FixedInt16Array, int16_t>(isolate, receiver,
+ true, true, visitor);
+ break;
+ }
+ case UINT16_ELEMENTS: {
+ IterateTypedArrayElements<FixedUint16Array, uint16_t>(
+ isolate, receiver, true, true, visitor);
+ break;
+ }
+ case INT32_ELEMENTS: {
+ IterateTypedArrayElements<FixedInt32Array, int32_t>(isolate, receiver,
+ true, false, visitor);
+ break;
+ }
+ case UINT32_ELEMENTS: {
+ IterateTypedArrayElements<FixedUint32Array, uint32_t>(
+ isolate, receiver, true, false, visitor);
+ break;
}
- } else if (item_count > actual_delete_count) {
- Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- DCHECK((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
-
- // Check if array need to grow.
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- Handle<FixedArray> new_elms =
- isolate->factory()->NewUninitializedFixedArray(capacity);
-
- DisallowHeapAllocation no_gc;
-
- ElementsKind kind = array->GetElementsKind();
- ElementsAccessor* accessor = array->GetElementsAccessor();
- if (actual_start > 0) {
- // Copy the part before actual_start as is.
- accessor->CopyElements(
- elms, 0, kind, new_elms, 0, actual_start);
+ case FLOAT32_ELEMENTS: {
+ IterateTypedArrayElements<FixedFloat32Array, float>(
+ isolate, receiver, false, false, visitor);
+ break;
+ }
+ case FLOAT64_ELEMENTS: {
+ IterateTypedArrayElements<FixedFloat64Array, double>(
+ isolate, receiver, false, false, visitor);
+ break;
+ }
+ case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+ case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
+ for (uint32_t index = 0; index < length; index++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> element;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, element, Object::GetElement(isolate, receiver, index),
+ false);
+ visitor->visit(index, element);
}
- accessor->CopyElements(
- elms, actual_start + actual_delete_count, kind,
- new_elms, actual_start + item_count,
- ElementsAccessor::kCopyToEndAndInitializeToHole);
+ break;
+ }
+ }
+ visitor->increase_index_offset(length);
+ return true;
+}
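For non-array receivers the length comes from the "length" property run through ToLength, and anything that does not fit in 32 bits collapses to 0 (larger indexes are left as a TODO above). A rough standalone sketch of that clamp applied to a raw double:

#include <algorithm>
#include <cmath>
#include <cstdint>

inline uint32_t DetermineIterationLength(double raw) {
  if (std::isnan(raw) || raw < 0) return 0;                    // ToLength floor
  double len = std::min(std::floor(raw), 9007199254740991.0);  // 2^53 - 1
  if (len > 4294967295.0) return 0;  // Beyond uint32: fall back to 0.
  return static_cast<uint32_t>(len);
}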
+
+
+bool HasConcatSpreadableModifier(Isolate* isolate, Handle<JSArray> obj) {
+ if (!FLAG_harmony_concat_spreadable) return false;
+ Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
+ Maybe<bool> maybe =
+ JSReceiver::HasProperty(Handle<JSReceiver>::cast(obj), key);
+ if (!maybe.IsJust()) return false;
+ return maybe.FromJust();
+}
+
- elms_obj = new_elms;
- elms_changed = true;
+bool IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
+ HandleScope handle_scope(isolate);
+ if (!obj->IsSpecObject()) return false;
+ if (FLAG_harmony_concat_spreadable) {
+ Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
+ Handle<Object> value;
+ MaybeHandle<Object> maybeValue =
+ i::Runtime::GetObjectProperty(isolate, obj, key);
+ if (maybeValue.ToHandle(&value) && !value->IsUndefined()) {
+ return value->BooleanValue();
+ }
+ }
+ return obj->IsJSArray();
+}
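IsConcatSpreadable encodes the ES2015 decision order: non-objects never spread; with --harmony_concat_spreadable an explicit @@isConcatSpreadable value wins; otherwise only arrays spread. A sketch with a minimal tagged value standing in for V8 handles:

enum class Kind { kPrimitive, kArray, kOtherObject };

struct Value {
  Kind kind;
  bool has_spreadable_symbol;  // @@isConcatSpreadable present and defined
  bool spreadable_value;       // its boolean value
};

inline bool IsSpreadable(const Value& v, bool harmony_flag) {
  if (v.kind == Kind::kPrimitive) return false;
  if (harmony_flag && v.has_spreadable_symbol) return v.spreadable_value;
  return v.kind == Kind::kArray;  // Default: only arrays spread.
}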
+
+
+/**
+ * Array::concat implementation.
+ * See ECMAScript 262, 15.4.4.4.
+ * TODO(581): Fix non-compliance for very large concatenations and update to
+ * follow the ECMAScript 5 specification.
+ */
+Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
+ int argument_count = args->length();
+
+ // Pass 1: estimate the length and number of elements of the result.
+ // The actual length can be larger if any of the arguments have getters
+ // that mutate other arguments (but will otherwise be precise).
+ // The number of elements is precise if there are no inherited elements.
+
+ ElementsKind kind = FAST_SMI_ELEMENTS;
+
+ uint32_t estimate_result_length = 0;
+ uint32_t estimate_nof_elements = 0;
+ for (int i = 0; i < argument_count; i++) {
+ HandleScope loop_scope(isolate);
+ Handle<Object> obj((*args)[i], isolate);
+ uint32_t length_estimate;
+ uint32_t element_estimate;
+ if (obj->IsJSArray()) {
+ Handle<JSArray> array(Handle<JSArray>::cast(obj));
+ length_estimate = static_cast<uint32_t>(array->length()->Number());
+ if (length_estimate != 0) {
+ ElementsKind array_kind =
+ GetPackedElementsKind(array->map()->elements_kind());
+ kind = GetMoreGeneralElementsKind(kind, array_kind);
+ }
+ element_estimate = EstimateElementCount(array);
+ } else {
+ if (obj->IsHeapObject()) {
+ if (obj->IsNumber()) {
+ kind = GetMoreGeneralElementsKind(kind, FAST_DOUBLE_ELEMENTS);
+ } else {
+ kind = GetMoreGeneralElementsKind(kind, FAST_ELEMENTS);
+ }
+ }
+ length_estimate = 1;
+ element_estimate = 1;
+ }
+ // Avoid overflows by capping at kMaxElementCount.
+ if (JSObject::kMaxElementCount - estimate_result_length < length_estimate) {
+ estimate_result_length = JSObject::kMaxElementCount;
+ } else {
+ estimate_result_length += length_estimate;
+ }
+ if (JSObject::kMaxElementCount - estimate_nof_elements < element_estimate) {
+ estimate_nof_elements = JSObject::kMaxElementCount;
} else {
- DisallowHeapAllocation no_gc;
- heap->MoveElements(*elms, actual_start + item_count,
- actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
+ estimate_nof_elements += element_estimate;
}
}
- if (IsFastDoubleElementsKind(elements_kind)) {
- Handle<FixedDoubleArray> elms = Handle<FixedDoubleArray>::cast(elms_obj);
- for (int k = actual_start; k < actual_start + item_count; k++) {
- Object* arg = args[3 + k - actual_start];
- if (arg->IsSmi()) {
- elms->set(k, Smi::cast(arg)->value());
- } else {
- elms->set(k, HeapNumber::cast(arg)->value());
+ // If estimated number of elements is more than half of length, a
+ // fixed array (fast case) is more time and space-efficient than a
+ // dictionary.
+ bool fast_case = (estimate_nof_elements * 2) >= estimate_result_length;
+
+ if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
+ Handle<FixedArrayBase> storage =
+ isolate->factory()->NewFixedDoubleArray(estimate_result_length);
+ int j = 0;
+ bool failure = false;
+ if (estimate_result_length > 0) {
+ Handle<FixedDoubleArray> double_storage =
+ Handle<FixedDoubleArray>::cast(storage);
+ for (int i = 0; i < argument_count; i++) {
+ Handle<Object> obj((*args)[i], isolate);
+ if (obj->IsSmi()) {
+ double_storage->set(j, Smi::cast(*obj)->value());
+ j++;
+ } else if (obj->IsNumber()) {
+ double_storage->set(j, obj->Number());
+ j++;
+ } else {
+ JSArray* array = JSArray::cast(*obj);
+ uint32_t length = static_cast<uint32_t>(array->length()->Number());
+ switch (array->map()->elements_kind()) {
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS: {
+ // Empty array is FixedArray but not FixedDoubleArray.
+ if (length == 0) break;
+ FixedDoubleArray* elements =
+ FixedDoubleArray::cast(array->elements());
+ for (uint32_t i = 0; i < length; i++) {
+ if (elements->is_the_hole(i)) {
+ // TODO(jkummerow/verwaest): We could be a bit more clever
+ // here: Check if there are no elements/getters on the
+ // prototype chain, and if so, allow creation of a holey
+ // result array.
+ // Same thing below (holey smi case).
+ failure = true;
+ break;
+ }
+ double double_value = elements->get_scalar(i);
+ double_storage->set(j, double_value);
+ j++;
+ }
+ break;
+ }
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_SMI_ELEMENTS: {
+ FixedArray* elements(FixedArray::cast(array->elements()));
+ for (uint32_t i = 0; i < length; i++) {
+ Object* element = elements->get(i);
+ if (element->IsTheHole()) {
+ failure = true;
+ break;
+ }
+ int32_t int_value = Smi::cast(element)->value();
+ double_storage->set(j, int_value);
+ j++;
+ }
+ break;
+ }
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ DCHECK_EQ(0u, length);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ if (failure) break;
}
}
- } else {
- Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int k = actual_start; k < actual_start + item_count; k++) {
- elms->set(k, args[3 + k - actual_start], mode);
+ if (!failure) {
+ Handle<JSArray> array = isolate->factory()->NewJSArray(0);
+ Smi* length = Smi::FromInt(j);
+ Handle<Map> map;
+ map = JSObject::GetElementsTransitionMap(array, kind);
+ array->set_map(*map);
+ array->set_length(length);
+ array->set_elements(*storage);
+ return *array;
}
+ // In case of failure, fall through.
}
- if (elms_changed) {
- array->set_elements(*elms_obj);
+ Handle<FixedArray> storage;
+ if (fast_case) {
+ // The backing storage array must have non-existing elements to preserve
+ // holes across concat operations.
+ storage =
+ isolate->factory()->NewFixedArrayWithHoles(estimate_result_length);
+ } else {
+ // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
+ uint32_t at_least_space_for =
+ estimate_nof_elements + (estimate_nof_elements >> 2);
+ storage = Handle<FixedArray>::cast(
+ SeededNumberDictionary::New(isolate, at_least_space_for));
}
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return *result_array;
-}
+ ArrayConcatVisitor visitor(isolate, storage, fast_case);
+ for (int i = 0; i < argument_count; i++) {
+ Handle<Object> obj((*args)[i], isolate);
+ bool spreadable = IsConcatSpreadable(isolate, obj);
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ if (spreadable) {
+ Handle<JSObject> object = Handle<JSObject>::cast(obj);
+ if (!IterateElements(isolate, object, &visitor)) {
+ return isolate->heap()->exception();
+ }
+ } else {
+ visitor.visit(0, obj);
+ visitor.increase_index_offset(1);
+ }
+ }
+
+ if (visitor.exceeds_array_limit()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewRangeError(MessageTemplate::kInvalidArrayLength));
+ }
+ return *visitor.ToArray();
+}
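Two small rules drive the first pass of Slow_ArrayConcat: the length and element estimates accumulate with saturation at kMaxElementCount instead of overflowing, and the result uses fast (FixedArray) storage only when at least half of its slots are expected to be occupied. Both restated standalone (the cap value is a stand-in):

#include <cstdint>

constexpr uint32_t kCap = 0xFFFFFFFFu;  // stand-in for kMaxElementCount

inline uint32_t SaturatingAdd(uint32_t a, uint32_t b) {
  return (kCap - a < b) ? kCap : a + b;  // same guard as above
}

inline bool UseFastStorage(uint32_t estimated_elements,
                           uint32_t estimated_length) {
  // Density test: a dictionary only pays off for sparse results.
  return estimated_elements * 2 >= estimated_length;
}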
-BUILTIN(ArrayConcat) {
- HandleScope scope(isolate);
- int n_arguments = args.length();
+MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, Arguments* args) {
+ if (!isolate->IsFastArrayConstructorPrototypeChainIntact()) {
+ return MaybeHandle<JSArray>();
+ }
+ int n_arguments = args->length();
int result_len = 0;
- ElementsKind elements_kind = GetInitialFastElementsKind();
- bool has_double = false;
{
DisallowHeapAllocation no_gc;
- Context* native_context = isolate->context()->native_context();
- Object* array_proto = native_context->array_function()->prototype();
- PrototypeIterator iter(isolate, array_proto,
- PrototypeIterator::START_AT_RECEIVER);
- if (!ArrayPrototypeHasNoElements(&iter)) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arrayConcat", args);
- }
-
+ Object* array_proto = isolate->array_function()->prototype();
// Iterate through all the arguments performing checks
// and calculating total length.
- bool is_holey = false;
for (int i = 0; i < n_arguments; i++) {
- Object* arg = args[i];
+ Object* arg = (*args)[i];
+ if (!arg->IsJSArray()) return MaybeHandle<JSArray>();
+ Handle<JSArray> array(JSArray::cast(arg), isolate);
+ if (!array->HasFastElements()) return MaybeHandle<JSArray>();
PrototypeIterator iter(isolate, arg);
- if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements() ||
- iter.GetCurrent() != array_proto) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arrayConcat", args);
+ if (iter.GetCurrent() != array_proto) return MaybeHandle<JSArray>();
+ if (HasConcatSpreadableModifier(isolate, array)) {
+ return MaybeHandle<JSArray>();
}
- int len = Smi::cast(JSArray::cast(arg)->length())->value();
+ int len = Smi::cast(array->length())->value();
// We shouldn't overflow when adding another len.
const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
@@ -876,50 +1409,82 @@ BUILTIN(ArrayConcat) {
USE(kHalfOfMaxInt);
result_len += len;
DCHECK(result_len >= 0);
-
- if (result_len > FixedDoubleArray::kMaxLength) {
- AllowHeapAllocation allow_allocation;
- return CallJsBuiltin(isolate, "$arrayConcat", args);
+ // Throw an Error if we overflow the FixedArray limits
+ if (FixedArray::kMaxLength < result_len) {
+ THROW_NEW_ERROR(isolate,
+ NewRangeError(MessageTemplate::kInvalidArrayLength),
+ JSArray);
}
-
- ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
- has_double = has_double || IsFastDoubleElementsKind(arg_kind);
- is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
- if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
- elements_kind = arg_kind;
- }
- }
- if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind);
- }
-
- // If a double array is concatted into a fast elements array, the fast
- // elements array needs to be initialized to contain proper holes, since
- // boxing doubles may cause incremental marking.
- ArrayStorageAllocationMode mode =
- has_double && IsFastObjectElementsKind(elements_kind)
- ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
- Handle<JSArray> result_array = isolate->factory()->NewJSArray(
- elements_kind, result_len, result_len, Strength::WEAK, mode);
- if (result_len == 0) return *result_array;
-
- int j = 0;
- Handle<FixedArrayBase> storage(result_array->elements(), isolate);
- ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
- for (int i = 0; i < n_arguments; i++) {
- // It is crucial to keep |array| in a raw pointer form to avoid performance
- // degradation.
- JSArray* array = JSArray::cast(args[i]);
- int len = Smi::cast(array->length())->value();
- if (len > 0) {
- ElementsKind from_kind = array->GetElementsKind();
- accessor->CopyElements(array, 0, from_kind, storage, j, len);
- j += len;
}
}
+ return ElementsAccessor::Concat(isolate, args, n_arguments);
+}
+
+} // namespace
- DCHECK(j == result_len);
+BUILTIN(ArrayConcat) {
+ HandleScope scope(isolate);
- return *result_array;
+ Handle<Object> receiver;
+ if (!Object::ToObject(isolate, handle(args[0], isolate))
+ .ToHandle(&receiver)) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Array.prototype.concat")));
+ }
+ args[0] = *receiver;
+
+ Handle<JSArray> result_array;
+ if (Fast_ArrayConcat(isolate, &args).ToHandle(&result_array)) {
+ return *result_array;
+ }
+ if (isolate->has_pending_exception()) return isolate->heap()->exception();
+ return Slow_ArrayConcat(&args, isolate);
+}
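As an illustrative JS-level sketch (not part of this patch) of when Fast_ArrayConcat above can apply: every argument must be a fast-elements JSArray whose immediate prototype is the unmodified Array.prototype and which carries no @@isConcatSpreadable modifier; anything else falls through to Slow_ArrayConcat.

    [1, 2].concat([3, 4]);                 // all guards hold: fast path
    const a = [1, 2];
    a[Symbol.isConcatSpreadable] = true;   // modifier present, so
    [0].concat(a);                         // this falls back to the slow path
    class SubArray extends Array {}        // hypothetical subclass, for illustration
    [0].concat(new SubArray());            // prototype != Array.prototype: slow path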
+
+
+// ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ] ( hint )
+BUILTIN(DateToPrimitive) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ if (!args.receiver()->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kIncompatibleMethodReceiver,
+ isolate->factory()->NewStringFromAsciiChecked(
+ "Date.prototype [ @@toPrimitive ]"),
+ args.receiver()));
+ }
+ Handle<JSReceiver> receiver = args.at<JSReceiver>(0);
+ Handle<Object> hint = args.at<Object>(1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ JSDate::ToPrimitive(receiver, hint));
+ return *result;
+}
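Observable behavior of the builtin above, sketched at the JS level (standard ES6 semantics, not V8-specific; `d` is an illustrative name):

    const d = new Date(0);
    d[Symbol.toPrimitive]('number');             // 0, as with valueOf()
    d[Symbol.toPrimitive]('string');             // date string, as with toString()
    d[Symbol.toPrimitive]('default');            // Dates treat 'default' like 'string'
    d[Symbol.toPrimitive].call(null, 'number');  // TypeError: receiver is not an object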
+
+
+// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Call]] case.
+BUILTIN(SymbolConstructor) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ Handle<Symbol> result = isolate->factory()->NewSymbol();
+ Handle<Object> description = args.at<Object>(1);
+ if (!description->IsUndefined()) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, description,
+ Object::ToString(isolate, description));
+ result->set_name(*description);
+ }
+ return *result;
+}
+
+
+// ES6 section 19.4.1.1 Symbol ( [ description ] ) for the [[Construct]] case.
+BUILTIN(SymbolConstructor_ConstructStub) {
+ HandleScope scope(isolate);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kNotConstructor,
+ isolate->factory()->Symbol_string()));
}
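The two stubs above split Symbol's [[Call]] and [[Construct]] behavior; a JS-level sketch of the spec semantics they implement:

    Symbol('id');            // [[Call]]: fresh symbol with description 'id'
    Symbol(42).toString();   // 'Symbol(42)': the description went through ToString
    Symbol();                // undefined description is left unset
    new Symbol();            // [[Construct]]: TypeError, Symbol is not a constructor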
@@ -1117,7 +1682,7 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
// Get the invocation callback from the function descriptor that was
// used to create the called object.
- DCHECK(obj->map()->has_instance_call_handler());
+ DCHECK(obj->map()->is_callable());
JSFunction* constructor = JSFunction::cast(obj->map()->GetConstructor());
// TODO(ishell): turn this back to a DCHECK.
CHECK(constructor->shared()->IsApiFunction());
@@ -1325,17 +1890,6 @@ Address const Builtins::c_functions_[cfunction_count] = {
};
#undef DEF_ENUM_C
-#define DEF_JS_NAME(name, ignore) #name,
-#define DEF_JS_ARGC(ignore, argc) argc,
-const char* const Builtins::javascript_names_[id_count] = {
- BUILTINS_LIST_JS(DEF_JS_NAME)
-};
-
-int const Builtins::javascript_argc_[id_count] = {
- BUILTINS_LIST_JS(DEF_JS_ARGC)
-};
-#undef DEF_JS_NAME
-#undef DEF_JS_ARGC
struct BuiltinDesc {
byte* generator;
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index cf90aacf89..d9129608dc 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -5,6 +5,8 @@
#ifndef V8_BUILTINS_H_
#define V8_BUILTINS_H_
+#include "src/handles.h"
+
namespace v8 {
namespace internal {
@@ -55,6 +57,11 @@ enum BuiltinExtraArguments {
V(ArraySplice, NO_EXTRA_ARGUMENTS) \
V(ArrayConcat, NO_EXTRA_ARGUMENTS) \
\
+ V(DateToPrimitive, NO_EXTRA_ARGUMENTS) \
+ \
+ V(SymbolConstructor, NO_EXTRA_ARGUMENTS) \
+ V(SymbolConstructor_ConstructStub, NO_EXTRA_ARGUMENTS) \
+ \
V(HandleApiCall, NEEDS_CALLED_FUNCTION) \
V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
@@ -64,68 +71,79 @@ enum BuiltinExtraArguments {
V(RestrictedStrictArgumentsPropertiesThrower, NO_EXTRA_ARGUMENTS)
// Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V) \
- V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubForDerived, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState) \
- V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
- \
- V(KeyedLoadIC_Megamorphic_Strong, KEYED_LOAD_IC, MEGAMORPHIC, \
- LoadICState::kStrongModeState) \
- \
- V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
- StoreICState::kStrictModeState) \
- \
- V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState) \
- V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
- kNoExtraICState) \
- V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState) \
- \
- V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
- StoreICState::kStrictModeState) \
- V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
- StoreICState::kStrictModeState) \
- V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
- StoreICState::kStrictModeState) \
- \
- V(FunctionCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(FunctionApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ReflectApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ReflectConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(StringConstructCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- \
- V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
- V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+#define BUILTIN_LIST_A(V) \
+ V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(CallFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(Call, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(ConstructFunction, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ConstructProxy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(Construct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(PushArgsAndCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubForDerived, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterpreterExitTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState) \
+ V(KeyedLoadIC_Megamorphic, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState) \
+ \
+ V(KeyedLoadIC_Megamorphic_Strong, KEYED_LOAD_IC, MEGAMORPHIC, \
+ LoadICState::kStrongModeState) \
+ \
+ V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \
+ StoreICState::kStrictModeState) \
+ \
+ V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState) \
+ V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC, \
+ kNoExtraICState) \
+ V(KeyedStoreIC_Megamorphic, KEYED_STORE_IC, MEGAMORPHIC, kNoExtraICState) \
+ \
+ V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
+ StoreICState::kStrictModeState) \
+ V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
+ StoreICState::kStrictModeState) \
+ V(KeyedStoreIC_Megamorphic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
+ StoreICState::kStrictModeState) \
+ \
+ V(FunctionCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(FunctionApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ReflectApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ReflectConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(StringConstructor, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StringConstructor_ConstructStub, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ \
+ V(MarkCodeAsToBeExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
// Define list of builtin handlers implemented in assembly.
@@ -147,49 +165,6 @@ enum BuiltinExtraArguments {
V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, kNoExtraICState) \
V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, kNoExtraICState)
-// Define list of builtins implemented in JavaScript.
-#define BUILTINS_LIST_JS(V) \
- V(EQUALS, 1) \
- V(COMPARE, 2) \
- V(COMPARE_STRONG, 2) \
- V(ADD, 1) \
- V(ADD_STRONG, 1) \
- V(SUB, 1) \
- V(SUB_STRONG, 1) \
- V(MUL, 1) \
- V(MUL_STRONG, 1) \
- V(DIV, 1) \
- V(DIV_STRONG, 1) \
- V(MOD, 1) \
- V(MOD_STRONG, 1) \
- V(BIT_OR, 1) \
- V(BIT_OR_STRONG, 1) \
- V(BIT_AND, 1) \
- V(BIT_AND_STRONG, 1) \
- V(BIT_XOR, 1) \
- V(BIT_XOR_STRONG, 1) \
- V(SHL, 1) \
- V(SHL_STRONG, 1) \
- V(SAR, 1) \
- V(SAR_STRONG, 1) \
- V(SHR, 1) \
- V(SHR_STRONG, 1) \
- V(IN, 1) \
- V(INSTANCE_OF, 1) \
- V(CALL_NON_FUNCTION, 0) \
- V(CALL_NON_FUNCTION_AS_CONSTRUCTOR, 0) \
- V(CALL_FUNCTION_PROXY, 1) \
- V(CALL_FUNCTION_PROXY_AS_CONSTRUCTOR, 1) \
- V(TO_NUMBER, 0) \
- V(TO_STRING, 0) \
- V(TO_NAME, 0) \
- V(STRING_ADD_LEFT, 1) \
- V(STRING_ADD_RIGHT, 1) \
- V(APPLY_PREPARE, 1) \
- V(REFLECT_APPLY_PREPARE, 1) \
- V(REFLECT_CONSTRUCT_PREPARE, 2) \
- V(CONCAT_ITERABLE_TO_ARRAY, 1) \
- V(STACK_OVERFLOW, 1)
class BuiltinFunctionTable;
class ObjectVisitor;
@@ -230,13 +205,6 @@ class Builtins {
cfunction_count
};
- enum JavaScript {
-#define DEF_ENUM(name, ignore) name,
- BUILTINS_LIST_JS(DEF_ENUM)
-#undef DEF_ENUM
- id_count
- };
-
#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
Handle<Code> name();
@@ -262,14 +230,11 @@ class Builtins {
return c_functions_[id];
}
- static const char* GetName(JavaScript id) { return javascript_names_[id]; }
const char* name(int index) {
DCHECK(index >= 0);
DCHECK(index < builtin_count);
return names_[index];
}
- static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
- static int NumberOfJavaScriptBuiltins() { return id_count; }
bool is_initialized() const { return initialized_; }
@@ -288,8 +253,6 @@ class Builtins {
// function f, we use an Object* array here.
Object* builtins_[builtin_count];
const char* names_[builtin_count];
- static const char* const javascript_names_[id_count];
- static int const javascript_argc_[id_count];
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
@@ -312,6 +275,20 @@ class Builtins {
static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);
+ // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ static void Generate_CallFunction(MacroAssembler* masm);
+ // ES6 section 7.3.12 Call(F, V, [argumentsList])
+ static void Generate_Call(MacroAssembler* masm);
+
+ // ES6 section 9.2.2 [[Construct]] ( argumentsList, newTarget)
+ static void Generate_ConstructFunction(MacroAssembler* masm);
+ // ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget)
+ static void Generate_ConstructProxy(MacroAssembler* masm);
+ // ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget])
+ static void Generate_Construct(MacroAssembler* masm);
+
+ static void Generate_PushArgsAndCall(MacroAssembler* masm);
+
static void Generate_FunctionCall(MacroAssembler* masm);
static void Generate_FunctionApply(MacroAssembler* masm);
static void Generate_ReflectApply(MacroAssembler* masm);
@@ -320,7 +297,8 @@ class Builtins {
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
- static void Generate_StringConstructCode(MacroAssembler* masm);
+ static void Generate_StringConstructor(MacroAssembler* masm);
+ static void Generate_StringConstructor_ConstructStub(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
static void Generate_OsrAfterStackCheck(MacroAssembler* masm);
static void Generate_InterruptCheck(MacroAssembler* masm);
diff --git a/deps/v8/src/cancelable-task.cc b/deps/v8/src/cancelable-task.cc
index 32d5057c49..5927c22cde 100644
--- a/deps/v8/src/cancelable-task.cc
+++ b/deps/v8/src/cancelable-task.cc
@@ -5,7 +5,7 @@
#include "src/cancelable-task.h"
#include "src/base/platform/platform.h"
-#include "src/v8.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index f6a8014b2e..9e776b40fc 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -153,9 +153,8 @@ Callable CodeFactory::StoreGlobalViaContext(Isolate* isolate, int depth,
// static
-Callable CodeFactory::Instanceof(Isolate* isolate,
- InstanceofStub::Flags flags) {
- InstanceofStub stub(isolate, flags);
+Callable CodeFactory::InstanceOf(Isolate* isolate) {
+ InstanceOfStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
@@ -177,6 +176,13 @@ Callable CodeFactory::ToNumber(Isolate* isolate) {
// static
+Callable CodeFactory::ToString(Isolate* isolate) {
+ ToStringStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::ToObject(Isolate* isolate) {
ToObjectStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -192,6 +198,13 @@ Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
// static
+Callable CodeFactory::StringCompare(Isolate* isolate) {
+ StringCompareStub stub(isolate);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::Typeof(Isolate* isolate) {
TypeofStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -223,6 +236,17 @@ Callable CodeFactory::FastNewClosure(Isolate* isolate,
// static
+Callable CodeFactory::ArgumentsAccess(Isolate* isolate,
+ bool is_unmapped_arguments,
+ bool has_duplicate_parameters) {
+ ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
+ is_unmapped_arguments, has_duplicate_parameters);
+ ArgumentsAccessStub stub(isolate, type);
+ return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
Callable CodeFactory::AllocateHeapNumber(Isolate* isolate) {
AllocateHeapNumberStub stub(isolate);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
@@ -236,5 +260,12 @@ Callable CodeFactory::CallFunction(Isolate* isolate, int argc,
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
+
+// static
+Callable CodeFactory::PushArgsAndCall(Isolate* isolate) {
+ return Callable(isolate->builtins()->PushArgsAndCall(),
+ PushArgsAndCallDescriptor(isolate));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 1386f054bb..5a67b27fd5 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -66,17 +66,19 @@ class CodeFactory final {
static Callable StoreGlobalViaContext(Isolate* isolate, int depth,
LanguageMode language_mode);
- static Callable Instanceof(Isolate* isolate, InstanceofStub::Flags flags);
+ static Callable InstanceOf(Isolate* isolate);
static Callable ToBoolean(
Isolate* isolate, ToBooleanStub::ResultMode mode,
ToBooleanStub::Types types = ToBooleanStub::Types());
static Callable ToNumber(Isolate* isolate);
+ static Callable ToString(Isolate* isolate);
static Callable ToObject(Isolate* isolate);
static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
PretenureFlag pretenure_flag);
+ static Callable StringCompare(Isolate* isolate);
static Callable Typeof(Isolate* isolate);
@@ -86,10 +88,15 @@ class CodeFactory final {
static Callable FastNewClosure(Isolate* isolate, LanguageMode language_mode,
FunctionKind kind);
+ static Callable ArgumentsAccess(Isolate* isolate, bool is_unmapped_arguments,
+ bool has_duplicate_parameters);
+
static Callable AllocateHeapNumber(Isolate* isolate);
static Callable CallFunction(Isolate* isolate, int argc,
CallFunctionFlags flags);
+
+ static Callable PushArgsAndCall(Isolate* isolate);
};
} // namespace internal
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index 81304e5002..dc2ae554bf 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/code-stubs.h"
#include "src/bailout-reason.h"
-#include "src/code-stubs.h"
#include "src/field-index.h"
#include "src/hydrogen.h"
#include "src/ic/ic.h"
@@ -114,6 +113,9 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* shared_info,
HValue* native_context);
+ HValue* BuildToString(HValue* input, bool convert);
+ HValue* BuildToPrimitive(HValue* input, HValue* input_map);
+
private:
HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
@@ -132,7 +134,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
isolate()->counters()->code_stubs()->Increment();
if (FLAG_trace_hydrogen_stubs) {
- const char* name = CodeStub::MajorName(stub()->MajorKey(), false);
+ const char* name = CodeStub::MajorName(stub()->MajorKey());
PrintF("-----------------------------------------------------------\n");
PrintF("Compiling stub %s using hydrogen\n", name);
isolate()->GetHTracer()->TraceCompilation(info());
@@ -156,8 +158,8 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
HParameter::STACK_PARAMETER, r);
} else {
param = Add<HParameter>(i, HParameter::REGISTER_PARAMETER, r);
+ start_environment->Bind(i, param);
}
- start_environment->Bind(i, param);
parameters_[i] = param;
if (i < register_param_count && IsParameterCountRegister(i)) {
param->set_type(HType::Smi());
@@ -368,14 +370,16 @@ HValue* CodeStubGraphBuilder<TypeofStub>::BuildCodeStub() {
{ Push(Add<HConstant>(factory->symbol_string())); }
is_symbol.Else();
{
+ HValue* bit_field = Add<HLoadNamedField>(
+ map, nullptr, HObjectAccess::ForMapBitField());
+ HValue* bit_field_masked = AddUncasted<HBitwise>(
+ Token::BIT_AND, bit_field,
+ Add<HConstant>((1 << Map::kIsCallable) |
+ (1 << Map::kIsUndetectable)));
IfBuilder is_function(this);
- HConstant* js_function = Add<HConstant>(JS_FUNCTION_TYPE);
- HConstant* js_function_proxy =
- Add<HConstant>(JS_FUNCTION_PROXY_TYPE);
- is_function.If<HCompareNumericAndBranch>(instance_type, js_function,
- Token::EQ);
- is_function.OrIf<HCompareNumericAndBranch>(
- instance_type, js_function_proxy, Token::EQ);
+ is_function.If<HCompareNumericAndBranch>(
+ bit_field_masked, Add<HConstant>(1 << Map::kIsCallable),
+ Token::EQ);
is_function.Then();
{ Push(Add<HConstant>(factory->function_string())); }
is_function.Else();
@@ -391,7 +395,9 @@ HValue* CodeStubGraphBuilder<TypeofStub>::BuildCodeStub() {
#undef SIMD128_BUILDER_OPEN
// Is it an undetectable object?
IfBuilder is_undetectable(this);
- is_undetectable.If<HIsUndetectableAndBranch>(object);
+ is_undetectable.If<HCompareNumericAndBranch>(
+ bit_field_masked, Add<HConstant>(1 << Map::kIsUndetectable),
+ Token::EQ);
is_undetectable.Then();
{
// typeof an undetectable object is 'undefined'.
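The bit-field test above keys typeof off the map's IsCallable and IsUndetectable bits instead of comparing against JS_FUNCTION_TYPE/JS_FUNCTION_PROXY_TYPE; a sketch of the observable behavior:

    typeof function () {};                  // 'function' (callable bit set)
    typeof new Proxy(function () {}, {});   // 'function': a callable proxy, no
                                            // instance-type comparison needed
    // Undetectable objects (e.g. document.all in browsers) report 'undefined'.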
@@ -436,8 +442,9 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
// so that it doesn't build an eager frame.
info()->MarkMustNotHaveEagerFrame();
- HInstruction* allocation_site =
- Add<HLoadKeyed>(GetParameter(0), GetParameter(1), nullptr, FAST_ELEMENTS);
+ HInstruction* allocation_site = Add<HLoadKeyed>(
+ GetParameter(0), GetParameter(1), nullptr, FAST_ELEMENTS,
+ NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
IfBuilder checker(this);
checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
undefined);
@@ -498,8 +505,9 @@ template <>
HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
HValue* undefined = graph()->GetConstantUndefined();
- HInstruction* allocation_site =
- Add<HLoadKeyed>(GetParameter(0), GetParameter(1), nullptr, FAST_ELEMENTS);
+ HInstruction* allocation_site = Add<HLoadKeyed>(
+ GetParameter(0), GetParameter(1), nullptr, FAST_ELEMENTS,
+ NEVER_RETURN_HOLE, LiteralsArray::kOffsetToFirstLiteral - kHeapObjectTag);
IfBuilder checker(this);
checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
@@ -1016,7 +1024,7 @@ Handle<Code> StoreFieldStub::GenerateCode() { return DoGenerateCode(this); }
template <>
HValue* CodeStubGraphBuilder<StoreTransitionStub>::BuildCodeStub() {
- HValue* object = GetParameter(StoreTransitionDescriptor::kReceiverIndex);
+ HValue* object = GetParameter(StoreTransitionHelper::ReceiverIndex());
switch (casted_stub()->store_mode()) {
case StoreTransitionStub::ExtendStorageAndStoreMapAndValue: {
@@ -1047,17 +1055,17 @@ HValue* CodeStubGraphBuilder<StoreTransitionStub>::BuildCodeStub() {
case StoreTransitionStub::StoreMapAndValue:
// Store the new value into the "extended" object.
BuildStoreNamedField(
- object, GetParameter(StoreTransitionDescriptor::kValueIndex),
+ object, GetParameter(StoreTransitionHelper::ValueIndex()),
casted_stub()->index(), casted_stub()->representation(), true);
// Fall through.
case StoreTransitionStub::StoreMapOnly:
// And finally update the map.
Add<HStoreNamedField>(object, HObjectAccess::ForMap(),
- GetParameter(StoreTransitionDescriptor::kMapIndex));
+ GetParameter(StoreTransitionHelper::MapIndex()));
break;
}
- return GetParameter(StoreTransitionDescriptor::kValueIndex);
+ return GetParameter(StoreTransitionHelper::ValueIndex());
}
@@ -1453,6 +1461,140 @@ Handle<Code> BinaryOpWithAllocationSiteStub::GenerateCode() {
}
+HValue* CodeStubGraphBuilderBase::BuildToString(HValue* input, bool convert) {
+ if (!convert) return BuildCheckString(input);
+ IfBuilder if_inputissmi(this);
+ HValue* inputissmi = if_inputissmi.If<HIsSmiAndBranch>(input);
+ if_inputissmi.Then();
+ {
+ // Convert the input smi to a string.
+ Push(BuildNumberToString(input, Type::SignedSmall()));
+ }
+ if_inputissmi.Else();
+ {
+ HValue* input_map =
+ Add<HLoadNamedField>(input, inputissmi, HObjectAccess::ForMap());
+ HValue* input_instance_type = Add<HLoadNamedField>(
+ input_map, inputissmi, HObjectAccess::ForMapInstanceType());
+ IfBuilder if_inputisstring(this);
+ if_inputisstring.If<HCompareNumericAndBranch>(
+ input_instance_type, Add<HConstant>(FIRST_NONSTRING_TYPE), Token::LT);
+ if_inputisstring.Then();
+ {
+ // The input is already a string.
+ Push(input);
+ }
+ if_inputisstring.Else();
+ {
+ // Convert to primitive first (if necessary), see
+ // ES6 section 12.7.3 The Addition operator.
+ IfBuilder if_inputisprimitive(this);
+ STATIC_ASSERT(FIRST_PRIMITIVE_TYPE == FIRST_TYPE);
+ if_inputisprimitive.If<HCompareNumericAndBranch>(
+ input_instance_type, Add<HConstant>(LAST_PRIMITIVE_TYPE), Token::LTE);
+ if_inputisprimitive.Then();
+ {
+ // The input is already a primitive.
+ Push(input);
+ }
+ if_inputisprimitive.Else();
+ {
+ // Convert the input to a primitive.
+ Push(BuildToPrimitive(input, input_map));
+ }
+ if_inputisprimitive.End();
+ // Convert the primitive to a string value.
+ ToStringDescriptor descriptor(isolate());
+ ToStringStub stub(isolate());
+ HValue* values[] = {context(), Pop()};
+ Push(AddUncasted<HCallWithDescriptor>(
+ Add<HConstant>(stub.GetCode()), 0, descriptor,
+ Vector<HValue*>(values, arraysize(values))));
+ }
+ if_inputisstring.End();
+ }
+ if_inputissmi.End();
+ return Pop();
+}
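BuildToString above mirrors the conversion order of ES6 12.7.3 (the Addition operator); a JS-level sketch with illustrative object literals:

    's' + 1;                                // Smi fast path: 's1'
    's' + 'x';                              // already a string: passed through
    's' + { toString() { return 'x'; } };   // ToPrimitive, then ToString: 'sx'
    's' + { valueOf() { return 7; } };      // hint 'default' tries valueOf first: 's7'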
+
+
+HValue* CodeStubGraphBuilderBase::BuildToPrimitive(HValue* input,
+ HValue* input_map) {
+ // Get the native context of the caller.
+ HValue* native_context = BuildGetNativeContext();
+
+ // Determine the initial map of the %ObjectPrototype%.
+ HValue* object_function_prototype_map =
+ Add<HLoadNamedField>(native_context, nullptr,
+ HObjectAccess::ForContextSlot(
+ Context::OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX));
+
+ // Determine the initial map of the %StringPrototype%.
+ HValue* string_function_prototype_map =
+ Add<HLoadNamedField>(native_context, nullptr,
+ HObjectAccess::ForContextSlot(
+ Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+
+ // Determine the initial map of the String function.
+ HValue* string_function = Add<HLoadNamedField>(
+ native_context, nullptr,
+ HObjectAccess::ForContextSlot(Context::STRING_FUNCTION_INDEX));
+ HValue* string_function_initial_map = Add<HLoadNamedField>(
+ string_function, nullptr, HObjectAccess::ForPrototypeOrInitialMap());
+
+ // Determine the map of the [[Prototype]] of {input}.
+ HValue* input_prototype =
+ Add<HLoadNamedField>(input_map, nullptr, HObjectAccess::ForPrototype());
+ HValue* input_prototype_map =
+ Add<HLoadNamedField>(input_prototype, nullptr, HObjectAccess::ForMap());
+
+ // For string wrappers (JSValue instances with [[StringData]] internal
+  // fields), we can short-circuit the ToPrimitive if
+ //
+ // (a) the {input} map matches the initial map of the String function,
+ // (b) the {input} [[Prototype]] is the unmodified %StringPrototype% (i.e.
+ // no one monkey-patched toString, @@toPrimitive or valueOf), and
+ // (c) the %ObjectPrototype% (i.e. the [[Prototype]] of the
+ // %StringPrototype%) is also unmodified, that is no one sneaked a
+ // @@toPrimitive into the %ObjectPrototype%.
+ //
+ // If all these assumptions hold, we can just take the [[StringData]] value
+ // and return it.
+ // TODO(bmeurer): This just repairs a regression introduced by removing the
+ // weird (and broken) intrinsic %_IsStringWrapperSafeForDefaultValue, which
+  // was intended to do something similar to this, although less efficient and
+  // wrong in the presence of @@toPrimitive. Long-term we might want to move
+  // in the direction of having a ToPrimitiveStub that can do common cases
+ // while staying in JavaScript land (i.e. not going to C++).
+ IfBuilder if_inputisstringwrapper(this);
+ if_inputisstringwrapper.If<HCompareObjectEqAndBranch>(
+ input_map, string_function_initial_map);
+ if_inputisstringwrapper.And();
+ if_inputisstringwrapper.If<HCompareObjectEqAndBranch>(
+ input_prototype_map, string_function_prototype_map);
+ if_inputisstringwrapper.And();
+ if_inputisstringwrapper.If<HCompareObjectEqAndBranch>(
+ Add<HLoadNamedField>(Add<HLoadNamedField>(input_prototype_map, nullptr,
+ HObjectAccess::ForPrototype()),
+ nullptr, HObjectAccess::ForMap()),
+ object_function_prototype_map);
+ if_inputisstringwrapper.Then();
+ {
+ Push(BuildLoadNamedField(
+ input, FieldIndex::ForInObjectOffset(JSValue::kValueOffset)));
+ }
+ if_inputisstringwrapper.Else();
+ {
+ // TODO(bmeurer): Add support for fast ToPrimitive conversion using
+ // a dedicated ToPrimitiveStub.
+ Add<HPushArguments>(input);
+ Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kToPrimitive), 1));
+ }
+ if_inputisstringwrapper.End();
+ return Pop();
+}
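The short-circuit above, restated at the JS level (illustrative only; the guards are conditions (a)-(c) in the comment):

    '' + new String('abc');   // 'abc' via the [[StringData]] shortcut
    String.prototype.valueOf = function () { return 'patched'; };
    '' + new String('abc');   // 'patched': %StringPrototype% was modified, so
                              // condition (b) fails and the generic
                              // Runtime::kToPrimitive fallback runs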
+
+
template <>
HValue* CodeStubGraphBuilder<StringAddStub>::BuildCodeInitializedStub() {
StringAddStub* stub = casted_stub();
@@ -1464,10 +1606,12 @@ HValue* CodeStubGraphBuilder<StringAddStub>::BuildCodeInitializedStub() {
// Make sure that both arguments are strings if not known in advance.
if ((flags & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
- left = BuildCheckString(left);
+ left =
+ BuildToString(left, (flags & STRING_ADD_CONVERT) == STRING_ADD_CONVERT);
}
if ((flags & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
- right = BuildCheckString(right);
+ right = BuildToString(right,
+ (flags & STRING_ADD_CONVERT) == STRING_ADD_CONVERT);
}
return BuildStringAdd(left, right, HAllocationMode(pretenure_flag));
@@ -1610,10 +1754,10 @@ Handle<Code> StoreGlobalStub::GenerateCode() {
template <>
HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
- HValue* object = GetParameter(StoreTransitionDescriptor::kReceiverIndex);
- HValue* key = GetParameter(StoreTransitionDescriptor::kNameIndex);
- HValue* value = GetParameter(StoreTransitionDescriptor::kValueIndex);
- HValue* map = GetParameter(StoreTransitionDescriptor::kMapIndex);
+ HValue* object = GetParameter(StoreTransitionHelper::ReceiverIndex());
+ HValue* key = GetParameter(StoreTransitionHelper::NameIndex());
+ HValue* value = GetParameter(StoreTransitionHelper::ValueIndex());
+ HValue* map = GetParameter(StoreTransitionHelper::MapIndex());
if (FLAG_trace_elements_transitions) {
// Tracing elements transitions is the job of the runtime.
@@ -1664,13 +1808,15 @@ void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
optimized_map, map_index, SharedFunctionInfo::kContextOffset);
HValue* osr_ast_slot = LoadFromOptimizedCodeMap(
optimized_map, map_index, SharedFunctionInfo::kOsrAstIdOffset);
+ HValue* code_object = LoadFromOptimizedCodeMap(
+ optimized_map, map_index, SharedFunctionInfo::kCachedCodeOffset);
builder->If<HCompareObjectEqAndBranch>(native_context,
context_slot);
builder->AndIf<HCompareObjectEqAndBranch>(osr_ast_slot, osr_ast_id_none);
+ builder->And();
+ builder->IfNot<HCompareObjectEqAndBranch>(code_object,
+ graph()->GetConstantUndefined());
builder->Then();
- HValue* code_object = LoadFromOptimizedCodeMap(optimized_map,
- map_index, SharedFunctionInfo::kCachedCodeOffset);
- // and the literals
HValue* literals = LoadFromOptimizedCodeMap(optimized_map,
map_index, SharedFunctionInfo::kLiteralsOffset);
@@ -2192,7 +2338,6 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
// KeyedLookupCache miss; call runtime.
Add<HPushArguments>(receiver, key);
Push(Add<HCallRuntime>(
- isolate()->factory()->empty_string(),
Runtime::FunctionForId(is_strong(casted_stub()->language_mode())
? Runtime::kKeyedGetPropertyStrong
: Runtime::kKeyedGetProperty),
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index b48b828ae7..5c8c763a3a 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -7,13 +7,13 @@
#include <sstream>
#include "src/bootstrapper.h"
-#include "src/cpu-profiler.h"
#include "src/factory.h"
#include "src/gdb-jit.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/parser.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -174,7 +174,7 @@ Handle<Code> CodeStub::GetCode() {
Handle<UnseededNumberDictionary>(heap->code_stubs()),
GetKey(),
new_object);
- heap->public_set_code_stubs(*dict);
+ heap->SetRootCodeStubs(*dict);
}
code = *new_object;
}
@@ -187,8 +187,7 @@ Handle<Code> CodeStub::GetCode() {
}
-const char* CodeStub::MajorName(CodeStub::Major major_key,
- bool allow_unknown_keys) {
+const char* CodeStub::MajorName(CodeStub::Major major_key) {
switch (major_key) {
#define DEF_CASE(name) case name: return #name "Stub";
CODE_STUB_LIST(DEF_CASE)
@@ -204,7 +203,7 @@ const char* CodeStub::MajorName(CodeStub::Major major_key,
void CodeStub::PrintBaseName(std::ostream& os) const { // NOLINT
- os << MajorName(MajorKey(), false);
+ os << MajorName(MajorKey());
}
@@ -325,6 +324,12 @@ std::ostream& operator<<(std::ostream& os, const StringAddFlags& flags) {
return os << "CheckRight";
case STRING_ADD_CHECK_BOTH:
return os << "CheckBoth";
+ case STRING_ADD_CONVERT_LEFT:
+ return os << "ConvertLeft";
+ case STRING_ADD_CONVERT_RIGHT:
+ return os << "ConvertRight";
+ case STRING_ADD_CONVERT:
+ break;
}
UNREACHABLE();
return os;
@@ -346,6 +351,7 @@ InlineCacheState CompareICStub::GetICState() const {
switch (state) {
case CompareICState::UNINITIALIZED:
return ::v8::internal::UNINITIALIZED;
+ case CompareICState::BOOLEAN:
case CompareICState::SMI:
case CompareICState::NUMBER:
case CompareICState::INTERNALIZED_STRING:
@@ -384,7 +390,6 @@ bool CompareICStub::FindCodeInSpecialCache(Code** code_out) {
Code::Flags flags = Code::ComputeFlags(
GetCodeKind(),
UNINITIALIZED);
- DCHECK(op() == Token::EQ || op() == Token::EQ_STRICT);
Handle<Object> probe(
known_map_->FindInCodeCache(
strict() ?
@@ -412,6 +417,9 @@ void CompareICStub::Generate(MacroAssembler* masm) {
case CompareICState::UNINITIALIZED:
GenerateMiss(masm);
break;
+ case CompareICState::BOOLEAN:
+ GenerateBooleans(masm);
+ break;
case CompareICState::SMI:
GenerateSmis(masm);
break;
@@ -481,7 +489,7 @@ Handle<JSFunction> GetFunction(Isolate* isolate, const char* name) {
Handle<Code> TurboFanCodeStub::GenerateCode() {
// Get the outer ("stub generator") function.
- const char* name = CodeStub::MajorName(MajorKey(), false);
+ const char* name = CodeStub::MajorName(MajorKey());
Handle<JSFunction> outer = GetFunction(isolate(), name);
DCHECK_EQ(2, outer->shared()->length());
@@ -490,21 +498,13 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
Handle<Object> call_conv = factory->InternalizeUtf8String(name);
Handle<Object> minor_key = factory->NewNumber(MinorKey());
Handle<Object> args[] = {call_conv, minor_key};
- MaybeHandle<Object> result = Execution::Call(
- isolate(), outer, factory->undefined_value(), 2, args, false);
+ MaybeHandle<Object> result =
+ Execution::Call(isolate(), outer, factory->undefined_value(), 2, args);
Handle<JSFunction> inner = Handle<JSFunction>::cast(result.ToHandleChecked());
// Just to make sure nobody calls this...
inner->set_code(isolate()->builtins()->builtin(Builtins::kIllegal));
- Zone zone;
- // Build a "hybrid" CompilationInfo for a JSFunction/CodeStub pair.
- ParseInfo parse_info(&zone, inner);
- CompilationInfo info(&parse_info);
- info.SetFunctionType(GetCallInterfaceDescriptor().GetFunctionType());
- info.MarkAsContextSpecializing();
- info.MarkAsDeoptimizationEnabled();
- info.SetStub(this);
- return info.GenerateCodeStub();
+ return Compiler::GetStubCode(inner, this).ToHandleChecked();
}
@@ -593,24 +593,11 @@ Type* CompareNilICStub::GetInputType(Zone* zone, Handle<Map> map) {
}
-void CallIC_ArrayStub::PrintState(std::ostream& os) const { // NOLINT
- os << state() << " (Array)";
-}
-
-
void CallICStub::PrintState(std::ostream& os) const { // NOLINT
os << state();
}
-void InstanceofStub::PrintName(std::ostream& os) const { // NOLINT
- os << "InstanceofStub";
- if (HasArgsInRegisters()) os << "_REGS";
- if (HasCallSiteInlineCheck()) os << "_INLINE";
- if (ReturnTrueFalseObject()) os << "_TRUEFALSE";
-}
-
-
void JSEntryStub::FinishCode(Handle<Code> code) {
Handle<FixedArray> handler_table =
code->GetIsolate()->factory()->NewFixedArray(1, TENURED);
@@ -653,7 +640,8 @@ CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() const {
return LoadWithVectorDescriptor(isolate());
} else {
DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
- return StoreDescriptor(isolate());
+ return FLAG_vector_stores ? VectorStoreICDescriptor(isolate())
+ : StoreDescriptor(isolate());
}
}
@@ -679,13 +667,24 @@ void ToObjectStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
CallInterfaceDescriptor StoreTransitionStub::GetCallInterfaceDescriptor()
const {
+ if (FLAG_vector_stores) {
+ return VectorStoreTransitionDescriptor(isolate());
+ }
+ return StoreTransitionDescriptor(isolate());
+}
+
+
+CallInterfaceDescriptor
+ElementsTransitionAndStoreStub::GetCallInterfaceDescriptor() const {
+ if (FLAG_vector_stores) {
+ return VectorStoreTransitionDescriptor(isolate());
+ }
return StoreTransitionDescriptor(isolate());
}
void FastNewClosureStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
+ descriptor->Initialize(Runtime::FunctionForId(Runtime::kNewClosure)->entry);
}
@@ -862,11 +861,6 @@ void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
}
-void RestParamAccessStub::Generate(MacroAssembler* masm) {
- GenerateNew(masm);
-}
-
-
void ArgumentsAccessStub::PrintName(std::ostream& os) const { // NOLINT
os << "ArgumentsAccessStub_";
switch (type()) {
@@ -887,11 +881,6 @@ void ArgumentsAccessStub::PrintName(std::ostream& os) const { // NOLINT
}
-void RestParamAccessStub::PrintName(std::ostream& os) const { // NOLINT
- os << "RestParamAccessStub_";
-}
-
-
void CallFunctionStub::PrintName(std::ostream& os) const { // NOLINT
os << "CallFunctionStub_Args" << argc();
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 68d18c7a93..163fdd8808 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -29,18 +29,16 @@ namespace internal {
V(CallConstruct) \
V(CallFunction) \
V(CallIC) \
- V(CallIC_Array) \
V(CEntry) \
V(CompareIC) \
V(DoubleToI) \
V(FunctionPrototype) \
- V(Instanceof) \
+ V(InstanceOf) \
V(InternalArrayConstructor) \
V(JSEntry) \
V(KeyedLoadICTrampoline) \
V(LoadICTrampoline) \
V(CallICTrampoline) \
- V(CallIC_ArrayTrampoline) \
V(LoadIndexedInterceptor) \
V(LoadIndexedString) \
V(MathPow) \
@@ -54,6 +52,7 @@ namespace internal {
V(StubFailureTrampoline) \
V(SubString) \
V(ToNumber) \
+ V(ToString) \
V(ToObject) \
V(VectorStoreICTrampoline) \
V(VectorKeyedStoreICTrampoline) \
@@ -109,8 +108,7 @@ namespace internal {
V(StoreField) \
V(StoreGlobal) \
V(StoreTransition) \
- V(StringLength) \
- V(RestParamAccess)
+ V(StringLength)
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
@@ -199,7 +197,7 @@ class CodeStub BASE_EMBEDDED {
static uint32_t NoCacheKey() { return MajorKeyBits::encode(NoCache); }
- static const char* MajorName(Major major_key, bool allow_unknown_keys);
+ static const char* MajorName(Major major_key);
explicit CodeStub(Isolate* isolate) : minor_key_(0), isolate_(isolate) {}
virtual ~CodeStub() {}
@@ -316,30 +314,6 @@ class CodeStub BASE_EMBEDDED {
};
-// TODO(svenpanne) This class is only used to construct a more or less sensible
-// CompilationInfo for testing purposes, basically pretending that we are
-// currently compiling some kind of code stub. Remove this when the pipeline and
-// testing machinery is restructured in such a way that we don't have to come up
-// with a CompilationInfo out of thin air, although we only need a few parts of
-// it.
-struct FakeStubForTesting : public CodeStub {
- explicit FakeStubForTesting(Isolate* isolate) : CodeStub(isolate) {}
-
- // Only used by pipeline.cc's GetDebugName in DEBUG mode.
- Major MajorKey() const override { return CodeStub::NoCache; }
-
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- UNREACHABLE();
- return CallInterfaceDescriptor();
- }
-
- Handle<Code> GenerateCode() override {
- UNREACHABLE();
- return Handle<Code>();
- }
-};
-
-
#define DEFINE_CODE_STUB_BASE(NAME, SUPER) \
public: \
NAME(uint32_t key, Isolate* isolate) : SUPER(key, isolate) {} \
@@ -702,7 +676,11 @@ enum StringAddFlags {
// Check right parameter.
STRING_ADD_CHECK_RIGHT = 1 << 1,
// Check both parameters.
- STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT
+ STRING_ADD_CHECK_BOTH = STRING_ADD_CHECK_LEFT | STRING_ADD_CHECK_RIGHT,
+ // Convert parameters when check fails (instead of throwing an exception).
+ STRING_ADD_CONVERT = 1 << 2,
+ STRING_ADD_CONVERT_LEFT = STRING_ADD_CHECK_LEFT | STRING_ADD_CONVERT,
+ STRING_ADD_CONVERT_RIGHT = STRING_ADD_CHECK_RIGHT | STRING_ADD_CONVERT
};
@@ -727,8 +705,8 @@ class StringAddTFStub : public TurboFanCodeStub {
}
private:
- class StringAddFlagsBits : public BitField<StringAddFlags, 0, 2> {};
- class PretenureFlagBits : public BitField<PretenureFlag, 2, 1> {};
+ class StringAddFlagsBits : public BitField<StringAddFlags, 0, 3> {};
+ class PretenureFlagBits : public BitField<PretenureFlag, 3, 1> {};
void PrintBaseName(std::ostream& os) const override; // NOLINT
@@ -900,47 +878,14 @@ class GrowArrayElementsStub : public HydrogenCodeStub {
DEFINE_HYDROGEN_CODE_STUB(GrowArrayElements, HydrogenCodeStub);
};
-class InstanceofStub: public PlatformCodeStub {
- public:
- enum Flags {
- kNoFlags = 0,
- kArgsInRegisters = 1 << 0,
- kCallSiteInlineCheck = 1 << 1,
- kReturnTrueFalseObject = 1 << 2
- };
-
- InstanceofStub(Isolate* isolate, Flags flags) : PlatformCodeStub(isolate) {
- minor_key_ = FlagBits::encode(flags);
- }
- static Register left() { return InstanceofDescriptor::left(); }
- static Register right() { return InstanceofDescriptor::right(); }
-
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- if (HasArgsInRegisters()) {
- return InstanceofDescriptor(isolate());
- }
- return ContextOnlyDescriptor(isolate());
- }
+class InstanceOfStub final : public PlatformCodeStub {
+ public:
+ explicit InstanceOfStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
- Flags flags() const { return FlagBits::decode(minor_key_); }
-
- bool HasArgsInRegisters() const { return (flags() & kArgsInRegisters) != 0; }
-
- bool HasCallSiteInlineCheck() const {
- return (flags() & kCallSiteInlineCheck) != 0;
- }
-
- bool ReturnTrueFalseObject() const {
- return (flags() & kReturnTrueFalseObject) != 0;
- }
-
- void PrintName(std::ostream& os) const override; // NOLINT
-
- class FlagBits : public BitField<Flags, 0, 3> {};
-
- DEFINE_PLATFORM_CODE_STUB(Instanceof, PlatformCodeStub);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(InstanceOf);
+ DEFINE_PLATFORM_CODE_STUB(InstanceOf, PlatformCodeStub);
};
@@ -1046,6 +991,7 @@ class CallICStub: public PlatformCodeStub {
// Code generation helpers.
void GenerateMiss(MacroAssembler* masm);
+ void HandleArrayCase(MacroAssembler* masm, Label* miss);
private:
void PrintState(std::ostream& os) const override; // NOLINT
@@ -1055,20 +1001,6 @@ class CallICStub: public PlatformCodeStub {
};
-class CallIC_ArrayStub: public CallICStub {
- public:
- CallIC_ArrayStub(Isolate* isolate, const CallICState& state_in)
- : CallICStub(isolate, state_in) {}
-
- InlineCacheState GetICState() const final { return MONOMORPHIC; }
-
- private:
- void PrintState(std::ostream& os) const override; // NOLINT
-
- DEFINE_PLATFORM_CODE_STUB(CallIC_Array, CallICStub);
-};
-
-
// TODO(verwaest): Translate to hydrogen code stub.
class FunctionPrototypeStub : public PlatformCodeStub {
public:
@@ -1197,10 +1129,15 @@ class KeyedLoadSloppyArgumentsStub : public HandlerStub {
};
+class CommonStoreModeBits : public BitField<KeyedAccessStoreMode, 0, 3> {};
+
class KeyedStoreSloppyArgumentsStub : public HandlerStub {
public:
- explicit KeyedStoreSloppyArgumentsStub(Isolate* isolate)
- : HandlerStub(isolate) {}
+ explicit KeyedStoreSloppyArgumentsStub(Isolate* isolate,
+ KeyedAccessStoreMode mode)
+ : HandlerStub(isolate) {
+ set_sub_minor_key(CommonStoreModeBits::encode(mode));
+ }
protected:
Code::Kind kind() const override { return Code::KEYED_STORE_IC; }
@@ -1278,6 +1215,69 @@ class StoreFieldStub : public HandlerStub {
};
+// Register and parameter access methods are specified here instead of in
+// the CallInterfaceDescriptor because the stub uses a different descriptor
+// if FLAG_vector_stores is on.
+class StoreTransitionHelper {
+ public:
+ static Register ReceiverRegister() {
+ return StoreTransitionDescriptor::ReceiverRegister();
+ }
+
+ static Register NameRegister() {
+ return StoreTransitionDescriptor::NameRegister();
+ }
+
+ static Register ValueRegister() {
+ return StoreTransitionDescriptor::ValueRegister();
+ }
+
+ static Register SlotRegister() {
+ DCHECK(FLAG_vector_stores);
+ return VectorStoreTransitionDescriptor::SlotRegister();
+ }
+
+ static Register VectorRegister() {
+ DCHECK(FLAG_vector_stores);
+ return VectorStoreTransitionDescriptor::VectorRegister();
+ }
+
+ static Register MapRegister() {
+ return FLAG_vector_stores ? VectorStoreTransitionDescriptor::MapRegister()
+ : StoreTransitionDescriptor::MapRegister();
+ }
+
+ static int ReceiverIndex() {
+ return StoreTransitionDescriptor::kReceiverIndex;
+ }
+
+  static int NameIndex() { return StoreTransitionDescriptor::kNameIndex; }
+
+ static int ValueIndex() { return StoreTransitionDescriptor::kValueIndex; }
+
+ static int SlotIndex() {
+ DCHECK(FLAG_vector_stores);
+ return VectorStoreTransitionDescriptor::kSlotIndex;
+ }
+
+ static int VectorIndex() {
+ DCHECK(FLAG_vector_stores);
+ return VectorStoreTransitionDescriptor::kVectorIndex;
+ }
+
+ static int MapIndex() {
+ if (FLAG_vector_stores) {
+ return VectorStoreTransitionDescriptor::kMapIndex;
+ }
+ return StoreTransitionDescriptor::kMapIndex;
+ }
+
+ // Some platforms push Slot, Vector, Map on the stack instead of in
+ // registers.
+ static bool UsesStackArgs() { return MapRegister().is(no_reg); }
+};
+
+
class StoreTransitionStub : public HandlerStub {
public:
enum StoreMode {
@@ -1651,8 +1651,8 @@ class StringAddStub final : public HydrogenCodeStub {
static const int kRight = 1;
private:
- class StringAddFlagsBits: public BitField<StringAddFlags, 0, 2> {};
- class PretenureFlagBits: public BitField<PretenureFlag, 2, 1> {};
+ class StringAddFlagsBits : public BitField<StringAddFlags, 0, 3> {};
+ class PretenureFlagBits : public BitField<PretenureFlag, 3, 1> {};
void PrintBaseName(std::ostream& os) const override; // NOLINT
@@ -1697,6 +1697,7 @@ class CompareICStub : public PlatformCodeStub {
private:
Code::Kind GetCodeKind() const override { return Code::COMPARE_IC; }
+ void GenerateBooleans(MacroAssembler* masm);
void GenerateSmis(MacroAssembler* masm);
void GenerateNumbers(MacroAssembler* masm);
void GenerateInternalizedStrings(MacroAssembler* masm);
@@ -1898,8 +1899,19 @@ class ArgumentsAccessStub: public PlatformCodeStub {
CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
if (type() == READ_ELEMENT) {
return ArgumentsAccessReadDescriptor(isolate());
+ } else {
+ return ArgumentsAccessNewDescriptor(isolate());
+ }
+ }
+
+ static Type ComputeType(bool is_unmapped, bool has_duplicate_parameters) {
+ if (is_unmapped) {
+ return Type::NEW_STRICT;
+ } else if (has_duplicate_parameters) {
+ return Type::NEW_SLOPPY_SLOW;
+ } else {
+ return Type::NEW_SLOPPY_FAST;
}
- return ContextOnlyDescriptor(isolate());
}
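How ComputeType's three cases surface in JS (a sketch; the function names are illustrative): strict-mode arguments are unmapped, sloppy-mode arguments alias their parameters, and duplicate parameter names force the slow sloppy path.

    function sloppy(a) { arguments[0] = 9; return a; }
    sloppy(1);                          // 9: NEW_SLOPPY_FAST, mapped arguments
    function strict(a) { 'use strict'; arguments[0] = 9; return a; }
    strict(1);                          // 1: NEW_STRICT, unmapped arguments
    function dup(a, a) { return a; }    // duplicate names (sloppy mode only)
    dup(1, 2);                          // 2: NEW_SLOPPY_SLOW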
private:
@@ -1918,23 +1930,6 @@ class ArgumentsAccessStub: public PlatformCodeStub {
};
-class RestParamAccessStub: public PlatformCodeStub {
- public:
- explicit RestParamAccessStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
- CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
- return ContextOnlyDescriptor(isolate());
- }
-
- private:
- void GenerateNew(MacroAssembler* masm);
-
- void PrintName(std::ostream& os) const override; // NOLINT
-
- DEFINE_PLATFORM_CODE_STUB(RestParamAccess, PlatformCodeStub);
-};
-
-
class RegExpExecStub: public PlatformCodeStub {
public:
explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
@@ -1959,6 +1954,8 @@ class RegExpConstructResultStub final : public HydrogenCodeStub {
};
+// TODO(bmeurer): Deprecate the CallFunctionStub in favor of the more general
+// Invoke family of builtins.
class CallFunctionStub: public PlatformCodeStub {
public:
CallFunctionStub(Isolate* isolate, int argc, CallFunctionFlags flags)
@@ -2343,16 +2340,6 @@ class CallICTrampolineStub : public PlatformCodeStub {
};
-class CallIC_ArrayTrampolineStub : public CallICTrampolineStub {
- public:
- CallIC_ArrayTrampolineStub(Isolate* isolate, const CallICState& state)
- : CallICTrampolineStub(isolate, state) {}
-
- private:
- DEFINE_PLATFORM_CODE_STUB(CallIC_ArrayTrampoline, CallICTrampolineStub);
-};
-
-
class LoadICStub : public PlatformCodeStub {
public:
explicit LoadICStub(Isolate* isolate, const LoadICState& state)
@@ -2589,9 +2576,9 @@ class StoreFastElementStub : public HydrogenCodeStub {
StoreFastElementStub(Isolate* isolate, bool is_js_array,
ElementsKind elements_kind, KeyedAccessStoreMode mode)
: HydrogenCodeStub(isolate) {
- set_sub_minor_key(ElementsKindBits::encode(elements_kind) |
- IsJSArrayBits::encode(is_js_array) |
- StoreModeBits::encode(mode));
+ set_sub_minor_key(CommonStoreModeBits::encode(mode) |
+ ElementsKindBits::encode(elements_kind) |
+ IsJSArrayBits::encode(is_js_array));
}
static void GenerateAheadOfTime(Isolate* isolate);
@@ -2603,15 +2590,22 @@ class StoreFastElementStub : public HydrogenCodeStub {
}
KeyedAccessStoreMode store_mode() const {
- return StoreModeBits::decode(sub_minor_key());
+ return CommonStoreModeBits::decode(sub_minor_key());
+ }
+
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
+ if (FLAG_vector_stores) {
+ return VectorStoreICDescriptor(isolate());
+ }
+ return StoreDescriptor(isolate());
}
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+
private:
- class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
- class StoreModeBits: public BitField<KeyedAccessStoreMode, 8, 4> {};
- class IsJSArrayBits: public BitField<bool, 12, 1> {};
+ class ElementsKindBits : public BitField<ElementsKind, 3, 8> {};
+ class IsJSArrayBits : public BitField<bool, 11, 1> {};
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
DEFINE_HYDROGEN_CODE_STUB(StoreFastElement, HydrogenCodeStub);
};
@@ -2824,19 +2818,29 @@ class InternalArrayNArgumentsConstructorStub : public
class StoreElementStub : public PlatformCodeStub {
public:
- StoreElementStub(Isolate* isolate, ElementsKind elements_kind)
+ StoreElementStub(Isolate* isolate, ElementsKind elements_kind,
+ KeyedAccessStoreMode mode)
: PlatformCodeStub(isolate) {
- minor_key_ = ElementsKindBits::encode(elements_kind);
+ minor_key_ = ElementsKindBits::encode(elements_kind) |
+ CommonStoreModeBits::encode(mode);
}
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override {
+ if (FLAG_vector_stores) {
+ return VectorStoreICDescriptor(isolate());
+ }
+ return StoreDescriptor(isolate());
+ }
+
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+
private:
ElementsKind elements_kind() const {
return ElementsKindBits::decode(minor_key_);
}
- class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
+ class ElementsKindBits : public BitField<ElementsKind, 3, 8> {};
- DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
DEFINE_PLATFORM_CODE_STUB(StoreElement, PlatformCodeStub);
};
@@ -2939,25 +2943,26 @@ class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
ElementsKind to_kind, bool is_jsarray,
KeyedAccessStoreMode store_mode)
: HydrogenCodeStub(isolate) {
- set_sub_minor_key(FromBits::encode(from_kind) | ToBits::encode(to_kind) |
- IsJSArrayBits::encode(is_jsarray) |
- StoreModeBits::encode(store_mode));
+ set_sub_minor_key(CommonStoreModeBits::encode(store_mode) |
+ FromBits::encode(from_kind) | ToBits::encode(to_kind) |
+ IsJSArrayBits::encode(is_jsarray));
}
ElementsKind from_kind() const { return FromBits::decode(sub_minor_key()); }
ElementsKind to_kind() const { return ToBits::decode(sub_minor_key()); }
bool is_jsarray() const { return IsJSArrayBits::decode(sub_minor_key()); }
KeyedAccessStoreMode store_mode() const {
- return StoreModeBits::decode(sub_minor_key());
+ return CommonStoreModeBits::decode(sub_minor_key());
}
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() const override;
+ Code::Kind GetCodeKind() const override { return Code::HANDLER; }
+
private:
- class FromBits : public BitField<ElementsKind, 0, 8> {};
- class ToBits : public BitField<ElementsKind, 8, 8> {};
- class IsJSArrayBits : public BitField<bool, 16, 1> {};
- class StoreModeBits : public BitField<KeyedAccessStoreMode, 17, 4> {};
+ class FromBits : public BitField<ElementsKind, 3, 8> {};
+ class ToBits : public BitField<ElementsKind, 11, 8> {};
+ class IsJSArrayBits : public BitField<bool, 19, 1> {};
- DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreTransition);
DEFINE_HYDROGEN_CODE_STUB(ElementsTransitionAndStore, HydrogenCodeStub);
};
@@ -3053,6 +3058,15 @@ class ToNumberStub final : public PlatformCodeStub {
};
+class ToStringStub final : public PlatformCodeStub {
+ public:
+ explicit ToStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(ToString);
+ DEFINE_PLATFORM_CODE_STUB(ToString, PlatformCodeStub);
+};
+
+
class ToObjectStub final : public HydrogenCodeStub {
public:
explicit ToObjectStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
@@ -3066,7 +3080,7 @@ class StringCompareStub : public PlatformCodeStub {
public:
explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
- DEFINE_CALL_INTERFACE_DESCRIPTOR(ContextOnly);
+ DEFINE_CALL_INTERFACE_DESCRIPTOR(StringCompare);
DEFINE_PLATFORM_CODE_STUB(StringCompare, PlatformCodeStub);
};
@@ -3079,6 +3093,8 @@ class StringCompareStub : public PlatformCodeStub {
#undef DEFINE_CODE_STUB_BASE
extern Representation RepresentationFromType(Type* type);
-} } // namespace v8::internal
+
+} // namespace internal
+} // namespace v8
#endif // V8_CODE_STUBS_H_
diff --git a/deps/v8/src/code.h b/deps/v8/src/code.h
deleted file mode 100644
index a0639e8deb..0000000000
--- a/deps/v8/src/code.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CODE_H_
-#define V8_CODE_H_
-
-#include "src/allocation.h"
-#include "src/handles.h"
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-
-
-// Wrapper class for passing expected and actual parameter counts as
-// either registers or immediate values. Used to make sure that the
-// caller provides exactly the expected number of parameters to the
-// callee.
-class ParameterCount BASE_EMBEDDED {
- public:
- explicit ParameterCount(Register reg)
- : reg_(reg), immediate_(0) { }
- explicit ParameterCount(int immediate)
- : reg_(no_reg), immediate_(immediate) { }
- explicit ParameterCount(Handle<JSFunction> f)
- : reg_(no_reg),
- immediate_(f->shared()->internal_formal_parameter_count()) {}
-
- bool is_reg() const { return !reg_.is(no_reg); }
- bool is_immediate() const { return !is_reg(); }
-
- Register reg() const {
- DCHECK(is_reg());
- return reg_;
- }
- int immediate() const {
- DCHECK(is_immediate());
- return immediate_;
- }
-
- private:
- const Register reg_;
- const int immediate_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(ParameterCount);
-};
-
-
-} } // namespace v8::internal
-
-#endif // V8_CODE_H_
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index c9de2b1af1..fd039d0f8a 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -5,14 +5,14 @@
#include "src/codegen.h"
#if defined(V8_OS_AIX)
-#include <fenv.h>
+#include <fenv.h> // NOLINT(build/c++11)
#endif
#include "src/bootstrapper.h"
#include "src/compiler.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
#include "src/parser.h"
#include "src/prettyprinter.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/rewriter.h"
#include "src/runtime/runtime.h"
@@ -121,16 +121,8 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
}
if (FLAG_trace_codegen || print_source || print_ast) {
- PrintF("[generating %s code for %s function: ", kind, ftype);
- if (info->IsStub()) {
- const char* name =
- CodeStub::MajorName(info->code_stub()->MajorKey(), true);
- PrintF("%s", name == NULL ? "<unknown>" : name);
- } else {
- AllowDeferredHandleDereference allow_deference_for_trace;
- PrintF("%s", info->literal()->debug_name()->ToCString().get());
- }
- PrintF("]\n");
+ base::SmartArrayPointer<char> name = info->GetDebugName();
+ PrintF("[generating %s code for %s function: %s]", kind, ftype, name.get());
}
#ifdef DEBUG
@@ -149,10 +141,19 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
CompilationInfo* info) {
Isolate* isolate = info->isolate();
+ Code::Flags flags;
+ if (info->IsStub() && info->code_stub()) {
+ DCHECK_EQ(info->output_code_kind(), info->code_stub()->GetCodeKind());
+ flags = Code::ComputeFlags(
+ info->output_code_kind(), info->code_stub()->GetICState(),
+ info->code_stub()->GetExtraICState(), info->code_stub()->GetStubType());
+ } else {
+ flags = Code::ComputeFlags(info->output_code_kind());
+ }
+
// Allocate and install the code.
CodeDesc desc;
bool is_crankshafted =
@@ -181,16 +182,7 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
(info->IsStub() && FLAG_print_code_stubs) ||
(info->IsOptimizing() && FLAG_print_opt_code));
if (print_code) {
- const char* debug_name;
- base::SmartArrayPointer<char> debug_name_holder;
- if (info->IsStub()) {
- CodeStub::Major major_key = info->code_stub()->MajorKey();
- debug_name = CodeStub::MajorName(major_key, false);
- } else {
- debug_name_holder = info->literal()->debug_name()->ToCString();
- debug_name = debug_name_holder.get();
- }
-
+ base::SmartArrayPointer<char> debug_name = info->GetDebugName();
CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
@@ -220,7 +212,7 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
if (info->IsOptimizing()) {
if (FLAG_print_unopt_code && info->parse_info()) {
os << "--- Unoptimized code ---\n";
- info->closure()->shared()->code()->Disassemble(debug_name, os);
+ info->closure()->shared()->code()->Disassemble(debug_name.get(), os);
}
os << "--- Optimized code ---\n"
<< "optimization_id = " << info->optimization_id() << "\n";
@@ -231,7 +223,7 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
FunctionLiteral* literal = info->literal();
os << "source_position = " << literal->start_position() << "\n";
}
- code->Disassemble(debug_name, os);
+ code->Disassemble(debug_name.get(), os);
os << "--- End code ---\n";
}
#endif // ENABLE_DISASSEMBLER
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 9025a9fca6..04f130999e 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -76,7 +76,6 @@ class CodeGenerator {
// Allocate and install the code.
static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
- Code::Flags flags,
CompilationInfo* info);
// Print the code after compiling it.
diff --git a/deps/v8/src/collection-iterator.js b/deps/v8/src/collection-iterator.js
index 8cae6060d0..c799d6f9cd 100644
--- a/deps/v8/src/collection-iterator.js
+++ b/deps/v8/src/collection-iterator.js
@@ -15,6 +15,8 @@ var $setValues;
var GlobalMap = global.Map;
var GlobalSet = global.Set;
+var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
// -------------------------------------------------------------------
@@ -30,21 +32,21 @@ function SetIteratorNextJS() {
}
var value_array = [UNDEFINED, UNDEFINED];
- var entry = {value: value_array, done: false};
+ var result = %_CreateIterResultObject(value_array, false);
switch (%SetIteratorNext(this, value_array)) {
case 0:
- entry.value = UNDEFINED;
- entry.done = true;
+ result.value = UNDEFINED;
+ result.done = true;
break;
case ITERATOR_KIND_VALUES:
- entry.value = value_array[0];
+ result.value = value_array[0];
break;
case ITERATOR_KIND_ENTRIES:
value_array[1] = value_array[0];
break;
}
- return entry;
+ return result;
}
@@ -74,7 +76,7 @@ utils.InstallFunctions(SetIterator.prototype, DONT_ENUM, [
'next', SetIteratorNextJS
]);
-%AddNamedProperty(SetIterator.prototype, symbolToStringTag,
+%AddNamedProperty(SetIterator.prototype, toStringTagSymbol,
"Set Iterator", READ_ONLY | DONT_ENUM);
utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
@@ -83,7 +85,7 @@ utils.InstallFunctions(GlobalSet.prototype, DONT_ENUM, [
'values', SetValues
]);
-%AddNamedProperty(GlobalSet.prototype, symbolIterator, SetValues, DONT_ENUM);
+%AddNamedProperty(GlobalSet.prototype, iteratorSymbol, SetValues, DONT_ENUM);
$setIteratorNext = SetIteratorNextJS;
$setValues = SetValues;
@@ -102,22 +104,22 @@ function MapIteratorNextJS() {
}
var value_array = [UNDEFINED, UNDEFINED];
- var entry = {value: value_array, done: false};
+ var result = %_CreateIterResultObject(value_array, false);
switch (%MapIteratorNext(this, value_array)) {
case 0:
- entry.value = UNDEFINED;
- entry.done = true;
+ result.value = UNDEFINED;
+ result.done = true;
break;
case ITERATOR_KIND_KEYS:
- entry.value = value_array[0];
+ result.value = value_array[0];
break;
case ITERATOR_KIND_VALUES:
- entry.value = value_array[1];
+ result.value = value_array[1];
break;
// ITERATOR_KIND_ENTRIES does not need any processing.
}
- return entry;
+ return result;
}
@@ -156,7 +158,7 @@ utils.InstallFunctions(MapIterator.prototype, DONT_ENUM, [
'next', MapIteratorNextJS
]);
-%AddNamedProperty(MapIterator.prototype, symbolToStringTag,
+%AddNamedProperty(MapIterator.prototype, toStringTagSymbol,
"Map Iterator", READ_ONLY | DONT_ENUM);
@@ -166,7 +168,7 @@ utils.InstallFunctions(GlobalMap.prototype, DONT_ENUM, [
'values', MapValues
]);
-%AddNamedProperty(GlobalMap.prototype, symbolIterator, MapEntries, DONT_ENUM);
+%AddNamedProperty(GlobalMap.prototype, iteratorSymbol, MapEntries, DONT_ENUM);
$mapEntries = MapEntries;
$mapIteratorNext = MapIteratorNextJS;
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index 6d43384e1c..8bf6ec3515 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -16,7 +16,9 @@ var $getExistingHash;
var GlobalMap = global.Map;
var GlobalObject = global.Object;
var GlobalSet = global.Set;
+var hashCodeSymbol = utils.ImportNow("hash_code_symbol");
var IntRandom;
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
IntRandom = from.IntRandom;
@@ -90,8 +92,6 @@ function ComputeIntegerHash(key, seed) {
}
%SetForceInlineFlag(ComputeIntegerHash);
-var hashCodeSymbol = GLOBAL_PRIVATE("hash_code_symbol");
-
function GetExistingHash(key) {
if (%_IsSmi(key)) {
return ComputeIntegerHash(key, 0);
@@ -134,12 +134,12 @@ function SetConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.add;
- if (!IS_SPEC_FUNCTION(adder)) {
+ if (!IS_CALLABLE(adder)) {
throw MakeTypeError(kPropertyNotFunction, 'add', this);
}
for (var value of iterable) {
- %_CallFunction(this, value, adder);
+ %_Call(adder, this, value);
}
}
}
@@ -245,13 +245,7 @@ function SetForEach(f, receiver) {
'Set.prototype.forEach', this);
}
- if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
- var needs_wrapper = false;
- if (IS_NULL(receiver)) {
- if (%IsSloppyModeFunction(f)) receiver = UNDEFINED;
- } else if (!IS_UNDEFINED(receiver)) {
- needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
- }
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var iterator = new SetIterator(this, ITERATOR_KIND_VALUES);
var key;
@@ -260,8 +254,7 @@ function SetForEach(f, receiver) {
while (%SetIteratorNext(iterator, value_array)) {
if (stepping) %DebugPrepareStepInIfStepping(f);
key = value_array[0];
- var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
- %_CallFunction(new_receiver, key, key, this, f);
+ %_Call(f, receiver, key, key, this);
}
}
@@ -271,7 +264,7 @@ function SetForEach(f, receiver) {
%FunctionSetLength(GlobalSet, 0);
%FunctionSetPrototype(GlobalSet, new GlobalObject());
%AddNamedProperty(GlobalSet.prototype, "constructor", GlobalSet, DONT_ENUM);
-%AddNamedProperty(GlobalSet.prototype, symbolToStringTag, "Set",
+%AddNamedProperty(GlobalSet.prototype, toStringTagSymbol, "Set",
DONT_ENUM | READ_ONLY);
%FunctionSetLength(SetForEach, 1);
@@ -299,7 +292,7 @@ function MapConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.set;
- if (!IS_SPEC_FUNCTION(adder)) {
+ if (!IS_CALLABLE(adder)) {
throw MakeTypeError(kPropertyNotFunction, 'set', this);
}
@@ -307,7 +300,7 @@ function MapConstructor(iterable) {
if (!IS_SPEC_OBJECT(nextItem)) {
throw MakeTypeError(kIteratorValueNotAnObject, nextItem);
}
- %_CallFunction(this, nextItem[0], nextItem[1], adder);
+ %_Call(adder, this, nextItem[0], nextItem[1]);
}
}
}
@@ -436,21 +429,14 @@ function MapForEach(f, receiver) {
'Map.prototype.forEach', this);
}
- if (!IS_SPEC_FUNCTION(f)) throw MakeTypeError(kCalledNonCallable, f);
- var needs_wrapper = false;
- if (IS_NULL(receiver)) {
- if (%IsSloppyModeFunction(f)) receiver = UNDEFINED;
- } else if (!IS_UNDEFINED(receiver)) {
- needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
- }
+ if (!IS_CALLABLE(f)) throw MakeTypeError(kCalledNonCallable, f);
var iterator = new MapIterator(this, ITERATOR_KIND_ENTRIES);
var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
var value_array = [UNDEFINED, UNDEFINED];
while (%MapIteratorNext(iterator, value_array)) {
if (stepping) %DebugPrepareStepInIfStepping(f);
- var new_receiver = needs_wrapper ? TO_OBJECT(receiver) : receiver;
- %_CallFunction(new_receiver, value_array[1], value_array[0], this, f);
+ %_Call(f, receiver, value_array[1], value_array[0], this);
}
}
@@ -461,7 +447,7 @@ function MapForEach(f, receiver) {
%FunctionSetPrototype(GlobalMap, new GlobalObject());
%AddNamedProperty(GlobalMap.prototype, "constructor", GlobalMap, DONT_ENUM);
%AddNamedProperty(
- GlobalMap.prototype, symbolToStringTag, "Map", DONT_ENUM | READ_ONLY);
+ GlobalMap.prototype, toStringTagSymbol, "Map", DONT_ENUM | READ_ONLY);
%FunctionSetLength(MapForEach, 1);
@@ -486,7 +472,7 @@ function MapFromArray(array) {
for (var i = 0; i < length; i += 2) {
var key = array[i];
var value = array[i + 1];
- %_CallFunction(map, key, value, MapSet);
+ %_Call(MapSet, map, key, value);
}
return map;
};
@@ -495,7 +481,7 @@ function SetFromArray(array) {
var set = new GlobalSet;
var length = array.length;
for (var i = 0; i < length; ++i) {
- %_CallFunction(set, array[i], SetAdd);
+ %_Call(SetAdd, set, array[i]);
}
return set;
};
@@ -503,16 +489,16 @@ function SetFromArray(array) {
// -----------------------------------------------------------------------
// Exports
-utils.ExportToRuntime(function(to) {
- to.MapGet = MapGet;
- to.MapSet = MapSet;
- to.MapHas = MapHas;
- to.MapDelete = MapDelete;
- to.SetAdd = SetAdd;
- to.SetHas = SetHas;
- to.SetDelete = SetDelete;
- to.MapFromArray = MapFromArray;
- to.SetFromArray = SetFromArray;
-});
+%InstallToContext([
+ "map_get", MapGet,
+ "map_set", MapSet,
+ "map_has", MapHas,
+ "map_delete", MapDelete,
+ "set_add", SetAdd,
+ "set_has", SetHas,
+ "set_delete", SetDelete,
+ "map_from_array", MapFromArray,
+ "set_from_array",SetFromArray,
+]);
})
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 46d783866f..aca8cee9ca 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/compilation-cache.h"
#include "src/assembler.h"
-#include "src/compilation-cache.h"
+#include "src/counters.h"
+#include "src/factory.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -122,8 +124,8 @@ bool CompilationCacheScript::HasOrigin(Handle<SharedFunctionInfo> function_info,
return script->name()->IsUndefined();
}
// Do the fast bailout checks first.
- if (line_offset != script->line_offset()->value()) return false;
- if (column_offset != script->column_offset()->value()) return false;
+ if (line_offset != script->line_offset()) return false;
+ if (column_offset != script->column_offset()) return false;
// Check that both names are strings. If not, no match.
if (!name->IsString() || !script->name()->IsString()) return false;
// Are the origin_options same?
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index 136bb97ea8..2d7609ec18 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -5,6 +5,10 @@
#ifndef V8_COMPILATION_CACHE_H_
#define V8_COMPILATION_CACHE_H_
+#include "src/allocation.h"
+#include "src/handles.h"
+#include "src/objects.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compilation-statistics.cc b/deps/v8/src/compilation-statistics.cc
index 2686ff74ed..ed568cba3f 100644
--- a/deps/v8/src/compilation-statistics.cc
+++ b/deps/v8/src/compilation-statistics.cc
@@ -67,16 +67,15 @@ static void WriteLine(std::ostream& os, const char* name,
static_cast<double>(stats.total_allocated_bytes_ * 100) /
static_cast<double>(total_stats.total_allocated_bytes_);
base::OS::SNPrintF(buffer, kBufferSize,
- "%28s %10.3f ms / %5.1f %%"
- "%10u total / %5.1f %% "
- "%10u max %10u abs_max",
+ "%28s %10.3f (%5.1f%%) "
+ "%10u (%5.1f%%) %10u %10u",
name, ms, percent, stats.total_allocated_bytes_,
size_percent, stats.max_allocated_bytes_,
stats.absolute_max_allocated_bytes_);
os << buffer;
if (stats.function_name_.size() > 0) {
- os << " : " << stats.function_name_.c_str();
+ os << " " << stats.function_name_.c_str();
}
os << std::endl;
}
@@ -90,7 +89,10 @@ static void WriteFullLine(std::ostream& os) {
static void WriteHeader(std::ostream& os) {
WriteFullLine(os);
- os << " Turbofan timing results:\n";
+ os << " Turbonfan phase Time (ms) "
+ << " Space (bytes) Function\n"
+ << " "
+ << " Total Max. Abs. max.\n";
WriteFullLine(os);
}
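The reworked WriteLine/WriteHeader above print the Turbofan statistics in fixed-width columns. A standalone illustration of that printf-style layout, using the same data-row format string (the sample values are made up):

#include <cstdio>

int main() {
  // One data row in the same fixed-width columns as WriteLine above.
  std::printf("%28s %10.3f (%5.1f%%) %10u (%5.1f%%) %10u %10u\n",
              "typed lowering", 1.234, 12.5, 4096u, 3.1, 2048u, 8192u);
  return 0;
}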
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index c39936cc25..20aa558c3d 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -11,7 +11,6 @@
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compiler/pipeline.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/deoptimizer.h"
@@ -19,11 +18,13 @@
#include "src/gdb-jit.h"
#include "src/hydrogen.h"
#include "src/interpreter/interpreter.h"
+#include "src/isolate-inl.h"
#include "src/lithium.h"
#include "src/log-inl.h"
#include "src/messages.h"
#include "src/parser.h"
#include "src/prettyprinter.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/rewriter.h"
#include "src/runtime-profiler.h"
#include "src/scanner-character-streams.h"
@@ -119,7 +120,7 @@ bool CompilationInfo::has_scope() const {
CompilationInfo::CompilationInfo(ParseInfo* parse_info)
- : CompilationInfo(parse_info, nullptr, BASE, parse_info->isolate(),
+ : CompilationInfo(parse_info, nullptr, nullptr, BASE, parse_info->isolate(),
parse_info->zone()) {
// Compiling for the snapshot typically results in different code than
// compiling later on. This means that code recompiled with deoptimization
@@ -129,7 +130,7 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info)
// with deoptimization support.
if (isolate_->serializer_enabled()) EnableDeoptimizationSupport();
- if (FLAG_context_specialization) MarkAsContextSpecializing();
+ if (FLAG_function_context_specialization) MarkAsFunctionContextSpecializing();
if (FLAG_turbo_inlining) MarkAsInliningEnabled();
if (FLAG_turbo_source_positions) MarkAsSourcePositionsEnabled();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
@@ -148,11 +149,18 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info)
CompilationInfo::CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone)
- : CompilationInfo(nullptr, stub, STUB, isolate, zone) {}
+ : CompilationInfo(nullptr, stub, CodeStub::MajorName(stub->MajorKey()),
+ STUB, isolate, zone) {}
+CompilationInfo::CompilationInfo(const char* debug_name, Isolate* isolate,
+ Zone* zone)
+ : CompilationInfo(nullptr, nullptr, debug_name, STUB, isolate, zone) {
+ set_output_code_kind(Code::STUB);
+}
CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
- Mode mode, Isolate* isolate, Zone* zone)
+ const char* debug_name, Mode mode,
+ Isolate* isolate, Zone* zone)
: parse_info_(parse_info),
isolate_(isolate),
flags_(0),
@@ -173,7 +181,20 @@ CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
parameter_count_(0),
optimization_id_(-1),
osr_expr_stack_height_(0),
- function_type_(nullptr) {}
+ function_type_(nullptr),
+ debug_name_(debug_name) {
+ // Parameter count is number of stack parameters.
+ if (code_stub_ != NULL) {
+ CodeStubDescriptor descriptor(code_stub_);
+ parameter_count_ = descriptor.GetStackParameterCount();
+ if (descriptor.function_mode() == NOT_JS_FUNCTION_STUB_MODE) {
+ parameter_count_--;
+ }
+ set_output_code_kind(code_stub->GetCodeKind());
+ } else {
+ set_output_code_kind(Code::FUNCTION);
+ }
+}
CompilationInfo::~CompilationInfo() {
@@ -188,6 +209,14 @@ CompilationInfo::~CompilationInfo() {
}
+void CompilationInfo::SetStub(CodeStub* code_stub) {
+ SetMode(STUB);
+ code_stub_ = code_stub;
+ debug_name_ = CodeStub::MajorName(code_stub->MajorKey());
+ set_output_code_kind(code_stub->GetCodeKind());
+}
+
+
int CompilationInfo::num_parameters() const {
return has_scope() ? scope()->num_parameters() : parameter_count_;
}
@@ -206,15 +235,6 @@ int CompilationInfo::num_heap_slots() const {
}
-Code::Flags CompilationInfo::flags() const {
- return code_stub() != nullptr
- ? Code::ComputeFlags(
- code_stub()->GetCodeKind(), code_stub()->GetICState(),
- code_stub()->GetExtraICState(), code_stub()->GetStubType())
- : Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
-}
-
-
// Primitive functions are unlikely to be picked up by the stack-walking
// profiler, so they trigger their own optimization when they're called
// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
@@ -244,11 +264,6 @@ bool CompilationInfo::has_simple_parameters() {
}
-bool CompilationInfo::MayUseThis() const {
- return scope()->has_this_declaration() && scope()->receiver()->is_used();
-}
-
-
int CompilationInfo::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
SourcePosition position,
int parent_id) {
@@ -259,7 +274,7 @@ int CompilationInfo::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
shared->start_position());
if (!shared->script()->IsUndefined()) {
Handle<Script> script(Script::cast(shared->script()));
- info.script_id = script->id()->value();
+ info.script_id = script->id();
if (FLAG_hydrogen_track_positions && !script->source()->IsUndefined()) {
CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
@@ -302,11 +317,22 @@ void CompilationInfo::LogDeoptCallPosition(int pc_offset, int inlining_id) {
}
-Handle<Code> CompilationInfo::GenerateCodeStub() {
- // Run a "mini pipeline", extracted from compiler.cc.
- CHECK(Parser::ParseStatic(parse_info()));
- CHECK(Compiler::Analyze(parse_info()));
- return compiler::Pipeline(this).GenerateCode();
+base::SmartArrayPointer<char> CompilationInfo::GetDebugName() const {
+ if (parse_info()) {
+ AllowHandleDereference allow_deref;
+ return parse_info()->literal()->debug_name()->ToCString();
+ }
+ const char* str = debug_name_ ? debug_name_ : "unknown";
+ size_t len = strlen(str) + 1;
+ base::SmartArrayPointer<char> name(new char[len]);
+ memcpy(name.get(), str, len);
+ return name;
+}
+
+
+bool CompilationInfo::MustReplaceUndefinedReceiverWithGlobalProxy() {
+ return is_sloppy(language_mode()) && !is_native() &&
+ scope()->has_this_declaration() && scope()->receiver()->is_used();
}
@@ -414,7 +440,7 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
if (info()->shared_info()->asm_function()) {
if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
- info()->MarkAsContextSpecializing();
+ info()->MarkAsFunctionContextSpecializing();
} else if (FLAG_turbo_type_feedback) {
info()->MarkAsTypeFeedbackEnabled();
info()->EnsureFeedbackVector();
@@ -468,7 +494,9 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
}
// Type-check the function.
- AstTyper::Run(info());
+ AstTyper(info()->isolate(), info()->zone(), info()->closure(),
+ info()->scope(), info()->osr_ast_id(), info()->literal())
+ .Run();
// Optimization could have been disabled by the parser. Note that this check
// is only needed because the Hydrogen graph builder is missing some bailouts.
@@ -749,8 +777,8 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
// Context specialization folds-in the context, so no sharing can occur.
- if (info->is_context_specializing()) return;
- // Frame specialization implies context specialization.
+ if (info->is_function_context_specializing()) return;
+ // Frame specialization implies function context specialization.
DCHECK(!info->is_frame_specializing());
// Do not cache bound functions.
@@ -760,7 +788,7 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
// Cache optimized context-specific code.
if (FLAG_cache_optimized_code) {
Handle<SharedFunctionInfo> shared(function->shared());
- Handle<FixedArray> literals(function->literals());
+ Handle<LiteralsArray> literals(function->literals());
Handle<Context> native_context(function->context()->native_context());
SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
literals, info->osr_ast_id());
@@ -771,7 +799,7 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
// Cache optimized context-independent code.
if (FLAG_turbo_cache_shared_code && code->is_turbofanned()) {
- DCHECK(!info->is_context_specializing());
+ DCHECK(!info->is_function_context_specializing());
DCHECK(info->osr_ast_id().IsNone());
Handle<SharedFunctionInfo> shared(function->shared());
SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(shared, code);
@@ -940,8 +968,24 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
}
-bool Compiler::EnsureCompiled(Handle<JSFunction> function,
- ClearExceptionFlag flag) {
+MaybeHandle<Code> Compiler::GetStubCode(Handle<JSFunction> function,
+ CodeStub* stub) {
+ // Build a "hybrid" CompilationInfo for a JSFunction/CodeStub pair.
+ Zone zone;
+ ParseInfo parse_info(&zone, function);
+ CompilationInfo info(&parse_info);
+ info.SetFunctionType(stub->GetCallInterfaceDescriptor().GetFunctionType());
+ info.MarkAsFunctionContextSpecializing();
+ info.MarkAsDeoptimizationEnabled();
+ info.SetStub(stub);
+
+ // Run a "mini pipeline", extracted from compiler.cc.
+ if (!ParseAndAnalyze(&parse_info)) return MaybeHandle<Code>();
+ return compiler::Pipeline(&info).GenerateCode();
+}
+
+
+bool Compiler::Compile(Handle<JSFunction> function, ClearExceptionFlag flag) {
if (function->is_compiled()) return true;
MaybeHandle<Code> maybe_code = Compiler::GetLazyCode(function);
Handle<Code> code;
@@ -1233,8 +1277,8 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
script = isolate->factory()->NewScript(source);
if (!script_name.is_null()) {
script->set_name(*script_name);
- script->set_line_offset(Smi::FromInt(line_offset));
- script->set_column_offset(Smi::FromInt(column_offset));
+ script->set_line_offset(line_offset);
+ script->set_column_offset(column_offset);
}
script->set_origin_options(options);
Zone zone;
@@ -1351,12 +1395,13 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
// Create a script object describing the script to be compiled.
Handle<Script> script = isolate->factory()->NewScript(source);
if (natives == NATIVES_CODE) {
- script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
+ script->set_type(Script::TYPE_NATIVE);
+ script->set_hide_source(true);
}
if (!script_name.is_null()) {
script->set_name(*script_name);
- script->set_line_offset(Smi::FromInt(line_offset));
- script->set_column_offset(Smi::FromInt(column_offset));
+ script->set_line_offset(line_offset);
+ script->set_column_offset(column_offset);
}
script->set_origin_options(resource_options);
if (!source_map_url.is_null()) {
@@ -1715,7 +1760,6 @@ bool CompilationPhase::ShouldProduceTraceOutput() const {
base::OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
}
-
#if DEBUG
void CompilationInfo::PrintAstForTesting() {
PrintF("--- Source from AST ---\n%s\n",
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 4775111362..45cf7b5183 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -121,7 +121,7 @@ class CompilationInfo {
kDeoptimizationSupport = 1 << 5,
kDebug = 1 << 6,
kSerializing = 1 << 7,
- kContextSpecializing = 1 << 8,
+ kFunctionContextSpecializing = 1 << 8,
kFrameSpecializing = 1 << 9,
kInliningEnabled = 1 << 10,
kTypingEnabled = 1 << 11,
@@ -135,6 +135,7 @@ class CompilationInfo {
explicit CompilationInfo(ParseInfo* parse_info);
CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone);
+ CompilationInfo(const char* debug_name, Isolate* isolate, Zone* zone);
virtual ~CompilationInfo();
ParseInfo* parse_info() const { return parse_info_; }
@@ -150,7 +151,6 @@ class CompilationInfo {
Handle<JSFunction> closure() const;
FunctionLiteral* literal() const;
Scope* scope() const;
- bool MayUseThis() const;
Handle<Context> context() const;
Handle<SharedFunctionInfo> shared_info() const;
bool has_shared_info() const;
@@ -173,7 +173,6 @@ class CompilationInfo {
int num_parameters_including_this() const;
bool is_this_defined() const;
int num_heap_slots() const;
- Code::Flags flags() const;
void set_parameter_count(int parameter_count) {
DCHECK(IsStub());
@@ -222,9 +221,13 @@ class CompilationInfo {
bool will_serialize() const { return GetFlag(kSerializing); }
- void MarkAsContextSpecializing() { SetFlag(kContextSpecializing); }
+ void MarkAsFunctionContextSpecializing() {
+ SetFlag(kFunctionContextSpecializing);
+ }
- bool is_context_specializing() const { return GetFlag(kContextSpecializing); }
+ bool is_function_context_specializing() const {
+ return GetFlag(kFunctionContextSpecializing);
+ }
void MarkAsFrameSpecializing() { SetFlag(kFrameSpecializing); }
@@ -293,14 +296,14 @@ class CompilationInfo {
// Accessors for the different compilation modes.
bool IsOptimizing() const { return mode_ == OPTIMIZE; }
- bool IsOptimizable() const { return mode_ == BASE; }
bool IsStub() const { return mode_ == STUB; }
void SetOptimizing(BailoutId osr_ast_id, Handle<Code> unoptimized) {
- DCHECK(!shared_info().is_null());
+ DCHECK(has_shared_info());
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
unoptimized_code_ = unoptimized;
optimization_id_ = isolate()->NextOptimizationId();
+ set_output_code_kind(Code::OPTIMIZED_FUNCTION);
}
void SetFunctionType(Type::FunctionType* function_type) {
@@ -308,19 +311,19 @@ class CompilationInfo {
}
Type::FunctionType* function_type() const { return function_type_; }
- void SetStub(CodeStub* code_stub) {
- SetMode(STUB);
- code_stub_ = code_stub;
- }
+ void SetStub(CodeStub* code_stub);
// Deoptimization support.
bool HasDeoptimizationSupport() const {
return GetFlag(kDeoptimizationSupport);
}
void EnableDeoptimizationSupport() {
- DCHECK(IsOptimizable());
+ DCHECK_EQ(BASE, mode_);
SetFlag(kDeoptimizationSupport);
}
+ bool ShouldEnsureSpaceForLazyDeopt() { return !IsStub(); }
+
+ bool MustReplaceUndefinedReceiverWithGlobalProxy();
// Determines whether or not to insert a self-optimization header.
bool ShouldSelfOptimize();
@@ -404,8 +407,6 @@ class CompilationInfo {
bool has_simple_parameters();
- Handle<Code> GenerateCodeStub();
-
typedef std::vector<Handle<SharedFunctionInfo>> InlinedFunctionList;
InlinedFunctionList const& inlined_functions() const {
return inlined_functions_;
@@ -414,6 +415,12 @@ class CompilationInfo {
inlined_functions_.push_back(inlined_function);
}
+ base::SmartArrayPointer<char> GetDebugName() const;
+
+ Code::Kind output_code_kind() const { return output_code_kind_; }
+
+ void set_output_code_kind(Code::Kind kind) { output_code_kind_ = kind; }
+
protected:
ParseInfo* parse_info_;
@@ -433,8 +440,9 @@ class CompilationInfo {
STUB
};
- CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub, Mode mode,
- Isolate* isolate, Zone* zone);
+ CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
+ const char* debug_name, Mode mode, Isolate* isolate,
+ Zone* zone);
Isolate* isolate_;
@@ -452,6 +460,8 @@ class CompilationInfo {
unsigned flags_;
+ Code::Kind output_code_kind_;
+
// For compiled stubs, the stub object
CodeStub* code_stub_;
// The compiled code.
@@ -503,6 +513,8 @@ class CompilationInfo {
Type::FunctionType* function_type_;
+ const char* debug_name_;
+
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -625,9 +637,13 @@ class Compiler : public AllStatic {
Handle<JSFunction> function);
MUST_USE_RESULT static MaybeHandle<Code> GetLazyCode(
Handle<JSFunction> function);
+ MUST_USE_RESULT static MaybeHandle<Code> GetStubCode(
+ Handle<JSFunction> function, CodeStub* stub);
+ static bool Compile(Handle<JSFunction> function, ClearExceptionFlag flag);
static bool CompileDebugCode(Handle<JSFunction> function);
static bool CompileDebugCode(Handle<SharedFunctionInfo> shared);
+ static void CompileForLiveEdit(Handle<Script> script);
// Parser::Parse, then Compiler::Analyze.
static bool ParseAndAnalyze(ParseInfo* info);
@@ -636,11 +652,6 @@ class Compiler : public AllStatic {
// Adds deoptimization support, requires ParseAndAnalyze.
static bool EnsureDeoptimizationSupport(CompilationInfo* info);
- static bool EnsureCompiled(Handle<JSFunction> function,
- ClearExceptionFlag flag);
-
- static void CompileForLiveEdit(Handle<Script> script);
-
// Compile a String source within a context for eval.
MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
Handle<String> source, Handle<SharedFunctionInfo> outer_info,
diff --git a/deps/v8/src/compiler/DEPS b/deps/v8/src/compiler/DEPS
deleted file mode 100644
index 60e2f6d742..0000000000
--- a/deps/v8/src/compiler/DEPS
+++ /dev/null
@@ -1,3 +0,0 @@
-include_rules = [
- "-src/v8.h",
-]
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index b54982f4c9..8a03ff77f3 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -67,14 +67,12 @@ FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
// static
-FieldAccess AccessBuilder::ForFixedArrayLength() {
- // TODO(turbofan): 2^30 is a valid upper limit for the FixedArray::length
- // field, although it's not the best. If we had a Zone we could create an
- // appropriate range type instead.
+FieldAccess AccessBuilder::ForFixedArrayLength(Zone* zone) {
STATIC_ASSERT(FixedArray::kMaxLength <= 1 << 30);
FieldAccess access = {
kTaggedBase, FixedArray::kLengthOffset, MaybeHandle<Name>(),
- Type::Intersect(Type::Unsigned30(), Type::TaggedSigned()),
+ Type::Intersect(Type::Range(0, FixedArray::kMaxLength, zone),
+ Type::TaggedSigned(), zone),
kMachAnyTagged};
return access;
}
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 95be3e0dd8..762ab64d52 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -41,7 +41,7 @@ class AccessBuilder final : public AllStatic {
static FieldAccess ForJSDateField(JSDate::FieldIndex index);
// Provides access to FixedArray::length() field.
- static FieldAccess ForFixedArrayLength();
+ static FieldAccess ForFixedArrayLength(Zone* zone);
// Provides access to DescriptorArray::enum_cache() field.
static FieldAccess ForDescriptorArrayEnumCache();
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index dca6d4e3ec..796d132a34 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -220,12 +220,27 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
return ls;
case kUnsignedGreaterThan:
return hi;
+ case kFloatLessThanOrUnordered:
+ return lt;
+ case kFloatGreaterThanOrEqual:
+ return ge;
+ case kFloatLessThanOrEqual:
+ return ls;
+ case kFloatGreaterThanOrUnordered:
+ return hi;
+ case kFloatLessThan:
+ return lo;
+ case kFloatGreaterThanOrEqualOrUnordered:
+ return hs;
+ case kFloatLessThanOrEqualOrUnordered:
+ return le;
+ case kFloatGreaterThan:
+ return gt;
case kOverflow:
return vs;
case kNotOverflow:
return vc;
- case kUnorderedEqual:
- case kUnorderedNotEqual:
+ default:
break;
}
UNREACHABLE();
@@ -573,8 +588,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
// 0.0 is the only immediate supported by vcmp instructions.
- DCHECK(i.InputDouble(1) == 0.0);
- __ VFPCompareAndSetFlags(i.InputFloat32Register(0), i.InputDouble(1));
+ DCHECK(i.InputFloat32(1) == 0.0f);
+ __ VFPCompareAndSetFlags(i.InputFloat32Register(0), i.InputFloat32(1));
}
DCHECK_EQ(SetCC, i.OutputSBit());
break;
@@ -816,10 +831,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
case kArmPush:
if (instr->InputAt(0)->IsDoubleRegister()) {
- __ vstr(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
- __ sub(sp, sp, Operand(kDoubleSize));
+ __ vpush(i.InputDoubleRegister(0));
} else {
- __ Push(i.InputRegister(0));
+ __ push(i.InputRegister(0));
}
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
@@ -878,6 +892,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
+ case kCheckedLoadWord64:
+ case kCheckedStoreWord64:
+ UNREACHABLE(); // currently unsupported checked int64 load/store.
+ break;
}
} // NOLINT(readability/fn_size)
@@ -1240,20 +1258,22 @@ void CodeGenerator::AddNopForSmiCodeInlining() {
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- // Block literal pool emission for duration of padding.
- v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= v8::internal::Assembler::kInstrSize;
- }
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ // Block literal pool emission for duration of padding.
+ v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
}
}
}
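The EnsureSpaceForLazyDeopt() rewrite above keeps the padding arithmetic unchanged and only hoists the early return behind the new ShouldEnsureSpaceForLazyDeopt() predicate. The amount of nop padding reduces to (a sketch of the computation, not the emission loop):

#include <algorithm>

// Bytes of padding needed so the deoptimizer has patch_size bytes of room
// after the previous lazy-bailout instruction.
int LazyDeoptPaddingBytes(int current_pc, int last_lazy_deopt_pc,
                          int patch_size) {
  return std::max(0, last_lazy_deopt_pc + patch_size - current_pc);
}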
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index aa59f2cbb7..f58a29de8a 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -925,6 +925,19 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kArmVmovLowU32F64, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVmovLowF64U32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
ArmOperandGenerator g(this);
Float32BinopMatcher m(node);
@@ -1254,22 +1267,37 @@ void InstructionSelector::VisitTailCall(Node* node) {
namespace {
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
+ ArmOperandGenerator g(selector);
+ opcode = cont->Encode(opcode);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), left, right,
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+ }
+}
+
+
// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Float32BinopMatcher m(node);
- InstructionOperand rhs = m.right().Is(0.0) ? g.UseImmediate(m.right().node())
- : g.UseRegister(m.right().node());
- if (cont->IsBranch()) {
- selector->Emit(cont->Encode(kArmVcmpF32), g.NoOutput(),
- g.UseRegister(m.left().node()), rhs,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
+ if (m.right().Is(0.0f)) {
+ VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()), cont);
+ } else if (m.left().Is(0.0f)) {
+ cont->Commute();
+ VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.right().node()),
+ g.UseImmediate(m.left().node()), cont);
} else {
- DCHECK(cont->IsSet());
- selector->Emit(cont->Encode(kArmVcmpF32),
- g.DefineAsRegister(cont->result()),
- g.UseRegister(m.left().node()), rhs);
+ VisitCompare(selector, kArmVcmpF32, g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()), cont);
}
}
@@ -1279,17 +1307,16 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
ArmOperandGenerator g(selector);
Float64BinopMatcher m(node);
- InstructionOperand rhs = m.right().Is(0.0) ? g.UseImmediate(m.right().node())
- : g.UseRegister(m.right().node());
- if (cont->IsBranch()) {
- selector->Emit(cont->Encode(kArmVcmpF64), g.NoOutput(),
- g.UseRegister(m.left().node()), rhs,
- g.Label(cont->true_block()), g.Label(cont->false_block()));
+ if (m.right().Is(0.0)) {
+ VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()), cont);
+ } else if (m.left().Is(0.0)) {
+ cont->Commute();
+ VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.right().node()),
+ g.UseImmediate(m.left().node()), cont);
} else {
- DCHECK(cont->IsSet());
- selector->Emit(cont->Encode(kArmVcmpF64),
- g.DefineAsRegister(cont->result()),
- g.UseRegister(m.left().node()), rhs);
+ VisitCompare(selector, kArmVcmpF64, g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()), cont);
}
}
@@ -1376,19 +1403,19 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThan:
- cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ cont->OverwriteAndNegateIfEqual(kFloatLessThan);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat32LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
return VisitFloat32Compare(selector, value, cont);
case IrOpcode::kFloat64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThan:
- cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ cont->OverwriteAndNegateIfEqual(kFloatLessThan);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ cont->OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
return VisitFloat64Compare(selector, value, cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
@@ -1552,13 +1579,13 @@ void InstructionSelector::VisitFloat32Equal(Node* node) {
void InstructionSelector::VisitFloat32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont(kFloatLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont(kFloatLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
@@ -1570,13 +1597,13 @@ void InstructionSelector::VisitFloat64Equal(Node* node) {
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont(kFloatLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont(kFloatLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
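The float-compare rework above relies on vcmp accepting 0.0 only as an immediate: when the zero sits on the left, the selector swaps the operands and calls cont->Commute(), which mirrors the condition across the comparison rather than negating it. A sketch of that mirroring for the ordered relations (hypothetical helper, not the FlagsContinuation API):

enum class Cmp { kLessThan, kLessThanOrEqual, kGreaterThan, kGreaterThanOrEqual };

// "0.0 < x" holds exactly when "x > 0.0", so swapping operands maps each
// relation to its mirror image.
Cmp Commute(Cmp c) {
  switch (c) {
    case Cmp::kLessThan:           return Cmp::kGreaterThan;
    case Cmp::kLessThanOrEqual:    return Cmp::kGreaterThanOrEqual;
    case Cmp::kGreaterThan:        return Cmp::kLessThan;
    case Cmp::kGreaterThanOrEqual: return Cmp::kLessThanOrEqual;
  }
  return c;  // unreachable
}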
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 1b68577772..257dd6c134 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -259,6 +259,22 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
return ls;
case kUnsignedGreaterThan:
return hi;
+ case kFloatLessThanOrUnordered:
+ return lt;
+ case kFloatGreaterThanOrEqual:
+ return ge;
+ case kFloatLessThanOrEqual:
+ return ls;
+ case kFloatGreaterThanOrUnordered:
+ return hi;
+ case kFloatLessThan:
+ return lo;
+ case kFloatGreaterThanOrEqualOrUnordered:
+ return hs;
+ case kFloatLessThanOrEqualOrUnordered:
+ return le;
+ case kFloatGreaterThan:
+ return gt;
case kOverflow:
return vs;
case kNotOverflow:
@@ -302,6 +318,20 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
+#define ASSEMBLE_CHECKED_LOAD_INTEGER_64(asm_instr) \
+ do { \
+ auto result = i.OutputRegister(); \
+ auto buffer = i.InputRegister(0); \
+ auto offset = i.InputRegister32(1); \
+ auto length = i.InputOperand32(2); \
+ __ Cmp(offset, length); \
+ auto ool = new (zone()) OutOfLineLoadZero(this, result); \
+ __ B(hs, ool->entry()); \
+ __ asm_instr(result, MemOperand(buffer, offset, UXTW)); \
+ __ Bind(ool->exit()); \
+ } while (0)
+
+
#define ASSEMBLE_CHECKED_STORE_FLOAT(width) \
do { \
auto buffer = i.InputRegister(0); \
@@ -330,6 +360,20 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
+#define ASSEMBLE_CHECKED_STORE_INTEGER_64(asm_instr) \
+ do { \
+ auto buffer = i.InputRegister(0); \
+ auto offset = i.InputRegister32(1); \
+ auto length = i.InputOperand32(2); \
+ auto value = i.InputRegister(3); \
+ __ Cmp(offset, length); \
+ Label done; \
+ __ B(hs, &done); \
+ __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
+ __ Bind(&done); \
+ } while (0)
+
+
#define ASSEMBLE_SHIFT(asm_instr, width) \
do { \
if (instr->InputAt(1)->IsRegister()) { \
@@ -726,8 +770,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
// 0.0 is the only immediate supported by fcmp instructions.
- DCHECK(i.InputDouble(1) == 0.0);
- __ Fcmp(i.InputFloat32Register(0), i.InputDouble(1));
+ DCHECK(i.InputFloat32(1) == 0.0f);
+ __ Fcmp(i.InputFloat32Register(0), i.InputFloat32(1));
}
break;
case kArm64Float32Add:
@@ -858,10 +902,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Fmov(i.OutputFloat64Register(), tmp);
break;
}
- case kArm64Float64MoveU64: {
+ case kArm64Float64MoveU64:
__ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
break;
- }
+ case kArm64U64MoveFloat64:
+ __ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64Ldrb:
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
break;
@@ -942,6 +988,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
break;
+ case kCheckedLoadWord64:
+ ASSEMBLE_CHECKED_LOAD_INTEGER_64(Ldr);
+ break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(32);
break;
@@ -957,6 +1006,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(Str);
break;
+ case kCheckedStoreWord64:
+ ASSEMBLE_CHECKED_STORE_INTEGER_64(Str);
+ break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(32);
break;
@@ -1346,22 +1398,24 @@ void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- intptr_t current_pc = masm()->pc_offset();
-
- if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
- intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK((padding_size % kInstructionSize) == 0);
- InstructionAccurateScope instruction_accurate(
- masm(), padding_size / kInstructionSize);
-
- while (padding_size > 0) {
- __ nop();
- padding_size -= kInstructionSize;
- }
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ intptr_t current_pc = masm()->pc_offset();
+
+ if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+ intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK((padding_size % kInstructionSize) == 0);
+ InstructionAccurateScope instruction_accurate(
+ masm(), padding_size / kInstructionSize);
+
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= kInstructionSize;
}
}
}
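The new ASSEMBLE_CHECKED_LOAD_INTEGER_64 / ASSEMBLE_CHECKED_STORE_INTEGER_64 macros above follow the checked-access contract used by the 32-bit variants: an out-of-bounds load yields zero and an out-of-bounds store is skipped. In plain C++ the semantics are roughly:

#include <cstdint>
#include <cstring>

uint64_t CheckedLoadU64(const uint8_t* buffer, uint32_t offset,
                        uint32_t length) {
  if (offset >= length) return 0;  // the OutOfLineLoadZero path
  uint64_t result;
  std::memcpy(&result, buffer + offset, sizeof(result));
  return result;
}

void CheckedStoreU64(uint8_t* buffer, uint32_t offset, uint32_t length,
                     uint64_t value) {
  if (offset >= length) return;  // branch over the store, as in the macro
  std::memcpy(buffer + offset, &value, sizeof(value));
}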
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index f76854611e..c2a52af7cb 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -115,6 +115,7 @@ namespace compiler {
V(Arm64Float64InsertLowWord32) \
V(Arm64Float64InsertHighWord32) \
V(Arm64Float64MoveU64) \
+ V(Arm64U64MoveFloat64) \
V(Arm64LdrS) \
V(Arm64StrS) \
V(Arm64LdrD) \
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 76b1059eb5..7a5b84275a 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -458,6 +458,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case kRepWord32:
opcode = kCheckedLoadWord32;
break;
+ case kRepWord64:
+ opcode = kCheckedLoadWord64;
+ break;
case kRepFloat32:
opcode = kCheckedLoadFloat32;
break;
@@ -491,6 +494,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case kRepWord32:
opcode = kCheckedStoreWord32;
break;
+ case kRepWord64:
+ opcode = kCheckedStoreWord64;
+ break;
case kRepFloat32:
opcode = kCheckedStoreFloat32;
break;
@@ -1270,6 +1276,26 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kArm64Float64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kArm64U64MoveFloat64, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ VisitRR(this, kArm64Float64MoveU64, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kArm64Float64MoveU64, node);
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kArm64Float32Add, node);
}
@@ -1645,7 +1671,7 @@ void VisitWord64Test(InstructionSelector* selector, Node* node,
}
-// Shared routine for multiple float64 compare operations.
+// Shared routine for multiple float32 compare operations.
void VisitFloat32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Arm64OperandGenerator g(selector);
@@ -1653,6 +1679,10 @@ void VisitFloat32Compare(InstructionSelector* selector, Node* node,
if (m.right().Is(0.0f)) {
VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
g.UseImmediate(m.right().node()), cont);
+ } else if (m.left().Is(0.0f)) {
+ cont->Commute();
+ VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.right().node()),
+ g.UseImmediate(m.left().node()), cont);
} else {
VisitCompare(selector, kArm64Float32Cmp, g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()), cont);
@@ -1668,6 +1698,10 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
if (m.right().Is(0.0)) {
VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
g.UseImmediate(m.right().node()), cont);
+ } else if (m.left().Is(0.0)) {
+ cont->Commute();
+ VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.right().node()),
+ g.UseImmediate(m.left().node()), cont);
} else {
VisitCompare(selector, kArm64Float64Cmp, g.UseRegister(m.left().node()),
g.UseRegister(m.right().node()), cont);
@@ -1739,19 +1773,19 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
cont.OverwriteAndNegateIfEqual(kEqual);
return VisitFloat32Compare(this, value, &cont);
case IrOpcode::kFloat32LessThan:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ cont.OverwriteAndNegateIfEqual(kFloatLessThan);
return VisitFloat32Compare(this, value, &cont);
case IrOpcode::kFloat32LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
return VisitFloat32Compare(this, value, &cont);
case IrOpcode::kFloat64Equal:
cont.OverwriteAndNegateIfEqual(kEqual);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kFloat64LessThan:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ cont.OverwriteAndNegateIfEqual(kFloatLessThan);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kFloat64LessThanOrEqual:
- cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ cont.OverwriteAndNegateIfEqual(kFloatLessThanOrEqual);
return VisitFloat64Compare(this, value, &cont);
case IrOpcode::kProjection:
// Check if this is the overflow output projection of an
@@ -1992,13 +2026,13 @@ void InstructionSelector::VisitFloat32Equal(Node* node) {
void InstructionSelector::VisitFloat32LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont(kFloatLessThan, node);
VisitFloat32Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont(kFloatLessThanOrEqual, node);
VisitFloat32Compare(this, node, &cont);
}
@@ -2010,13 +2044,13 @@ void InstructionSelector::VisitFloat64Equal(Node* node) {
void InstructionSelector::VisitFloat64LessThan(Node* node) {
- FlagsContinuation cont(kUnsignedLessThan, node);
+ FlagsContinuation cont(kFloatLessThan, node);
VisitFloat64Compare(this, node, &cont);
}
void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
- FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+ FlagsContinuation cont(kFloatLessThanOrEqual, node);
VisitFloat64Compare(this, node, &cont);
}
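The new VisitBitcast* selectors above lower bit-reinterpretation nodes to plain register moves (fmov and friends). In C++ terms a bitcast is a byte copy, never a numeric conversion (sketch):

#include <cstdint>
#include <cstring>

float BitcastInt32ToFloat32(int32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof(f));  // reinterpret the bits, no conversion
  return f;
}

int64_t BitcastFloat64ToInt64(double value) {
  int64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}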
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index 947d2e3b18..f8f010d816 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -568,11 +568,6 @@ void AstGraphBuilder::CreateGraphBody(bool stack_check) {
// Build the arguments object if it is used.
BuildArgumentsObject(scope->arguments());
- // Build rest arguments array if it is used.
- int rest_index;
- Variable* rest_parameter = scope->rest_parameter(&rest_index);
- BuildRestArgumentsArray(rest_parameter, rest_index);
-
// Build assignment to {.this_function} variable if it is used.
BuildThisFunctionVariable(scope->this_function_var());
@@ -686,7 +681,8 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
}
// Bind all parameter variables. The parameter indices are shifted by 1
- // (receiver is parameter index -1 but environment index 0).
+ // (receiver is variable index -1 but {Parameter} node index 0 and located at
+ // index 0 in the environment).
for (int i = 0; i < scope->num_parameters(); ++i) {
const char* debug_name = GetDebugParameterName(graph()->zone(), scope, i);
const Operator* op = common()->Parameter(param_num++, debug_name);
@@ -725,8 +721,8 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder::Environment* copy,
void AstGraphBuilder::Environment::Bind(Variable* variable, Node* node) {
DCHECK(variable->IsStackAllocated());
if (variable->IsParameter()) {
- // The parameter indices are shifted by 1 (receiver is parameter
- // index -1 but environment index 0).
+ // The parameter indices are shifted by 1 (receiver is variable
+ // index -1 but located at index 0 in the environment).
values()->at(variable->index() + 1) = node;
} else {
DCHECK(variable->IsStackLocal());
@@ -742,8 +738,8 @@ void AstGraphBuilder::Environment::Bind(Variable* variable, Node* node) {
Node* AstGraphBuilder::Environment::Lookup(Variable* variable) {
DCHECK(variable->IsStackAllocated());
if (variable->IsParameter()) {
- // The parameter indices are shifted by 1 (receiver is parameter
- // index -1 but environment index 0).
+ // The parameter indices are shifted by 1 (receiver is variable
+ // index -1 but located at index 0 in the environment).
return values()->at(variable->index() + 1);
} else {
DCHECK(variable->IsStackLocal());
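The reworded comments in Bind() and Lookup() above describe the same mapping: the receiver has variable index -1 but occupies environment slot 0, so every parameter's index is shifted by one. As a one-line sketch:

// Parameter i (receiver is -1) lives at environment slot i + 1.
int EnvironmentSlotForParameter(int variable_index) {
  return variable_index + 1;
}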
@@ -1138,7 +1134,7 @@ void AstGraphBuilder::VisitBlock(Block* stmt) {
VisitStatements(stmt->statements());
} else {
// Visit declarations and statements in a block scope.
- if (stmt->scope()->ContextLocalCount() > 0) {
+ if (stmt->scope()->NeedsContext()) {
Node* context = BuildLocalBlockContext(stmt->scope());
ContextScope scope(this, stmt->scope(), context);
VisitDeclarations(stmt->scope()->declarations());
@@ -1162,6 +1158,12 @@ void AstGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
}
+void AstGraphBuilder::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* stmt) {
+ Visit(stmt->statement());
+}
+
+
void AstGraphBuilder::VisitIfStatement(IfStatement* stmt) {
IfBuilder compare_if(this);
VisitForTest(stmt->condition());
@@ -1419,16 +1421,13 @@ void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
// Create a catch scope that binds the exception.
Node* exception = try_control.GetExceptionNode();
- Unique<String> name = MakeUnique(stmt->variable()->name());
+ Handle<String> name = stmt->variable()->name();
const Operator* op = javascript()->CreateCatchContext(name);
Node* context = NewNode(op, exception, GetFunctionClosureForContext());
// Evaluate the catch-block.
VisitInScope(stmt->catch_block(), stmt->scope(), context);
try_control.EndCatch();
-
- // TODO(mstarzinger): Remove bailout once everything works.
- if (!FLAG_turbo_try_catch) SetStackOverflow();
}
@@ -1514,8 +1513,6 @@ void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- Node* context = current_context();
-
// Find or build a shared function info.
Handle<SharedFunctionInfo> shared_info =
Compiler::GetSharedFunctionInfo(expr, info()->script(), info());
@@ -1524,7 +1521,7 @@ void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
// Create node to instantiate a new closure.
PretenureFlag pretenure = expr->pretenure() ? TENURED : NOT_TENURED;
const Operator* op = javascript()->CreateClosure(shared_info, pretenure);
- Node* value = NewNode(op, context);
+ Node* value = NewNode(op);
ast_context()->ProduceValue(value);
}
@@ -1563,10 +1560,7 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
Node* name = environment()->Pop();
Node* start = jsgraph()->Constant(expr->start_position());
Node* end = jsgraph()->Constant(expr->end_position());
- const Operator* opc = javascript()->CallRuntime(
- is_strong(language_mode()) ? Runtime::kDefineClassStrong
- : Runtime::kDefineClass,
- 5);
+ const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass, 5);
Node* literal = NewNode(opc, name, extends, constructor, start, end);
PrepareFrameState(literal, expr->CreateLiteralId(),
OutputFrameStateCombine::Push());
@@ -1582,7 +1576,6 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
environment()->Push(proto);
// Create nodes to store method values into the literal.
- int store_slot_index = 0;
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
environment()->Push(property->is_static() ? literal : proto);
@@ -1605,9 +1598,8 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
Node* value = environment()->Pop();
Node* key = environment()->Pop();
Node* receiver = environment()->Pop();
- VectorSlotPair feedback = CreateVectorSlotPair(
- expr->SlotForHomeObject(property->value(), &store_slot_index));
- BuildSetHomeObject(value, receiver, property->value(), feedback);
+
+ BuildSetHomeObject(value, receiver, property);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1650,10 +1642,9 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
DCHECK_NOT_NULL(expr->class_variable_proxy());
Variable* var = expr->class_variable_proxy()->var();
FrameStateBeforeAndAfter states(this, BailoutId::None());
- VectorSlotPair feedback =
- CreateVectorSlotPair(FLAG_vector_stores && var->IsUnallocated()
- ? expr->GetNthSlot(store_slot_index++)
- : FeedbackVectorICSlot::Invalid());
+ VectorSlotPair feedback = CreateVectorSlotPair(
+ expr->NeedsProxySlot() ? expr->ProxySlot()
+ : FeedbackVectorICSlot::Invalid());
BuildVariableAssignment(var, literal, Token::INIT_CONST, feedback,
BailoutId::None(), states);
}
@@ -1732,7 +1723,6 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Create nodes to store computed values into the literal.
int property_index = 0;
- int store_slot_index = 0;
AccessorTable accessor_table(zone());
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
@@ -1756,17 +1746,12 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* value = environment()->Pop();
Handle<Name> name = key->AsPropertyName();
VectorSlotPair feedback =
- FLAG_vector_stores
- ? CreateVectorSlotPair(expr->GetNthSlot(store_slot_index++))
- : VectorSlotPair();
+ CreateVectorSlotPair(property->GetSlot(0));
Node* store = BuildNamedStore(literal, name, value, feedback,
TypeFeedbackId::None());
states.AddToNode(store, key->id(),
OutputFrameStateCombine::Ignore());
- VectorSlotPair home_feedback = CreateVectorSlotPair(
- expr->SlotForHomeObject(property->value(), &store_slot_index));
- BuildSetHomeObject(value, literal, property->value(),
- home_feedback);
+ BuildSetHomeObject(value, literal, property, 1);
} else {
VisitForEffect(property->value());
}
@@ -1785,9 +1770,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* set_property = NewNode(op, receiver, key, value, language);
// SetProperty should not lazy deopt on an object literal.
PrepareFrameState(set_property, BailoutId::None());
- VectorSlotPair home_feedback = CreateVectorSlotPair(
- expr->SlotForHomeObject(property->value(), &store_slot_index));
- BuildSetHomeObject(value, receiver, property->value(), home_feedback);
+ BuildSetHomeObject(value, receiver, property);
}
break;
}
@@ -1806,12 +1789,12 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = property->value();
+ accessor_table.lookup(key)->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = property->value();
+ accessor_table.lookup(key)->second->setter = property;
}
break;
}
@@ -1822,16 +1805,8 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end(); ++it) {
VisitForValue(it->first);
- VisitForValueOrNull(it->second->getter);
- VectorSlotPair feedback_getter = CreateVectorSlotPair(
- expr->SlotForHomeObject(it->second->getter, &store_slot_index));
- BuildSetHomeObject(environment()->Top(), literal, it->second->getter,
- feedback_getter);
- VisitForValueOrNull(it->second->setter);
- VectorSlotPair feedback_setter = CreateVectorSlotPair(
- expr->SlotForHomeObject(it->second->setter, &store_slot_index));
- BuildSetHomeObject(environment()->Top(), literal, it->second->setter,
- feedback_setter);
+ VisitObjectLiteralAccessor(literal, it->second->getter);
+ VisitObjectLiteralAccessor(literal, it->second->setter);
Node* setter = environment()->Pop();
Node* getter = environment()->Pop();
Node* name = environment()->Pop();
@@ -1876,9 +1851,7 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Node* value = environment()->Pop();
Node* key = environment()->Pop();
Node* receiver = environment()->Pop();
- VectorSlotPair feedback = CreateVectorSlotPair(
- expr->SlotForHomeObject(property->value(), &store_slot_index));
- BuildSetHomeObject(value, receiver, property->value(), feedback);
+ BuildSetHomeObject(value, receiver, property);
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
case ObjectLiteral::Property::COMPUTED:
@@ -1919,14 +1892,21 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
NewNode(op, literal);
}
- // Verify that compilation exactly consumed the number of store ic slots that
- // the ObjectLiteral node had to offer.
- DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
-
ast_context()->ProduceValue(environment()->Pop());
}
+void AstGraphBuilder::VisitObjectLiteralAccessor(
+ Node* home_object, ObjectLiteralProperty* property) {
+ if (property == nullptr) {
+ VisitForValueOrNull(nullptr);
+ } else {
+ VisitForValue(property->value());
+ BuildSetHomeObject(environment()->Top(), home_object, property);
+ }
+}
+
+
void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Node* closure = GetFunctionClosure();
@@ -1984,10 +1964,8 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
if (subexpr->IsSpread()) {
VisitForValue(subexpr->AsSpread()->expression());
Node* iterable = environment()->Pop();
- Node* builtins = BuildLoadBuiltinsObject();
- Node* function = BuildLoadObjectField(
- builtins, JSBuiltinsObject::OffsetOfFunctionWithId(
- Builtins::CONCAT_ITERABLE_TO_ARRAY));
+ Node* function = BuildLoadNativeContextField(
+ Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX);
result = NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS,
language_mode()),
function, array, iterable);
@@ -2537,18 +2515,12 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
- Handle<String> name = expr->name();
-
// The callee and the receiver both have to be pushed onto the operand stack
// before arguments are being evaluated.
CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
- Node* receiver_value = BuildLoadBuiltinsObject();
- VectorSlotPair pair = CreateVectorSlotPair(expr->CallRuntimeFeedbackSlot());
- // TODO(jarin): bailout ids for runtime calls.
- FrameStateBeforeAndAfter states(this, BailoutId::None());
- Node* callee_value = BuildNamedLoad(receiver_value, name, pair);
- states.AddToNode(callee_value, BailoutId::None(),
- OutputFrameStateCombine::Push());
+ Node* callee_value = BuildLoadNativeContextField(expr->context_index());
+ Node* receiver_value = jsgraph()->UndefinedConstant();
+
environment()->Push(callee_value);
environment()->Push(receiver_value);
@@ -2566,15 +2538,14 @@ void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
- const Runtime::Function* function = expr->function();
-
// Handle calls to runtime functions implemented in JavaScript separately as
// the call follows JavaScript ABI and the callee is statically unknown.
if (expr->is_jsruntime()) {
- DCHECK(function == NULL && expr->name()->length() > 0);
return VisitCallJSRuntime(expr);
}
+ const Runtime::Function* function = expr->function();
+
// TODO(mstarzinger): This bailout is a gigantic hack, the owner is ashamed.
if (function->function_id == Runtime::kInlineGeneratorNext ||
function->function_id == Runtime::kInlineGeneratorThrow) {
@@ -2866,6 +2837,12 @@ void AstGraphBuilder::VisitSpread(Spread* expr) {
}
+void AstGraphBuilder::VisitEmptyParentheses(EmptyParentheses* expr) {
+ // Handled entirely by the parser itself.
+ UNREACHABLE();
+}
+
+
void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
Node* value = GetFunctionClosure();
ast_context()->ProduceValue(value);
@@ -2927,9 +2904,7 @@ void AstGraphBuilder::VisitInScope(Statement* stmt, Scope* s, Node* context) {
void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
LoopBuilder* loop) {
ControlScopeForIteration scope(this, stmt, loop);
- // TODO(mstarzinger): For now we only allow to interrupt non-asm.js code,
- // which is a gigantic hack and should be extended to all code at some point.
- if (!info()->shared_info()->asm_function()) {
+ if (FLAG_turbo_loop_stackcheck || !info()->shared_info()->asm_function()) {
Node* node = NewNode(javascript()->StackCheck());
PrepareFrameState(node, stmt->StackCheckId());
}
@@ -3100,22 +3075,21 @@ Node* AstGraphBuilder::BuildPatchReceiverToGlobalProxy(Node* receiver) {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object). Otherwise there is nothing left to do here.
- if (is_strict(language_mode()) || info()->is_native()) return receiver;
-
- // There is no need to perform patching if the receiver will never be used.
- if (!info()->MayUseThis()) return receiver;
-
- IfBuilder receiver_check(this);
- Node* undefined = jsgraph()->UndefinedConstant();
- Node* check = NewNode(javascript()->StrictEqual(), receiver, undefined);
- receiver_check.If(check);
- receiver_check.Then();
- Node* proxy = BuildLoadGlobalProxy();
- environment()->Push(proxy);
- receiver_check.Else();
- environment()->Push(receiver);
- receiver_check.End();
- return environment()->Pop();
+ if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
+ IfBuilder receiver_check(this);
+ Node* undefined = jsgraph()->UndefinedConstant();
+ Node* check = NewNode(javascript()->StrictEqual(), receiver, undefined);
+ receiver_check.If(check);
+ receiver_check.Then();
+ Node* proxy = BuildLoadGlobalProxy();
+ environment()->Push(proxy);
+ receiver_check.Else();
+ environment()->Push(receiver);
+ receiver_check.End();
+ return environment()->Pop();
+ } else {
+ return receiver;
+ }
}
@@ -3158,10 +3132,10 @@ Node* AstGraphBuilder::BuildLocalScriptContext(Scope* scope) {
DCHECK(scope->is_script_scope());
// Allocate a new local context.
- const Operator* op = javascript()->CreateScriptContext();
- Node* scope_info = jsgraph()->Constant(scope->GetScopeInfo(isolate()));
- Node* local_context = NewNode(op, GetFunctionClosure(), scope_info);
- PrepareFrameState(local_context, BailoutId::FunctionEntry());
+ Handle<ScopeInfo> scope_info = scope->GetScopeInfo(isolate());
+ const Operator* op = javascript()->CreateScriptContext(scope_info);
+ Node* local_context = NewNode(op, GetFunctionClosure());
+ PrepareFrameState(local_context, BailoutId::Prologue());
return local_context;
}
@@ -3171,9 +3145,9 @@ Node* AstGraphBuilder::BuildLocalBlockContext(Scope* scope) {
DCHECK(scope->is_block_scope());
// Allocate a new local context.
- const Operator* op = javascript()->CreateBlockContext();
- Node* scope_info = jsgraph()->Constant(scope->GetScopeInfo(isolate()));
- Node* local_context = NewNode(op, scope_info, GetFunctionClosureForContext());
+ Handle<ScopeInfo> scope_info = scope->GetScopeInfo(isolate());
+ const Operator* op = javascript()->CreateBlockContext(scope_info);
+ Node* local_context = NewNode(op, GetFunctionClosureForContext());
return local_context;
}
@@ -3183,13 +3157,17 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
if (arguments == NULL) return NULL;
// Allocate and initialize a new arguments object.
- Node* callee = GetFunctionClosure();
- const Operator* op = javascript()->CallRuntime(Runtime::kNewArguments, 1);
- Node* object = NewNode(op, callee);
-
- // Assign the object to the arguments variable.
+ CreateArgumentsParameters::Type type =
+ is_strict(language_mode()) || !info()->has_simple_parameters()
+ ? CreateArgumentsParameters::kUnmappedArguments
+ : CreateArgumentsParameters::kMappedArguments;
+ const Operator* op = javascript()->CreateArguments(type, 0);
+ Node* object = NewNode(op, GetFunctionClosure());
+ PrepareFrameState(object, BailoutId::None());
+
+  // Assign the object to the {arguments} variable. This should never lazy
+  // deopt, so it is fine to send an invalid bailout id.
DCHECK(arguments->IsContextSlot() || arguments->IsStackAllocated());
- // This should never lazy deopt, so it is fine to send invalid bailout id.
FrameStateBeforeAndAfter states(this, BailoutId::None());
BuildVariableAssignment(arguments, object, Token::ASSIGN, VectorSlotPair(),
BailoutId::None(), states);
@@ -3197,31 +3175,14 @@ Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
}
-Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest, int index) {
- if (rest == NULL) return NULL;
-
- DCHECK(index >= 0);
- const Operator* op = javascript()->CallRuntime(Runtime::kNewRestParamSlow, 2);
- Node* object = NewNode(op, jsgraph()->SmiConstant(index),
- jsgraph()->SmiConstant(language_mode()));
-
- // Assign the object to the rest parameter variable.
- DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
- // This should never lazy deopt, so it is fine to send invalid bailout id.
- FrameStateBeforeAndAfter states(this, BailoutId::None());
- BuildVariableAssignment(rest, object, Token::ASSIGN, VectorSlotPair(),
- BailoutId::None(), states);
- return object;
-}
-
-
Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
if (this_function_var == nullptr) return nullptr;
// Retrieve the closure we were called with.
Node* this_function = GetFunctionClosure();
- // Assign the object to the {.this_function} variable.
+  // Assign the object to the {.this_function} variable. This should never
+  // lazy deopt, so it is fine to send an invalid bailout id.
FrameStateBeforeAndAfter states(this, BailoutId::None());
BuildVariableAssignment(this_function_var, this_function, Token::INIT_CONST,
VectorSlotPair(), BailoutId::None(), states);
@@ -3237,7 +3198,8 @@ Node* AstGraphBuilder::BuildNewTargetVariable(Variable* new_target_var) {
javascript()->CallRuntime(Runtime::kGetOriginalConstructor, 0);
Node* object = NewNode(op);
- // Assign the object to the {new.target} variable.
+  // Assign the object to the {new.target} variable. This should never lazy
+  // deopt, so it is fine to send an invalid bailout id.
FrameStateBeforeAndAfter states(this, BailoutId::None());
BuildVariableAssignment(new_target_var, object, Token::INIT_CONST,
VectorSlotPair(), BailoutId::None(), states);
@@ -3506,13 +3468,10 @@ Node* AstGraphBuilder::BuildVariableAssignment(
return value;
} else if (mode == LET && op != Token::INIT_LET) {
// Perform an initialization check for let declared variables.
- // Also note that the dynamic hole-check is only done to ensure that
- // this does not break in the presence of do-expressions within the
- // temporal dead zone of a let declared variable.
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
value = BuildThrowReferenceError(variable, bailout_id);
- } else if (value->opcode() == IrOpcode::kPhi) {
+ } else if (current->opcode() == IrOpcode::kPhi) {
value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
}
} else if (mode == CONST && op == Token::INIT_CONST) {
@@ -3528,7 +3487,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
Node* current = environment()->Lookup(variable);
if (current->op() == the_hole->op()) {
return BuildThrowReferenceError(variable, bailout_id);
- } else if (value->opcode() == IrOpcode::kPhi) {
+ } else if (current->opcode() == IrOpcode::kPhi) {
BuildHoleCheckThenThrow(current, variable, value, bailout_id);
}
return BuildThrowConstAssignError(bailout_id);
@@ -3625,8 +3584,7 @@ Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
const VectorSlotPair& feedback) {
- const Operator* op =
- javascript()->LoadNamed(MakeUnique(name), feedback, language_mode());
+ const Operator* op = javascript()->LoadNamed(name, feedback, language_mode());
Node* node = NewNode(op, object, BuildLoadFeedbackVector());
return Record(js_type_feedback_, node, feedback.slot());
}
@@ -3649,7 +3607,7 @@ Node* AstGraphBuilder::BuildNamedStore(Node* object, Handle<Name> name,
const VectorSlotPair& feedback,
TypeFeedbackId id) {
const Operator* op =
- javascript()->StoreNamed(language_mode(), MakeUnique(name), feedback);
+ javascript()->StoreNamed(language_mode(), name, feedback);
Node* node = NewNode(op, object, value, BuildLoadFeedbackVector());
if (FLAG_vector_stores) {
return Record(js_type_feedback_, node, feedback.slot());
@@ -3709,8 +3667,8 @@ Node* AstGraphBuilder::BuildGlobalLoad(Node* script_context, Node* global,
Handle<Name> name,
const VectorSlotPair& feedback,
TypeofMode typeof_mode, int slot_index) {
- const Operator* op = javascript()->LoadGlobal(MakeUnique(name), feedback,
- typeof_mode, slot_index);
+ const Operator* op =
+ javascript()->LoadGlobal(name, feedback, typeof_mode, slot_index);
Node* node = NewNode(op, script_context, global, BuildLoadFeedbackVector());
return Record(js_type_feedback_, node, feedback.slot());
}
@@ -3720,8 +3678,8 @@ Node* AstGraphBuilder::BuildGlobalStore(Node* script_context, Node* global,
Handle<Name> name, Node* value,
const VectorSlotPair& feedback,
TypeFeedbackId id, int slot_index) {
- const Operator* op = javascript()->StoreGlobal(
- language_mode(), MakeUnique(name), feedback, slot_index);
+ const Operator* op =
+ javascript()->StoreGlobal(language_mode(), name, feedback, slot_index);
Node* node =
NewNode(op, script_context, global, value, BuildLoadFeedbackVector());
if (FLAG_vector_stores) {
@@ -3744,14 +3702,6 @@ Node* AstGraphBuilder::BuildLoadImmutableObjectField(Node* object, int offset) {
}
-Node* AstGraphBuilder::BuildLoadBuiltinsObject() {
- Node* global = BuildLoadGlobalObject();
- Node* builtins =
- BuildLoadObjectField(global, JSGlobalObject::kBuiltinsOffset);
- return builtins;
-}
-
-
Node* AstGraphBuilder::BuildLoadGlobalObject() {
const Operator* load_op =
javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true);
@@ -3759,6 +3709,14 @@ Node* AstGraphBuilder::BuildLoadGlobalObject() {
}
+Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
+ Node* global = BuildLoadGlobalObject();
+ Node* native_context =
+ BuildLoadObjectField(global, GlobalObject::kNativeContextOffset);
+ return NewNode(javascript()->LoadContext(0, index, true), native_context);
+}
+
+
Node* AstGraphBuilder::BuildLoadGlobalProxy() {
Node* global = BuildLoadGlobalObject();
Node* proxy =
@@ -3806,7 +3764,7 @@ Node* AstGraphBuilder::BuildToBoolean(Node* input) {
return jsgraph_->BooleanConstant(!m.Is(0) && !m.IsNaN());
}
case IrOpcode::kHeapConstant: {
- Handle<HeapObject> object = HeapObjectMatcher(input).Value().handle();
+ Handle<HeapObject> object = HeapObjectMatcher(input).Value();
return jsgraph_->BooleanConstant(object->BooleanValue());
}
case IrOpcode::kJSEqual:
@@ -3848,11 +3806,14 @@ Node* AstGraphBuilder::BuildToObject(Node* input, BailoutId bailout_id) {
Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
- Expression* expr,
- const VectorSlotPair& feedback) {
+ ObjectLiteralProperty* property,
+ int slot_number) {
+ Expression* expr = property->value();
if (!FunctionLiteral::NeedsHomeObject(expr)) return value;
Handle<Name> name = isolate()->factory()->home_object_symbol();
FrameStateBeforeAndAfter states(this, BailoutId::None());
+ VectorSlotPair feedback =
+ CreateVectorSlotPair(property->GetSlot(slot_number));
Node* store = BuildNamedStore(value, name, home_object, feedback,
TypeFeedbackId::None());
states.AddToNode(store, BailoutId::None(), OutputFrameStateCombine::Ignore());
@@ -4258,12 +4219,12 @@ Node* AstGraphBuilder::MergeControl(Node* control, Node* other) {
// Control node for loop exists, add input.
const Operator* op = common()->Loop(inputs);
control->AppendInput(graph_zone(), other);
- control->set_op(op);
+ NodeProperties::ChangeOp(control, op);
} else if (control->opcode() == IrOpcode::kMerge) {
// Control node for merge exists, add input.
const Operator* op = common()->Merge(inputs);
control->AppendInput(graph_zone(), other);
- control->set_op(op);
+ NodeProperties::ChangeOp(control, op);
} else {
// Control node is a singleton, introduce a merge.
const Operator* op = common()->Merge(inputs);
@@ -4279,8 +4240,8 @@ Node* AstGraphBuilder::MergeEffect(Node* value, Node* other, Node* control) {
if (value->opcode() == IrOpcode::kEffectPhi &&
NodeProperties::GetControlInput(value) == control) {
// Phi already exists, add input.
- value->set_op(common()->EffectPhi(inputs));
value->InsertInput(graph_zone(), inputs - 1, other);
+ NodeProperties::ChangeOp(value, common()->EffectPhi(inputs));
} else if (value != other) {
// Phi does not exist yet, introduce one.
value = NewEffectPhi(inputs, value, control);
@@ -4295,8 +4256,8 @@ Node* AstGraphBuilder::MergeValue(Node* value, Node* other, Node* control) {
if (value->opcode() == IrOpcode::kPhi &&
NodeProperties::GetControlInput(value) == control) {
// Phi already exists, add input.
- value->set_op(common()->Phi(kMachAnyTagged, inputs));
value->InsertInput(graph_zone(), inputs - 1, other);
+ NodeProperties::ChangeOp(value, common()->Phi(kMachAnyTagged, inputs));
} else if (value != other) {
// Phi does not exist yet, introduce one.
value = NewPhi(inputs, value, control);
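A pattern that recurs throughout this file (and the rest of the patch) is the replacement of direct `node->set_op(op)` mutation with `NodeProperties::ChangeOp(node, op)`, called only after the node's inputs have been adjusted. Centralizing the mutation lets the graph layer check that the input count still matches the new operator. A minimal sketch of the idiom, using simplified stand-ins rather than the real V8 classes:

    #include <cassert>
    #include <vector>

    // Simplified stand-ins for the compiler's Operator and Node classes.
    struct Operator { int input_count; };
    struct Node {
      const Operator* op;
      std::vector<Node*> inputs;
    };

    namespace NodeProperties {
    // Swap a node's operator after its inputs were edited, so the new
    // operator's arity can be checked against the actual input count.
    inline void ChangeOp(Node* node, const Operator* new_op) {
      assert(static_cast<int>(node->inputs.size()) == new_op->input_count);
      node->op = new_op;
    }
    }  // namespace NodeProperties

This is why the diff consistently reorders the calls: AppendInput/InsertInput/TrimInputCount come first, and ChangeOp comes last.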
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index bb031ff447..8b90f072a0 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -230,12 +230,6 @@ class AstGraphBuilder : public AstVisitor {
// frame states with the undefined values.
void ClearNonLiveSlotsInFrameStates();
- // Helper to wrap a Handle<T> into a Unique<T>.
- template <class T>
- Unique<T> MakeUnique(Handle<T> object) {
- return Unique<T>::CreateUninitialized(object);
- }
-
Node** EnsureInputBufferSize(int size);
// Named and keyed loads require a VectorSlotPair for successful lowering.
@@ -262,9 +256,6 @@ class AstGraphBuilder : public AstVisitor {
// Builder to create an arguments object if it is used.
Node* BuildArgumentsObject(Variable* arguments);
- // Builder to create an array of rest parameters if used
- Node* BuildRestArgumentsArray(Variable* rest, int index);
-
// Builder that assigns to the {.this_function} internal variable if needed.
Node* BuildThisFunctionVariable(Variable* this_function_var);
@@ -315,8 +306,8 @@ class AstGraphBuilder : public AstVisitor {
TypeFeedbackId id, int slot_index);
// Builders for accessing the function context.
- Node* BuildLoadBuiltinsObject();
Node* BuildLoadGlobalObject();
+ Node* BuildLoadNativeContextField(int index);
Node* BuildLoadGlobalProxy();
Node* BuildLoadFeedbackVector();
@@ -335,8 +326,9 @@ class AstGraphBuilder : public AstVisitor {
// Builder for adding the [[HomeObject]] to a value if the value came from a
// function literal and needs a home object. Do nothing otherwise.
- Node* BuildSetHomeObject(Node* value, Node* home_object, Expression* expr,
- const VectorSlotPair& feedback);
+ Node* BuildSetHomeObject(Node* value, Node* home_object,
+ ObjectLiteralProperty* property,
+ int slot_number = 0);
// Builders for error reporting at runtime.
Node* BuildThrowError(Node* exception, BailoutId bailout_id);
@@ -411,6 +403,10 @@ class AstGraphBuilder : public AstVisitor {
const VectorSlotPair& feedback,
BailoutId bailout_id);
+ // Dispatched from VisitObjectLiteral.
+ void VisitObjectLiteralAccessor(Node* home_object,
+ ObjectLiteralProperty* property);
+
// Dispatched from VisitClassLiteral.
void VisitClassLiteralContents(ClassLiteral* expr);
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index 4d1d41e122..d9ec109e40 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -193,12 +193,21 @@ void ALAA::VisitCompareOperation(CompareOperation* e) {
void ALAA::VisitSpread(Spread* e) { Visit(e->expression()); }
+void ALAA::VisitEmptyParentheses(EmptyParentheses* e) { UNREACHABLE(); }
+
+
void ALAA::VisitCaseClause(CaseClause* cc) {
if (!cc->is_default()) Visit(cc->label());
VisitStatements(cc->statements());
}
+void ALAA::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* stmt) {
+ Visit(stmt->statement());
+}
+
+
// ---------------------------------------------------------------------------
// -- Interesting nodes-------------------------------------------------------
// ---------------------------------------------------------------------------
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index 00291bba48..23170e701b 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -81,11 +81,12 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
// Construct increment operation.
Node* base = graph->NewNode(
PointerConstant(&common, data->GetCounterAddress(block_number)));
- Node* load = graph->NewNode(machine.Load(kMachUint32), base, zero);
+ Node* load = graph->NewNode(machine.Load(kMachUint32), base, zero,
+ graph->start(), graph->start());
Node* inc = graph->NewNode(machine.Int32Add(), load, one);
Node* store = graph->NewNode(
machine.Store(StoreRepresentation(kMachUint32, kNoWriteBarrier)), base,
- zero, inc);
+ zero, inc, graph->start(), graph->start());
// Insert the new nodes.
static const int kArraySize = 6;
Node* to_insert[kArraySize] = {zero, one, base, load, inc, store};
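The change above is mechanical but worth noting: machine-level Load and Store nodes now carry explicit effect and control inputs (both wired to `graph->start()` here, since the counter update is inserted at the top of the block). At run time, the three inserted nodes compute a plain counter increment; expressed directly in C++ rather than as IR nodes, the sequence is roughly:

    #include <cstdint>

    // What the inserted load/add/store sequence computes at run time
    // (a sketch; the instrumentor builds IR nodes, not C++).
    void IncrementBlockCounter(uint32_t* counter_address) {
      uint32_t value = *counter_address;  // machine.Load(kMachUint32)
      value = value + 1;                  // machine.Int32Add()
      *counter_address = value;           // machine.Store(kMachUint32)
    }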
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
new file mode 100644
index 0000000000..5ba18ffc97
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -0,0 +1,547 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-graph-builder.h"
+
+#include "src/compiler/linkage.h"
+#include "src/compiler/operator-properties.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Issues:
+// - Need to deal with FrameState / FrameStateBeforeAndAfter / StateValue.
+// - Scopes - intimately tied to AST. Need to eval what is needed.
+// - Need to resolve closure parameter treatment.
+BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
+ int register_count,
+ int parameter_count,
+ Node* control_dependency,
+ Node* context)
+ : builder_(builder),
+ register_count_(register_count),
+ parameter_count_(parameter_count),
+ context_(context),
+ control_dependency_(control_dependency),
+ effect_dependency_(control_dependency),
+ values_(builder->local_zone()) {
+ // The layout of values_ is:
+ //
+ // [receiver] [parameters] [registers]
+ //
+ // parameter[0] is the receiver (this), parameters 1..N are the
+ // parameters supplied to the method (arg0..argN-1). The accumulator
+ // is stored separately.
+
+ // Parameters including the receiver
+ for (int i = 0; i < parameter_count; i++) {
+ const char* debug_name = (i == 0) ? "%this" : nullptr;
+ const Operator* op = common()->Parameter(i, debug_name);
+ Node* parameter = builder->graph()->NewNode(op, graph()->start());
+ values()->push_back(parameter);
+ }
+
+ // Registers
+ register_base_ = static_cast<int>(values()->size());
+ Node* undefined_constant = builder->jsgraph()->UndefinedConstant();
+ values()->insert(values()->end(), register_count, undefined_constant);
+
+ // Accumulator
+ accumulator_ = undefined_constant;
+}
+
+
+int BytecodeGraphBuilder::Environment::RegisterToValuesIndex(
+ interpreter::Register the_register) const {
+ if (the_register.is_parameter()) {
+ return the_register.ToParameterIndex(parameter_count());
+ } else {
+ return the_register.index() + register_base();
+ }
+}
+
+
+void BytecodeGraphBuilder::Environment::BindRegister(
+ interpreter::Register the_register, Node* node) {
+ int values_index = RegisterToValuesIndex(the_register);
+ values()->at(values_index) = node;
+}
+
+
+Node* BytecodeGraphBuilder::Environment::LookupRegister(
+ interpreter::Register the_register) const {
+ int values_index = RegisterToValuesIndex(the_register);
+ return values()->at(values_index);
+}
+
+
+void BytecodeGraphBuilder::Environment::BindAccumulator(Node* node) {
+ accumulator_ = node;
+}
+
+
+Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
+ return accumulator_;
+}
+
+
+bool BytecodeGraphBuilder::Environment::IsMarkedAsUnreachable() const {
+ return GetControlDependency()->opcode() == IrOpcode::kDead;
+}
+
+
+void BytecodeGraphBuilder::Environment::MarkAsUnreachable() {
+ UpdateControlDependency(builder()->jsgraph()->Dead());
+}
+
+
+BytecodeGraphBuilder::BytecodeGraphBuilder(Zone* local_zone,
+ CompilationInfo* compilation_info,
+ JSGraph* jsgraph)
+ : local_zone_(local_zone),
+ info_(compilation_info),
+ jsgraph_(jsgraph),
+ input_buffer_size_(0),
+ input_buffer_(nullptr),
+ exit_controls_(local_zone) {
+ bytecode_array_ = handle(info()->shared_info()->bytecode_array());
+}
+
+
+Node* BytecodeGraphBuilder::GetFunctionContext() {
+ if (!function_context_.is_set()) {
+ // Parameter (arity + 1) is special for the outer context of the function
+ const Operator* op =
+ common()->Parameter(bytecode_array()->parameter_count(), "%context");
+ Node* node = NewNode(op, graph()->start());
+ function_context_.set(node);
+ }
+ return function_context_.get();
+}
+
+
+bool BytecodeGraphBuilder::CreateGraph(bool stack_check) {
+ // Set up the basic structure of the graph. Outputs for {Start} are
+ // the formal parameters (including the receiver) plus context and
+ // closure.
+
+ // The additional count items are for the context and closure.
+ int actual_parameter_count = bytecode_array()->parameter_count() + 2;
+ graph()->SetStart(graph()->NewNode(common()->Start(actual_parameter_count)));
+
+ Environment env(this, bytecode_array()->register_count(),
+ bytecode_array()->parameter_count(), graph()->start(),
+ GetFunctionContext());
+ set_environment(&env);
+
+ // Build function context only if there are context allocated variables.
+ if (info()->num_heap_slots() > 0) {
+ UNIMPLEMENTED(); // TODO(oth): Write ast-graph-builder equivalent.
+ } else {
+ // Simply use the outer function context in building the graph.
+ CreateGraphBody(stack_check);
+ }
+
+ // Finish the basic structure of the graph.
+ DCHECK_NE(0u, exit_controls_.size());
+ int const input_count = static_cast<int>(exit_controls_.size());
+ Node** const inputs = &exit_controls_.front();
+ Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
+ graph()->SetEnd(end);
+
+ return true;
+}
+
+
+void BytecodeGraphBuilder::CreateGraphBody(bool stack_check) {
+ // TODO(oth): Review ast-graph-builder equivalent, i.e. arguments
+ // object setup, this function variable if used, tracing hooks.
+ VisitBytecodes();
+}
+
+
+void BytecodeGraphBuilder::VisitBytecodes() {
+ interpreter::BytecodeArrayIterator iterator(bytecode_array());
+ while (!iterator.done()) {
+ switch (iterator.current_bytecode()) {
+#define BYTECODE_CASE(name, ...) \
+ case interpreter::Bytecode::k##name: \
+ Visit##name(iterator); \
+ break;
+ BYTECODE_LIST(BYTECODE_CASE)
+#undef BYTECODE_CASE
+ }
+ iterator.Advance();
+ }
+}
+
+
+void BytecodeGraphBuilder::VisitLdaZero(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->ZeroConstant();
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaSmi8(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->Constant(iterator.GetSmi8Operand(0));
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->Constant(iterator.GetConstantForIndexOperand(0));
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaUndefined(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->UndefinedConstant();
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaNull(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->NullConstant();
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaTheHole(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->TheHoleConstant();
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaTrue(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->TrueConstant();
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaFalse(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* node = jsgraph()->FalseConstant();
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitLdar(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* value = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ environment()->BindAccumulator(value);
+}
+
+
+void BytecodeGraphBuilder::VisitStar(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* value = environment()->LookupAccumulator();
+ environment()->BindRegister(iterator.GetRegisterOperand(0), value);
+}
+
+
+void BytecodeGraphBuilder::VisitLdaGlobal(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitLoadIC(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedLoadIC(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitStoreIC(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitKeyedStoreIC(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitCall(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::BuildBinaryOp(
+ const Operator* js_op, const interpreter::BytecodeArrayIterator& iterator) {
+ Node* left = environment()->LookupRegister(iterator.GetRegisterOperand(0));
+ Node* right = environment()->LookupAccumulator();
+ Node* node = NewNode(js_op, left, right);
+
+  // TODO(oth): Real frame state and environment checkpointing.
+ int frame_state_count =
+ OperatorProperties::GetFrameStateInputCount(node->op());
+ for (int i = 0; i < frame_state_count; i++) {
+ NodeProperties::ReplaceFrameStateInput(node, i,
+ jsgraph()->EmptyFrameState());
+ }
+ environment()->BindAccumulator(node);
+}
+
+
+void BytecodeGraphBuilder::VisitAdd(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildBinaryOp(javascript()->Add(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitSub(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildBinaryOp(javascript()->Subtract(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitMul(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildBinaryOp(javascript()->Multiply(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitDiv(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildBinaryOp(javascript()->Divide(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitMod(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ BuildBinaryOp(javascript()->Modulus(language_mode()), iterator);
+}
+
+
+void BytecodeGraphBuilder::VisitTestEqual(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitTestNotEqual(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitTestEqualStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitTestNotEqualStrict(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitTestLessThan(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitTestGreaterThan(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitTestLessThanOrEqual(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitTestGreaterThanOrEqual(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitTestIn(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitTestInstanceOf(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitToBoolean(
+    const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJump(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfTrue(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfTrueConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfFalse(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitJumpIfFalseConstant(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ UNIMPLEMENTED();
+}
+
+
+void BytecodeGraphBuilder::VisitReturn(
+ const interpreter::BytecodeArrayIterator& iterator) {
+ Node* control =
+ NewNode(common()->Return(), environment()->LookupAccumulator());
+ UpdateControlDependencyToLeaveFunction(control);
+}
+
+
+Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
+ if (size > input_buffer_size_) {
+ size = size + kInputBufferSizeIncrement + input_buffer_size_;
+ input_buffer_ = local_zone()->NewArray<Node*>(size);
+ input_buffer_size_ = size;
+ }
+ return input_buffer_;
+}
+
+
+Node* BytecodeGraphBuilder::MakeNode(const Operator* op, int value_input_count,
+ Node** value_inputs, bool incomplete) {
+ DCHECK_EQ(op->ValueInputCount(), value_input_count);
+
+ bool has_context = OperatorProperties::HasContextInput(op);
+ int frame_state_count = OperatorProperties::GetFrameStateInputCount(op);
+ bool has_control = op->ControlInputCount() == 1;
+ bool has_effect = op->EffectInputCount() == 1;
+
+ DCHECK_LT(op->ControlInputCount(), 2);
+ DCHECK_LT(op->EffectInputCount(), 2);
+
+ Node* result = NULL;
+ if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
+ result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
+ } else {
+ int input_count_with_deps = value_input_count;
+ if (has_context) ++input_count_with_deps;
+ input_count_with_deps += frame_state_count;
+ if (has_control) ++input_count_with_deps;
+ if (has_effect) ++input_count_with_deps;
+ Node** buffer = EnsureInputBufferSize(input_count_with_deps);
+ memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+ Node** current_input = buffer + value_input_count;
+ if (has_context) {
+ *current_input++ = environment()->Context();
+ }
+ for (int i = 0; i < frame_state_count; i++) {
+ // The frame state will be inserted later. Here we misuse
+ // the {Dead} node as a sentinel to be later overwritten
+ // with the real frame state.
+ *current_input++ = jsgraph()->Dead();
+ }
+ if (has_effect) {
+ *current_input++ = environment()->GetEffectDependency();
+ }
+ if (has_control) {
+ *current_input++ = environment()->GetControlDependency();
+ }
+ result = graph()->NewNode(op, input_count_with_deps, buffer, incomplete);
+ if (!environment()->IsMarkedAsUnreachable()) {
+ // Update the current control dependency for control-producing nodes.
+ if (NodeProperties::IsControl(result)) {
+ environment()->UpdateControlDependency(result);
+ }
+ // Update the current effect dependency for effect-producing nodes.
+ if (result->op()->EffectOutputCount() > 0) {
+ environment()->UpdateEffectDependency(result);
+ }
+ // Add implicit success continuation for throwing nodes.
+ if (!result->op()->HasProperty(Operator::kNoThrow)) {
+ const Operator* if_success = common()->IfSuccess();
+ Node* on_success = graph()->NewNode(if_success, result);
+ environment_->UpdateControlDependency(on_success);
+ }
+ }
+ }
+
+ return result;
+}
+
+
+Node* BytecodeGraphBuilder::MergeControl(Node* control, Node* other) {
+ int inputs = control->op()->ControlInputCount() + 1;
+ if (control->opcode() == IrOpcode::kLoop) {
+ // Control node for loop exists, add input.
+ const Operator* op = common()->Loop(inputs);
+ control->AppendInput(graph_zone(), other);
+ NodeProperties::ChangeOp(control, op);
+ } else if (control->opcode() == IrOpcode::kMerge) {
+ // Control node for merge exists, add input.
+ const Operator* op = common()->Merge(inputs);
+ control->AppendInput(graph_zone(), other);
+ NodeProperties::ChangeOp(control, op);
+ } else {
+ // Control node is a singleton, introduce a merge.
+ const Operator* op = common()->Merge(inputs);
+ Node* merge_inputs[] = {control, other};
+ control = graph()->NewNode(op, arraysize(merge_inputs), merge_inputs, true);
+ }
+ return control;
+}
+
+
+void BytecodeGraphBuilder::UpdateControlDependencyToLeaveFunction(Node* exit) {
+ if (environment()->IsMarkedAsUnreachable()) return;
+ environment()->MarkAsUnreachable();
+ exit_controls_.push_back(exit);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
new file mode 100644
index 0000000000..4e479ba3e6
--- /dev/null
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -0,0 +1,174 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
+#define V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
+
+#include "src/compiler.h"
+#include "src/compiler/js-graph.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecodes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// The BytecodeGraphBuilder produces a high-level IR graph based on
+// interpreter bytecodes.
+class BytecodeGraphBuilder {
+ public:
+ BytecodeGraphBuilder(Zone* local_zone, CompilationInfo* info,
+ JSGraph* jsgraph);
+
+ // Creates a graph by visiting bytecodes.
+ bool CreateGraph(bool stack_check = true);
+
+ Graph* graph() const { return jsgraph_->graph(); }
+
+ private:
+ class Environment;
+
+ void CreateGraphBody(bool stack_check);
+ void VisitBytecodes();
+
+ Node* LoadAccumulator(Node* value);
+
+ Node* GetFunctionContext();
+
+ void set_environment(Environment* env) { environment_ = env; }
+ const Environment* environment() const { return environment_; }
+ Environment* environment() { return environment_; }
+
+ // Node creation helpers
+ Node* NewNode(const Operator* op, bool incomplete = false) {
+ return MakeNode(op, 0, static_cast<Node**>(NULL), incomplete);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1) {
+ Node* buffer[] = {n1};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* NewNode(const Operator* op, Node* n1, Node* n2) {
+ Node* buffer[] = {n1, n2};
+ return MakeNode(op, arraysize(buffer), buffer, false);
+ }
+
+ Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
+ bool incomplete);
+
+ Node* MergeControl(Node* control, Node* other);
+
+ Node** EnsureInputBufferSize(int size);
+
+ void UpdateControlDependencyToLeaveFunction(Node* exit);
+
+ void BuildBinaryOp(const Operator* op,
+ const interpreter::BytecodeArrayIterator& iterator);
+
+ // Growth increment for the temporary buffer used to construct input lists to
+ // new nodes.
+ static const int kInputBufferSizeIncrement = 64;
+
+ // Field accessors
+ CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+ Zone* graph_zone() const { return graph()->zone(); }
+ CompilationInfo* info() const { return info_; }
+ JSGraph* jsgraph() const { return jsgraph_; }
+ JSOperatorBuilder* javascript() const { return jsgraph_->javascript(); }
+ Zone* local_zone() const { return local_zone_; }
+ const Handle<BytecodeArray>& bytecode_array() const {
+ return bytecode_array_;
+ }
+
+ LanguageMode language_mode() const {
+    // TODO(oth): need to propagate the language mode through.
+ return LanguageMode::SLOPPY;
+ }
+
+#define DECLARE_VISIT_BYTECODE(name, ...) \
+ void Visit##name(const interpreter::BytecodeArrayIterator& iterator);
+ BYTECODE_LIST(DECLARE_VISIT_BYTECODE)
+#undef DECLARE_VISIT_BYTECODE
+
+ Zone* local_zone_;
+ CompilationInfo* info_;
+ JSGraph* jsgraph_;
+ Handle<BytecodeArray> bytecode_array_;
+ Environment* environment_;
+
+ // Temporary storage for building node input lists.
+ int input_buffer_size_;
+ Node** input_buffer_;
+
+ // Nodes representing values in the activation record.
+ SetOncePointer<Node> function_context_;
+
+ // Control nodes that exit the function body.
+ ZoneVector<Node*> exit_controls_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilder);
+};
+
+
+class BytecodeGraphBuilder::Environment : public ZoneObject {
+ public:
+ Environment(BytecodeGraphBuilder* builder, int register_count,
+ int parameter_count, Node* control_dependency, Node* context);
+
+ int parameter_count() const { return parameter_count_; }
+ int register_count() const { return register_count_; }
+
+ void BindRegister(interpreter::Register the_register, Node* node);
+ Node* LookupRegister(interpreter::Register the_register) const;
+
+ void BindAccumulator(Node* node);
+ Node* LookupAccumulator() const;
+
+ bool IsMarkedAsUnreachable() const;
+ void MarkAsUnreachable();
+
+ // Effect dependency tracked by this environment.
+ Node* GetEffectDependency() { return effect_dependency_; }
+ void UpdateEffectDependency(Node* dependency) {
+ effect_dependency_ = dependency;
+ }
+
+ // Control dependency tracked by this environment.
+ Node* GetControlDependency() const { return control_dependency_; }
+ void UpdateControlDependency(Node* dependency) {
+ control_dependency_ = dependency;
+ }
+
+ Node* Context() const { return context_; }
+
+ private:
+ int RegisterToValuesIndex(interpreter::Register the_register) const;
+
+ Zone* zone() const { return builder_->local_zone(); }
+ Graph* graph() const { return builder_->graph(); }
+ CommonOperatorBuilder* common() const { return builder_->common(); }
+ BytecodeGraphBuilder* builder() const { return builder_; }
+ const NodeVector* values() const { return &values_; }
+ NodeVector* values() { return &values_; }
+ Node* accumulator() { return accumulator_; }
+ int register_base() const { return register_base_; }
+
+ BytecodeGraphBuilder* builder_;
+ int register_count_;
+ int parameter_count_;
+ Node* accumulator_;
+ Node* context_;
+ Node* control_dependency_;
+ Node* effect_dependency_;
+ NodeVector values_;
+ int register_base_;
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
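Both the visitor declarations above and the dispatch switch in VisitBytecodes are generated from the same BYTECODE_LIST X-macro, so adding a bytecode without a matching visitor fails to compile. A toy illustration of the pattern, with a hypothetical two-entry list standing in for the real BYTECODE_LIST:

    // MY_BYTECODE_LIST is hypothetical, standing in for BYTECODE_LIST.
    #define MY_BYTECODE_LIST(V) \
      V(LdaZero)                \
      V(Return)

    enum class Bytecode {
    #define ENTRY(name) k##name,
      MY_BYTECODE_LIST(ENTRY)
    #undef ENTRY
    };

    struct ToyBuilder {
    #define DECLARE_VISIT(name) void Visit##name() {}
      MY_BYTECODE_LIST(DECLARE_VISIT)
    #undef DECLARE_VISIT

      // The switch cases are generated from the same list, mirroring
      // BytecodeGraphBuilder::VisitBytecodes above.
      void Dispatch(Bytecode bytecode) {
        switch (bytecode) {
    #define CASE(name)        \
      case Bytecode::k##name: \
        Visit##name();        \
        break;
          MY_BYTECODE_LIST(CASE)
    #undef CASE
        }
      }
    };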
diff --git a/deps/v8/src/compiler/change-lowering.cc b/deps/v8/src/compiler/change-lowering.cc
index ca92951670..4421c4f3e3 100644
--- a/deps/v8/src/compiler/change-lowering.cc
+++ b/deps/v8/src/compiler/change-lowering.cc
@@ -157,7 +157,7 @@ Reduction ChangeLowering::ChangeFloat64ToTagged(Node* value, Node* control) {
Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
if (machine()->Is64() ||
- NodeProperties::GetBounds(value).upper->Is(Type::SignedSmall())) {
+ NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
return Replace(ChangeInt32ToSmi(value));
}
@@ -184,7 +184,7 @@ Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
Signedness signedness) {
- if (NodeProperties::GetBounds(value).upper->Is(Type::TaggedSigned())) {
+ if (NodeProperties::GetType(value)->Is(Type::TaggedSigned())) {
return Replace(ChangeSmiToInt32(value));
}
@@ -193,7 +193,7 @@ Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
? machine()->ChangeFloat64ToInt32()
: machine()->ChangeFloat64ToUint32();
- if (NodeProperties::GetBounds(value).upper->Is(Type::TaggedPointer())) {
+ if (NodeProperties::GetType(value)->Is(Type::TaggedPointer())) {
return Replace(graph()->NewNode(op, LoadHeapNumberValue(value, control)));
}
@@ -312,7 +312,7 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
Reduction ChangeLowering::ChangeUint32ToTagged(Node* value, Node* control) {
- if (NodeProperties::GetBounds(value).upper->Is(Type::UnsignedSmall())) {
+ if (NodeProperties::GetType(value)->Is(Type::UnsignedSmall())) {
return Replace(ChangeUint32ToSmi(value));
}
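These hunks are a mechanical rename: the typer now attaches a single Type to each node instead of a Bounds pair, so `GetBounds(value).upper` becomes `GetType(value)`. The surrounding logic is the usual Smi fast path: when the type proves a 32-bit value fits a small integer, tagging is a plain shift instead of a heap-number allocation. A hedged sketch of the 32-bit tagging rule, assuming the conventional 1-bit Smi tag (details vary by architecture):

    #include <cstdint>
    #include <optional>

    // 32-bit Smi tagging sketch, assuming tag bit 0 is clear for Smis:
    // a value fits iff the left shift by one is reversible.
    std::optional<int32_t> TryTagAsSmi32(int32_t value) {
      int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
      if ((tagged >> 1) != value) return std::nullopt;  // overflow: HeapNumber
      return tagged;
    }

On 64-bit targets a pointer-sized Smi can hold any int32, which is why the `machine()->Is64()` check skips the type test entirely.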
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index f0762e9bbe..83cbd22604 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -37,6 +37,8 @@ class InstructionOperandConverter {
double InputDouble(size_t index) { return ToDouble(instr_->InputAt(index)); }
+ float InputFloat32(size_t index) { return ToFloat32(instr_->InputAt(index)); }
+
int32_t InputInt32(size_t index) {
return ToConstant(instr_->InputAt(index)).ToInt32();
}
@@ -112,6 +114,8 @@ class InstructionOperandConverter {
double ToDouble(InstructionOperand* op) { return ToConstant(op).ToFloat64(); }
+ float ToFloat32(InstructionOperand* op) { return ToConstant(op).ToFloat32(); }
+
ExternalReference ToExternalReference(InstructionOperand* op) {
return ToConstant(op).ToExternalReference();
}
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 07a741f73e..91602a02a3 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -147,7 +147,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
// Ensure there is space for lazy deoptimization in the code.
- if (!info->IsStub()) {
+ if (info->ShouldEnsureSpaceForLazyDeopt()) {
int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
while (masm()->pc_offset() < target_offset) {
masm()->nop();
@@ -167,8 +167,8 @@ Handle<Code> CodeGenerator::GenerateCode() {
safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
- Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
- masm(), info->flags(), info);
+ Handle<Code> result =
+ v8::internal::CodeGenerator::MakeCodeEpilogue(masm(), info);
result->set_is_turbofanned(true);
result->set_stack_slots(frame()->GetSpillSlotCount());
result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
@@ -193,7 +193,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
PopulateDeoptimizationData(result);
// Ensure there is space for lazy deoptimization in the relocation info.
- if (!info->IsStub()) {
+  if (info->ShouldEnsureSpaceForLazyDeopt()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(result);
}
@@ -359,11 +359,8 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
data->SetInlinedFunctionCount(
Smi::FromInt(static_cast<int>(inlined_function_count_)));
data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
- // TODO(jarin) The following code was copied over from Lithium, not sure
- // whether the scope or the IsOptimizing condition are really needed.
- if (info->IsOptimizing()) {
- // Reference to shared function info does not change between phases.
- AllowDeferredHandleDereference allow_handle_dereference;
+
+ if (info->has_shared_info()) {
data->SetSharedFunctionInfo(*info->shared_info());
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
diff --git a/deps/v8/src/compiler/common-node-cache.cc b/deps/v8/src/compiler/common-node-cache.cc
index b005c952dd..e7f7436a0b 100644
--- a/deps/v8/src/compiler/common-node-cache.cc
+++ b/deps/v8/src/compiler/common-node-cache.cc
@@ -6,7 +6,6 @@
#include "src/assembler.h"
#include "src/compiler/node.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index c1cd75ef7b..e4af2ad1f9 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -33,8 +33,7 @@ Decision DecideCondition(Node* const cond) {
}
case IrOpcode::kHeapConstant: {
HeapObjectMatcher mcond(cond);
- return mcond.Value().handle()->BooleanValue() ? Decision::kTrue
- : Decision::kFalse;
+ return mcond.Value()->BooleanValue() ? Decision::kTrue : Decision::kFalse;
}
default:
return Decision::kUnknown;
@@ -86,10 +85,10 @@ Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
for (Node* const use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfTrue:
- use->set_op(common()->IfFalse());
+ NodeProperties::ChangeOp(use, common()->IfFalse());
break;
case IrOpcode::kIfFalse:
- use->set_op(common()->IfTrue());
+ NodeProperties::ChangeOp(use, common()->IfTrue());
break;
default:
UNREACHABLE();
@@ -100,7 +99,8 @@ Reduction CommonOperatorReducer::ReduceBranch(Node* node) {
// graph reduction logic will ensure that the uses are revisited properly.
node->ReplaceInput(0, cond->InputAt(0));
// Negate the hint for {branch}.
- node->set_op(common()->Branch(NegateBranchHint(BranchHintOf(node->op()))));
+ NodeProperties::ChangeOp(
+ node, common()->Branch(NegateBranchHint(BranchHintOf(node->op()))));
return Changed(node);
}
Decision const decision = DecideCondition(cond);
@@ -149,8 +149,8 @@ Reduction CommonOperatorReducer::ReduceMerge(Node* node) {
DCHECK(branch->OwnedBy(if_true, if_false));
Node* const control = branch->InputAt(1);
// Mark the {branch} as {Dead}.
- branch->set_op(common()->Dead());
branch->TrimInputCount(0);
+ NodeProperties::ChangeOp(branch, common()->Dead());
return Replace(control);
}
}
@@ -281,9 +281,8 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
DCHECK_NE(0, control_input_count);
DCHECK_EQ(control_input_count, value->InputCount() - 1);
DCHECK_EQ(control_input_count, effect->InputCount() - 1);
- Node* const end = graph()->end();
- DCHECK_EQ(IrOpcode::kEnd, end->opcode());
- DCHECK_NE(0, end->InputCount());
+ DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode());
+ DCHECK_NE(0, graph()->end()->InputCount());
for (int i = 0; i < control_input_count; ++i) {
// Create a new {Return} and connect it to {end}. We don't need to mark
// {end} as revisit, because we mark {node} as {Dead} below, which was
@@ -291,8 +290,7 @@ Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
// the reducer logic will visit {end} again.
Node* ret = graph()->NewNode(common()->Return(), value->InputAt(i),
effect->InputAt(i), control->InputAt(i));
- end->set_op(common()->End(end->InputCount() + 1));
- end->AppendInput(graph()->zone(), ret);
+ NodeProperties::MergeControlToEnd(graph(), common(), ret);
}
// Mark the merge {control} and return {node} as {dead}.
Replace(control, dead());
@@ -362,19 +360,19 @@ Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
Reduction CommonOperatorReducer::Change(Node* node, Operator const* op,
Node* a) {
- node->set_op(op);
node->ReplaceInput(0, a);
node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, op);
return Changed(node);
}
Reduction CommonOperatorReducer::Change(Node* node, Operator const* op, Node* a,
Node* b) {
- node->set_op(op);
node->ReplaceInput(0, a);
node->ReplaceInput(1, b);
node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, op);
return Changed(node);
}
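
Note on the ReduceBranch hunk above: a branch whose condition is apparently a negation is folded by swapping its IfTrue/IfFalse projections, rewiring the branch to the negation's input, and negating the branch hint. A minimal standalone sketch of that rewrite, not V8 code; all names here are illustrative:

#include <cassert>

enum class Hint { kNone, kTrue, kFalse };

Hint NegateHint(Hint h) {
  switch (h) {
    case Hint::kTrue:  return Hint::kFalse;
    case Hint::kFalse: return Hint::kTrue;
    default:           return h;
  }
}

struct Branch {
  bool condition_is_negated;   // models a negation feeding the branch
  bool true_and_false_swapped; // models swapping the IfTrue/IfFalse uses
  Hint hint;
};

void ReduceNegatedBranch(Branch* b) {
  if (!b->condition_is_negated) return;
  b->condition_is_negated = false;                         // use the negation's input
  b->true_and_false_swapped = !b->true_and_false_swapped;  // swap successors
  b->hint = NegateHint(b->hint);                           // keep the prediction consistent
}

int main() {
  Branch b{true, false, Hint::kTrue};
  ReduceNegatedBranch(&b);
  assert(!b.condition_is_negated && b.true_and_false_swapped);
  assert(b.hint == Hint::kFalse);
  return 0;
}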
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index a809cc8aab..bacaae980f 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -9,7 +9,7 @@
#include "src/compiler/linkage.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
-#include "src/unique.h"
+#include "src/handles-inl.h"
#include "src/zone.h"
namespace v8 {
@@ -575,12 +575,14 @@ const Operator* CommonOperatorBuilder::NumberConstant(volatile double value) {
const Operator* CommonOperatorBuilder::HeapConstant(
- const Unique<HeapObject>& value) {
- return new (zone()) Operator1<Unique<HeapObject>>( // --
- IrOpcode::kHeapConstant, Operator::kPure, // opcode
- "HeapConstant", // name
- 0, 0, 0, 1, 0, 0, // counts
- value); // parameter
+ const Handle<HeapObject>& value) {
+ return new (zone())
+ Operator1<Handle<HeapObject>, Handle<HeapObject>::equal_to,
+ Handle<HeapObject>::hash>( // --
+ IrOpcode::kHeapConstant, Operator::kPure, // opcode
+ "HeapConstant", // name
+ 0, 0, 0, 1, 0, 0, // counts
+ value); // parameter
}
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index cc2ae22935..22490f7fe1 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -7,7 +7,7 @@
#include "src/compiler/frame-states.h"
#include "src/compiler/machine-type.h"
-#include "src/unique.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -139,7 +139,7 @@ class CommonOperatorBuilder final : public ZoneObject {
const Operator* Float64Constant(volatile double);
const Operator* ExternalConstant(const ExternalReference&);
const Operator* NumberConstant(volatile double);
- const Operator* HeapConstant(const Unique<HeapObject>&);
+ const Operator* HeapConstant(const Handle<HeapObject>&);
const Operator* Select(MachineType, BranchHint = BranchHint::kNone);
const Operator* Phi(MachineType type, int value_input_count);
diff --git a/deps/v8/src/compiler/control-builders.cc b/deps/v8/src/compiler/control-builders.cc
index 3579828355..bb0ed140d9 100644
--- a/deps/v8/src/compiler/control-builders.cc
+++ b/deps/v8/src/compiler/control-builders.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "control-builders.h"
+#include "src/compiler/control-builders.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc
index 25e183e1f6..3fc3bcefac 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.cc
+++ b/deps/v8/src/compiler/control-flow-optimizer.cc
@@ -245,7 +245,7 @@ bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
branch->NullAllInputs();
if_true->ReplaceInput(0, node);
}
- if_true->set_op(common()->IfValue(value));
+ NodeProperties::ChangeOp(if_true, common()->IfValue(value));
if_false->NullAllInputs();
Enqueue(if_true);
@@ -261,13 +261,13 @@ bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
return false;
}
DCHECK_LT(1u, values.size());
- node->set_op(common()->Switch(values.size() + 1));
node->ReplaceInput(0, index);
- if_true->set_op(common()->IfValue(value));
+ NodeProperties::ChangeOp(node, common()->Switch(values.size() + 1));
if_true->ReplaceInput(0, node);
+ NodeProperties::ChangeOp(if_true, common()->IfValue(value));
Enqueue(if_true);
- if_false->set_op(common()->IfDefault());
if_false->ReplaceInput(0, node);
+ NodeProperties::ChangeOp(if_false, common()->IfDefault());
Enqueue(if_false);
branch->NullAllInputs();
return true;
diff --git a/deps/v8/src/compiler/dead-code-elimination.cc b/deps/v8/src/compiler/dead-code-elimination.cc
index 755620a3cd..697d7f870e 100644
--- a/deps/v8/src/compiler/dead-code-elimination.cc
+++ b/deps/v8/src/compiler/dead-code-elimination.cc
@@ -52,8 +52,8 @@ Reduction DeadCodeElimination::ReduceEnd(Node* node) {
if (live_input_count == 0) {
return Replace(dead());
} else if (live_input_count < input_count) {
- node->set_op(common()->End(live_input_count));
node->TrimInputCount(live_input_count);
+ NodeProperties::ChangeOp(node, common()->End(live_input_count));
return Changed(node);
}
DCHECK_EQ(input_count, live_input_count);
@@ -137,7 +137,7 @@ Reduction DeadCodeElimination::ReduceNode(Node* node) {
void DeadCodeElimination::TrimMergeOrPhi(Node* node, int size) {
const Operator* const op = common()->ResizeMergeOrPhi(node->op(), size);
node->TrimInputCount(OperatorProperties::GetTotalInputCount(op));
- node->set_op(op);
+ NodeProperties::ChangeOp(node, op);
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/diamond.h b/deps/v8/src/compiler/diamond.h
index cf83638bba..f562092a8a 100644
--- a/deps/v8/src/compiler/diamond.h
+++ b/deps/v8/src/compiler/diamond.h
@@ -52,32 +52,10 @@ struct Diamond {
Node* Phi(MachineType machine_type, Node* tv, Node* fv) {
return graph->NewNode(common->Phi(machine_type, 2), tv, fv, merge);
}
-
- Node* EffectPhi(Node* tv, Node* fv) {
- return graph->NewNode(common->EffectPhi(2), tv, fv, merge);
- }
-
- void OverwriteWithPhi(Node* node, MachineType machine_type, Node* tv,
- Node* fv) {
- DCHECK(node->InputCount() >= 3);
- node->set_op(common->Phi(machine_type, 2));
- node->ReplaceInput(0, tv);
- node->ReplaceInput(1, fv);
- node->ReplaceInput(2, merge);
- node->TrimInputCount(3);
- }
-
- void OverwriteWithEffectPhi(Node* node, Node* te, Node* fe) {
- DCHECK(node->InputCount() >= 3);
- node->set_op(common->EffectPhi(2));
- node->ReplaceInput(0, te);
- node->ReplaceInput(1, fe);
- node->ReplaceInput(2, merge);
- node->TrimInputCount(3);
- }
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_DIAMOND_H_
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index aa823b6ba8..0b066783c3 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -110,9 +110,9 @@ class Frame : public ZoneObject {
int AlignSavedCalleeRegisterSlots() {
DCHECK_EQ(0, spilled_callee_register_slot_count_);
- int frame_slot_count_before = frame_slot_count_;
- frame_slot_count_ = RoundUp(frame_slot_count_, 2);
- return frame_slot_count_before - frame_slot_count_;
+ int delta = frame_slot_count_ & 1;
+ frame_slot_count_ += delta;
+ return delta;
}
void AllocateSavedCalleeRegisterSlots(int count) {
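
Note on the frame.h hunk: the rewritten AlignSavedCalleeRegisterSlots rounds the frame slot count up to an even number and returns the padding that was added (the replaced version computed before minus after, i.e. the negated padding). A quick standalone check of the arithmetic, plain C++ with illustrative names:

#include <cassert>

// slot_count & 1 is 1 exactly when the count is odd, so adding it rounds
// up to the next even number; the return value is the padding inserted.
int AlignToEven(int* slot_count) {
  int delta = *slot_count & 1;
  *slot_count += delta;
  return delta;
}

int main() {
  int even = 6, odd = 7;
  assert(AlignToEven(&even) == 0 && even == 6);
  assert(AlignToEven(&odd) == 1 && odd == 8);
  return 0;
}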
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index 80b40a7d9a..1be0b6dec7 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -9,6 +9,7 @@
#include "src/compiler/graph-reducer.h"
#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/verifier.h"
namespace v8 {
namespace internal {
@@ -167,6 +168,7 @@ void GraphReducer::Replace(Node* node, Node* replacement, NodeId max_id) {
// {replacement} was already reduced and finish.
for (Edge edge : node->use_edges()) {
Node* const user = edge.from();
+ Verifier::VerifyEdgeInputReplacement(edge, replacement);
edge.UpdateTo(replacement);
// Don't revisit this node if it refers to itself.
if (user != node) Revisit(user);
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index bf3d8edea4..07ca04532b 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -18,6 +18,7 @@
#include "src/compiler/register-allocator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
+#include "src/interpreter/bytecodes.h"
#include "src/ostreams.h"
namespace v8 {
@@ -28,14 +29,11 @@ namespace compiler {
FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
const char* suffix, const char* mode) {
EmbeddedVector<char, 256> filename(0);
- base::SmartArrayPointer<char> function_name;
- if (info->has_shared_info()) {
- function_name = info->shared_info()->DebugName()->ToCString();
- if (strlen(function_name.get()) > 0) {
- SNPrintF(filename, "turbo-%s", function_name.get());
- } else {
- SNPrintF(filename, "turbo-%p", static_cast<void*>(info));
- }
+ base::SmartArrayPointer<char> debug_name = info->GetDebugName();
+ if (strlen(debug_name.get()) > 0) {
+ SNPrintF(filename, "turbo-%s", debug_name.get());
+ } else if (info->has_shared_info()) {
+ SNPrintF(filename, "turbo-%p", static_cast<void*>(info));
} else {
SNPrintF(filename, "turbo-none-%s", phase);
}
@@ -129,13 +127,10 @@ class JSONGraphNodeWriter {
os_ << ",\"control\":" << (NodeProperties::IsControl(node) ? "true"
: "false");
if (NodeProperties::IsTyped(node)) {
- Bounds bounds = NodeProperties::GetBounds(node);
- std::ostringstream upper;
- bounds.upper->PrintTo(upper);
- std::ostringstream lower;
- bounds.lower->PrintTo(lower);
- os_ << ",\"upper_type\":\"" << Escaped(upper, "\"") << "\"";
- os_ << ",\"lower_type\":\"" << Escaped(lower, "\"") << "\"";
+ Type* type = NodeProperties::GetType(node);
+ std::ostringstream type_out;
+ type->PrintTo(type_out);
+ os_ << ",\"type\":\"" << Escaped(type_out, "\"") << "\"";
}
os_ << "}";
}
@@ -304,12 +299,10 @@ void GraphVisualizer::PrintNode(Node* node, bool gray) {
os_ << "}";
if (FLAG_trace_turbo_types && NodeProperties::IsTyped(node)) {
- Bounds bounds = NodeProperties::GetBounds(node);
- std::ostringstream upper;
- bounds.upper->PrintTo(upper);
- std::ostringstream lower;
- bounds.lower->PrintTo(lower);
- os_ << "|" << Escaped(upper) << "|" << Escaped(lower);
+ Type* type = NodeProperties::GetType(node);
+ std::ostringstream type_out;
+ type->PrintTo(type_out);
+ os_ << "|" << Escaped(type_out);
}
os_ << "}\"\n";
@@ -422,7 +415,9 @@ class GraphC1Visualizer {
void PrintInputs(InputIterator* i, int count, const char* prefix);
void PrintType(Node* node);
- void PrintLiveRange(LiveRange* range, const char* type);
+ void PrintLiveRange(LiveRange* range, const char* type, int vreg);
+ void PrintLiveRangeChain(TopLevelLiveRange* range, const char* type);
+
class Tag final BASE_EMBEDDED {
public:
Tag(GraphC1Visualizer* visualizer, const char* name) {
@@ -491,15 +486,14 @@ void GraphC1Visualizer::PrintIntProperty(const char* name, int value) {
void GraphC1Visualizer::PrintCompilation(const CompilationInfo* info) {
Tag tag(this, "compilation");
+ base::SmartArrayPointer<char> name = info->GetDebugName();
if (info->IsOptimizing()) {
- Handle<String> name = info->literal()->debug_name();
- PrintStringProperty("name", name->ToCString().get());
+ PrintStringProperty("name", name.get());
PrintIndent();
- os_ << "method \"" << name->ToCString().get() << ":"
- << info->optimization_id() << "\"\n";
+ os_ << "method \"" << name.get() << ":" << info->optimization_id()
+ << "\"\n";
} else {
- CodeStub::Major major_key = info->code_stub()->MajorKey();
- PrintStringProperty("name", CodeStub::MajorName(major_key, false));
+ PrintStringProperty("name", name.get());
PrintStringProperty("method", "stub");
}
PrintLongProperty("date",
@@ -546,11 +540,9 @@ void GraphC1Visualizer::PrintInputs(Node* node) {
void GraphC1Visualizer::PrintType(Node* node) {
if (NodeProperties::IsTyped(node)) {
- Bounds bounds = NodeProperties::GetBounds(node);
+ Type* type = NodeProperties::GetType(node);
os_ << " type:";
- bounds.upper->PrintTo(os_);
- os_ << "..";
- bounds.lower->PrintTo(os_);
+ type->PrintTo(os_);
}
}
@@ -697,23 +689,34 @@ void GraphC1Visualizer::PrintLiveRanges(const char* phase,
PrintStringProperty("name", phase);
for (auto range : data->fixed_double_live_ranges()) {
- PrintLiveRange(range, "fixed");
+ PrintLiveRangeChain(range, "fixed");
}
for (auto range : data->fixed_live_ranges()) {
- PrintLiveRange(range, "fixed");
+ PrintLiveRangeChain(range, "fixed");
}
for (auto range : data->live_ranges()) {
- PrintLiveRange(range, "object");
+ PrintLiveRangeChain(range, "object");
+ }
+}
+
+
+void GraphC1Visualizer::PrintLiveRangeChain(TopLevelLiveRange* range,
+ const char* type) {
+ if (range == nullptr || range->IsEmpty()) return;
+ int vreg = range->vreg();
+ for (LiveRange* child = range; child != nullptr; child = child->next()) {
+ PrintLiveRange(child, type, vreg);
}
}
-void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type) {
+void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type,
+ int vreg) {
if (range != NULL && !range->IsEmpty()) {
PrintIndent();
- os_ << range->id() << " " << type;
+ os_ << vreg << ":" << range->relative_id() << " " << type;
if (range->HasRegisterAssigned()) {
AllocatedOperand op = AllocatedOperand::cast(range->GetAssignedOperand());
int assigned_reg = op.index();
@@ -742,13 +745,8 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type) {
}
}
}
- int parent_index = -1;
- if (range->IsChild()) {
- parent_index = range->parent()->id();
- } else {
- parent_index = range->id();
- }
- os_ << " " << parent_index;
+
+ os_ << " " << vreg;
for (auto interval = range->first_interval(); interval != nullptr;
interval = interval->next()) {
os_ << " [" << interval->start().value() << ", "
diff --git a/deps/v8/src/compiler/graph.cc b/deps/v8/src/compiler/graph.cc
index 00074b5513..3d4d6da89c 100644
--- a/deps/v8/src/compiler/graph.cc
+++ b/deps/v8/src/compiler/graph.cc
@@ -8,6 +8,8 @@
#include "src/base/bits.h"
#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/verifier.h"
namespace v8 {
namespace internal {
@@ -43,7 +45,14 @@ void Graph::RemoveDecorator(GraphDecorator* decorator) {
Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs,
bool incomplete) {
- DCHECK_LE(op->ValueInputCount(), input_count);
+ Node* node = NewNodeUnchecked(op, input_count, inputs, incomplete);
+ Verifier::VerifyNode(node);
+ return node;
+}
+
+
+Node* Graph::NewNodeUnchecked(const Operator* op, int input_count,
+ Node** inputs, bool incomplete) {
Node* const node =
Node::New(zone(), NextNodeId(), op, input_count, inputs, incomplete);
Decorate(node);
diff --git a/deps/v8/src/compiler/graph.h b/deps/v8/src/compiler/graph.h
index cb073b312a..28686aa2ca 100644
--- a/deps/v8/src/compiler/graph.h
+++ b/deps/v8/src/compiler/graph.h
@@ -34,6 +34,10 @@ class Graph : public ZoneObject {
explicit Graph(Zone* zone);
// Base implementation used by all factory methods.
+ Node* NewNodeUnchecked(const Operator* op, int input_count, Node** inputs,
+ bool incomplete = false);
+
+ // Factory that checks the input count.
Node* NewNode(const Operator* op, int input_count, Node** inputs,
bool incomplete = false);
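
Note on the graph.h/graph.cc hunks: node creation is split into an unchecked core (NewNodeUnchecked) and a checked factory (NewNode) that runs the verifier on every new node. A reduced sketch of the pattern, not the real classes; VerifyNode below is a stand-in invariant check:

#include <cassert>
#include <vector>

struct Op { int value_input_count; };
struct Node { const Op* op; std::vector<Node*> inputs; };

Node* NewNodeUnchecked(const Op* op, std::vector<Node*> inputs) {
  return new Node{op, std::move(inputs)};
}

void VerifyNode(const Node* node) {
  // The checked factory enforces invariants centrally, e.g. that the
  // operator's declared input count does not exceed the actual inputs.
  assert(node->op->value_input_count <=
         static_cast<int>(node->inputs.size()));
}

Node* NewNode(const Op* op, std::vector<Node*> inputs) {
  Node* node = NewNodeUnchecked(op, std::move(inputs));
  VerifyNode(node);
  return node;
}

int main() {
  Op add{2};
  Node* a = NewNodeUnchecked(&add, {});  // incomplete node, deliberately unchecked
  Node* b = NewNodeUnchecked(&add, {});
  Node* c = NewNode(&add, {a, b});       // checked path
  delete c; delete b; delete a;
  return 0;
}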
diff --git a/deps/v8/src/compiler/greedy-allocator.cc b/deps/v8/src/compiler/greedy-allocator.cc
index 2da30bd289..e0368bf366 100644
--- a/deps/v8/src/compiler/greedy-allocator.cc
+++ b/deps/v8/src/compiler/greedy-allocator.cc
@@ -21,12 +21,19 @@ const float GreedyAllocator::kAllocatedRangeMultiplier = 10.0;
namespace {
-
void UpdateOperands(LiveRange* range, RegisterAllocationData* data) {
int reg_id = range->assigned_register();
range->SetUseHints(reg_id);
- if (range->is_phi()) {
- data->GetPhiMapValueFor(range->id())->set_assigned_register(reg_id);
+ if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
+ data->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg_id);
+ }
+}
+
+
+void UnsetOperands(LiveRange* range, RegisterAllocationData* data) {
+ range->UnsetUseHints();
+ if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
+ data->GetPhiMapValueFor(range->TopLevel())->UnsetAssignedRegister();
}
}
@@ -38,15 +45,13 @@ LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
(data->code()
->GetInstructionBlock(pos.ToInstructionIndex())
->last_instruction_index() != pos.ToInstructionIndex()));
- LiveRange* result = data->NewChildRangeFor(range);
- range->SplitAt(pos, result, data->allocation_zone());
+ LiveRange* result = range->SplitAt(pos, data->allocation_zone());
return result;
}
// TODO(mtrofin): explain why splitting in gap START is always OK.
LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
- const InstructionSequence* code,
int instruction_index) {
LifetimePosition ret = LifetimePosition::Invalid();
@@ -58,53 +63,6 @@ LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
}
-int GetFirstGapIndex(const UseInterval* interval) {
- LifetimePosition start = interval->start();
- int ret = start.ToInstructionIndex();
- return ret;
-}
-
-
-int GetLastGapIndex(const UseInterval* interval) {
- LifetimePosition end = interval->end();
- return end.ToInstructionIndex();
-}
-
-
-// Basic heuristic for advancing the algorithm, if any other splitting heuristic
-// failed.
-LifetimePosition GetLastResortSplitPosition(const LiveRange* range,
- const InstructionSequence* code) {
- if (range->first_interval()->next() != nullptr) {
- return range->first_interval()->next()->start();
- }
-
- UseInterval* interval = range->first_interval();
- int first = GetFirstGapIndex(interval);
- int last = GetLastGapIndex(interval);
- if (first == last) return LifetimePosition::Invalid();
-
- // TODO(mtrofin:) determine why we can't just split somewhere arbitrary
- // within the range, e.g. it's middle.
- for (UsePosition* pos = range->first_pos(); pos != nullptr;
- pos = pos->next()) {
- if (pos->type() != UsePositionType::kRequiresRegister) continue;
- LifetimePosition before = GetSplitPositionForInstruction(
- range, code, pos->pos().ToInstructionIndex());
- if (before.IsValid()) return before;
- LifetimePosition after = GetSplitPositionForInstruction(
- range, code, pos->pos().ToInstructionIndex() + 1);
- if (after.IsValid()) return after;
- }
- return LifetimePosition::Invalid();
-}
-
-
-bool IsProgressPossible(const LiveRange* range,
- const InstructionSequence* code) {
- return range->CanBeSpilled(range->Start()) ||
- GetLastResortSplitPosition(range, code).IsValid();
-}
} // namespace
@@ -117,28 +75,37 @@ AllocationCandidate AllocationScheduler::GetNext() {
void AllocationScheduler::Schedule(LiveRange* range) {
- TRACE("Scheduling live range %d.\n", range->id());
+ TRACE("Scheduling live range %d:%d.\n", range->TopLevel()->vreg(),
+ range->relative_id());
queue_.push(AllocationCandidate(range));
}
+
+void AllocationScheduler::Schedule(LiveRangeGroup* group) {
+ queue_.push(AllocationCandidate(group));
+}
+
GreedyAllocator::GreedyAllocator(RegisterAllocationData* data,
RegisterKind kind, Zone* local_zone)
: RegisterAllocator(data, kind),
local_zone_(local_zone),
allocations_(local_zone),
- scheduler_(local_zone) {}
+ scheduler_(local_zone),
+ groups_(local_zone) {}
void GreedyAllocator::AssignRangeToRegister(int reg_id, LiveRange* range) {
- TRACE("Assigning register %s to live range %d\n", RegisterName(reg_id),
- range->id());
+ TRACE("Assigning register %s to live range %d:%d\n", RegisterName(reg_id),
+ range->TopLevel()->vreg(), range->relative_id());
DCHECK(!range->HasRegisterAssigned());
AllocateRegisterToRange(reg_id, range);
- TRACE("Assigning %s to range %d\n", RegisterName(reg_id), range->id());
+ TRACE("Assigning %s to range %d%d.\n", RegisterName(reg_id),
+ range->TopLevel()->vreg(), range->relative_id());
range->set_assigned_register(reg_id);
+ UpdateOperands(range, data());
}
@@ -151,7 +118,7 @@ void GreedyAllocator::PreallocateFixedRanges() {
for (LiveRange* fixed_range : GetFixedRegisters()) {
if (fixed_range != nullptr) {
DCHECK_EQ(mode(), fixed_range->kind());
- DCHECK(fixed_range->IsFixed());
+ DCHECK(fixed_range->TopLevel()->IsFixed());
int reg_nr = fixed_range->assigned_register();
EnsureValidRangeWeight(fixed_range);
@@ -161,10 +128,102 @@ void GreedyAllocator::PreallocateFixedRanges() {
}
+void GreedyAllocator::GroupLiveRanges() {
+ CoalescedLiveRanges grouper(local_zone());
+ for (TopLevelLiveRange* range : data()->live_ranges()) {
+ grouper.clear();
+ // Skip splinters, because we do not want to optimize for them, and moves
+ // due to assigning them to different registers occur in deferred blocks.
+ if (!CanProcessRange(range) || range->IsSplinter() || !range->is_phi()) {
+ continue;
+ }
+
+ // A phi can't be a memory operand, so it couldn't have been split.
+ DCHECK(!range->spilled());
+
+ // Maybe this phi range is itself an input to another phi which was already
+ // processed.
+ LiveRangeGroup* latest_grp = range->group() != nullptr
+ ? range->group()
+ : new (local_zone())
+ LiveRangeGroup(local_zone());
+
+ // Populate the grouper.
+ if (range->group() == nullptr) {
+ grouper.AllocateRange(range);
+ } else {
+ for (LiveRange* member : range->group()->ranges()) {
+ grouper.AllocateRange(member);
+ }
+ }
+ for (int j : data()->GetPhiMapValueFor(range)->phi()->operands()) {
+ // skip output also in input, which may happen for loops.
+ if (j == range->vreg()) continue;
+
+ TopLevelLiveRange* other_top = data()->live_ranges()[j];
+
+ if (other_top->IsSplinter()) continue;
+ // If the other was a memory operand, it might have been split.
+ // So get the unsplit part.
+ LiveRange* other =
+ other_top->next() == nullptr ? other_top : other_top->next();
+
+ if (other->spilled()) continue;
+
+ LiveRangeGroup* other_group = other->group();
+ if (other_group != nullptr) {
+ bool can_merge = true;
+ for (LiveRange* member : other_group->ranges()) {
+ if (grouper.GetConflicts(member).Current() != nullptr) {
+ can_merge = false;
+ break;
+ }
+ }
+ // If each member doesn't conflict with the current group, then since
+ // the members don't conflict with each other either, we can merge them.
+ if (can_merge) {
+ latest_grp->ranges().insert(latest_grp->ranges().end(),
+ other_group->ranges().begin(),
+ other_group->ranges().end());
+ for (LiveRange* member : other_group->ranges()) {
+ grouper.AllocateRange(member);
+ member->set_group(latest_grp);
+ }
+ // Clear the other group, so we avoid scheduling it.
+ other_group->ranges().clear();
+ }
+ } else if (grouper.GetConflicts(other).Current() == nullptr) {
+ grouper.AllocateRange(other);
+ latest_grp->ranges().push_back(other);
+ other->set_group(latest_grp);
+ }
+ }
+
+ if (latest_grp->ranges().size() > 0 && range->group() == nullptr) {
+ latest_grp->ranges().push_back(range);
+ DCHECK(latest_grp->ranges().size() > 1);
+ groups().push_back(latest_grp);
+ range->set_group(latest_grp);
+ }
+ }
+}
+
+
void GreedyAllocator::ScheduleAllocationCandidates() {
- for (auto range : data()->live_ranges()) {
- if (CanProcessRange(range) && !range->spilled()) {
- scheduler().Schedule(range);
+ for (LiveRangeGroup* group : groups()) {
+ if (group->ranges().size() > 0) {
+ // We shouldn't have added single-range groups.
+ DCHECK(group->ranges().size() != 1);
+ scheduler().Schedule(group);
+ }
+ }
+ for (LiveRange* range : data()->live_ranges()) {
+ if (CanProcessRange(range)) {
+ for (LiveRange* child = range; child != nullptr; child = child->next()) {
+ if (!child->spilled() && child->group() == nullptr) {
+ scheduler().Schedule(child);
+ }
+ }
}
}
}
@@ -172,42 +231,110 @@ void GreedyAllocator::ScheduleAllocationCandidates() {
void GreedyAllocator::TryAllocateCandidate(
const AllocationCandidate& candidate) {
- // At this point, this is just a live range. TODO: groups.
- TryAllocateLiveRange(candidate.live_range());
+ if (candidate.is_group()) {
+ TryAllocateGroup(candidate.group());
+ } else {
+ TryAllocateLiveRange(candidate.live_range());
+ }
+}
+
+
+void GreedyAllocator::TryAllocateGroup(LiveRangeGroup* group) {
+ float group_weight = 0.0;
+ for (LiveRange* member : group->ranges()) {
+ EnsureValidRangeWeight(member);
+ group_weight = Max(group_weight, member->weight());
+ }
+
+ float eviction_weight = group_weight;
+ int eviction_reg = -1;
+ int free_reg = -1;
+ for (int reg = 0; reg < num_registers(); ++reg) {
+ float weight = GetMaximumConflictingWeight(reg, group, group_weight);
+ if (weight == LiveRange::kInvalidWeight) {
+ free_reg = reg;
+ break;
+ }
+ if (weight < eviction_weight) {
+ eviction_weight = weight;
+ eviction_reg = reg;
+ }
+ }
+ if (eviction_reg < 0 && free_reg < 0) {
+ for (LiveRange* member : group->ranges()) {
+ scheduler().Schedule(member);
+ }
+ return;
+ }
+ if (free_reg < 0) {
+ DCHECK(eviction_reg >= 0);
+ for (LiveRange* member : group->ranges()) {
+ EvictAndRescheduleConflicts(eviction_reg, member);
+ }
+ free_reg = eviction_reg;
+ }
+
+ DCHECK(free_reg >= 0);
+ for (LiveRange* member : group->ranges()) {
+ AssignRangeToRegister(free_reg, member);
+ }
}
void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
// TODO(mtrofin): once we introduce groups, we'll want to first try and
// allocate at the preferred register.
- TRACE("Attempting to allocate live range %d\n", range->id());
+ TRACE("Attempting to allocate live range %d:%d.\n", range->TopLevel()->vreg(),
+ range->relative_id());
int free_reg = -1;
int evictable_reg = -1;
- EnsureValidRangeWeight(range);
- DCHECK(range->weight() != LiveRange::kInvalidWeight);
-
- float smallest_weight = LiveRange::kMaxWeight;
+ int hinted_reg = -1;
- // Seek either the first free register, or, from the set of registers
- // where the maximum conflict is lower than the candidate's weight, the one
- // with the smallest such weight.
- for (int i = 0; i < num_registers(); i++) {
- float max_conflict_weight = GetMaximumConflictingWeight(i, range);
+ EnsureValidRangeWeight(range);
+ float competing_weight = range->weight();
+ DCHECK(competing_weight != LiveRange::kInvalidWeight);
+
+ // Can we allocate at the hinted register?
+ if (range->FirstHintPosition(&hinted_reg) != nullptr) {
+ DCHECK(hinted_reg >= 0);
+ float max_conflict_weight =
+ GetMaximumConflictingWeight(hinted_reg, range, competing_weight);
if (max_conflict_weight == LiveRange::kInvalidWeight) {
- free_reg = i;
- break;
+ free_reg = hinted_reg;
+ } else if (max_conflict_weight < range->weight()) {
+ evictable_reg = hinted_reg;
}
- if (max_conflict_weight < range->weight() &&
- max_conflict_weight < smallest_weight) {
- smallest_weight = max_conflict_weight;
- evictable_reg = i;
+ }
+
+ if (free_reg < 0 && evictable_reg < 0) {
+ // There was no hinted reg, or we cannot allocate there.
+ float smallest_weight = LiveRange::kMaxWeight;
+
+ // Seek either the first free register, or, from the set of registers
+ // where the maximum conflict is lower than the candidate's weight, the one
+ // with the smallest such weight.
+ for (int i = 0; i < num_registers(); i++) {
+ // Skip unnecessarily re-visiting the hinted register, if any.
+ if (i == hinted_reg) continue;
+ float max_conflict_weight =
+ GetMaximumConflictingWeight(i, range, competing_weight);
+ if (max_conflict_weight == LiveRange::kInvalidWeight) {
+ free_reg = i;
+ break;
+ }
+ if (max_conflict_weight < range->weight() &&
+ max_conflict_weight < smallest_weight) {
+ smallest_weight = max_conflict_weight;
+ evictable_reg = i;
+ }
}
}
// We have a free register, so we use it.
if (free_reg >= 0) {
- TRACE("Found free register %s for live range %d\n", RegisterName(free_reg),
- range->id());
+ TRACE("Found free register %s for live range %d:%d.\n",
+ RegisterName(free_reg), range->TopLevel()->vreg(),
+ range->relative_id());
AssignRangeToRegister(free_reg, range);
return;
}
@@ -215,8 +342,9 @@ void GreedyAllocator::TryAllocateLiveRange(LiveRange* range) {
// We found a register to perform evictions, so we evict and allocate our
// candidate.
if (evictable_reg >= 0) {
- TRACE("Found evictable register %s for live range %d\n",
- RegisterName(free_reg), range->id());
+ TRACE("Found evictable register %s for live range %d:%d.\n",
+ RegisterName(free_reg), range->TopLevel()->vreg(),
+ range->relative_id());
EvictAndRescheduleConflicts(evictable_reg, range);
AssignRangeToRegister(evictable_reg, range);
return;
@@ -233,11 +361,13 @@ void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
conflict = conflicts.RemoveCurrentAndGetNext()) {
DCHECK(conflict->HasRegisterAssigned());
- CHECK(!conflict->IsFixed());
+ CHECK(!conflict->TopLevel()->IsFixed());
conflict->UnsetAssignedRegister();
+ UnsetOperands(conflict, data());
UpdateWeightAtEviction(conflict);
scheduler().Schedule(conflict);
- TRACE("Evicted range %d.\n", conflict->id());
+ TRACE("Evicted range %d%d.\n", conflict->TopLevel()->vreg(),
+ conflict->relative_id());
}
}
@@ -245,12 +375,13 @@ void GreedyAllocator::EvictAndRescheduleConflicts(unsigned reg_id,
void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
size_t initial_range_count = data()->live_ranges().size();
for (size_t i = 0; i < initial_range_count; ++i) {
- auto range = data()->live_ranges()[i];
+ TopLevelLiveRange* range = data()->live_ranges()[i];
if (!CanProcessRange(range)) continue;
- if (range->HasNoSpillType()) continue;
+ if (!range->HasSpillOperand()) continue;
LifetimePosition start = range->Start();
- TRACE("Live range %d is defined by a spill operand.\n", range->id());
+ TRACE("Live range %d:%d is defined by a spill operand.\n",
+ range->TopLevel()->vreg(), range->relative_id());
auto next_pos = start;
if (next_pos.IsGapPosition()) {
next_pos = next_pos.NextStart();
@@ -263,12 +394,14 @@ void GreedyAllocator::SplitAndSpillRangesDefinedByMemoryOperand() {
} else if (pos->pos() > range->Start().NextStart()) {
// Do not spill live range eagerly if use position that can benefit from
// the register is too close to the start of live range.
- auto split_pos = pos->pos();
- if (data()->IsBlockBoundary(split_pos.Start())) {
- split_pos = split_pos.Start();
- } else {
- split_pos = split_pos.PrevStart().End();
- }
+ auto split_pos = GetSplitPositionForInstruction(
+ range, pos->pos().ToInstructionIndex());
+ // There is no place to split, so we can't split and spill.
+ if (!split_pos.IsValid()) continue;
+
+ split_pos =
+ FindOptimalSplitPos(range->Start().NextFullStart(), split_pos);
+
Split(range, data(), split_pos);
Spill(range);
}
@@ -284,23 +417,14 @@ void GreedyAllocator::AllocateRegisters() {
data()->debug_name());
SplitAndSpillRangesDefinedByMemoryOperand();
- PreallocateFixedRanges();
+ GroupLiveRanges();
ScheduleAllocationCandidates();
-
+ PreallocateFixedRanges();
while (!scheduler().empty()) {
AllocationCandidate candidate = scheduler().GetNext();
TryAllocateCandidate(candidate);
}
-
- // We do not rely on the hint mechanism used by LinearScan, so no need to
- // actively update/reset operands until the end.
- for (auto range : data()->live_ranges()) {
- if (CanProcessRange(range) && range->HasRegisterAssigned()) {
- UpdateOperands(range, data());
- }
- }
-
for (size_t i = 0; i < allocations_.size(); ++i) {
if (!allocations_[i]->empty()) {
data()->MarkAllocated(mode(), static_cast<int>(i));
@@ -308,21 +432,67 @@ void GreedyAllocator::AllocateRegisters() {
}
allocations_.clear();
+ TryReuseSpillRangesForGroups();
+
TRACE("End allocating function %s with the Greedy Allocator\n",
data()->debug_name());
}
+void GreedyAllocator::TryReuseSpillRangesForGroups() {
+ for (TopLevelLiveRange* top : data()->live_ranges()) {
+ if (!CanProcessRange(top) || !top->is_phi() || top->group() == nullptr) {
+ continue;
+ }
+
+ SpillRange* spill_range = nullptr;
+ for (LiveRange* member : top->group()->ranges()) {
+ if (!member->TopLevel()->HasSpillRange()) continue;
+ SpillRange* member_range = member->TopLevel()->GetSpillRange();
+ if (spill_range == nullptr) {
+ spill_range = member_range;
+ } else {
+ // This may not always succeed, because we group non-conflicting ranges
+ // that may have been splintered, and the splinters may cause conflicts
+ // in the spill ranges.
+ // TODO(mtrofin): should the splinters own their own spill ranges?
+ spill_range->TryMerge(member_range);
+ }
+ }
+ }
+}
+
+
float GreedyAllocator::GetMaximumConflictingWeight(
- unsigned reg_id, const LiveRange* range) const {
+ unsigned reg_id, const LiveRange* range, float competing_weight) const {
float ret = LiveRange::kInvalidWeight;
auto conflicts = current_allocations(reg_id)->GetConflicts(range);
for (LiveRange* conflict = conflicts.Current(); conflict != nullptr;
conflict = conflicts.GetNext()) {
DCHECK_NE(conflict->weight(), LiveRange::kInvalidWeight);
+ if (competing_weight <= conflict->weight()) return LiveRange::kMaxWeight;
ret = Max(ret, conflict->weight());
- if (ret == LiveRange::kMaxWeight) return ret;
+ DCHECK(ret < LiveRange::kMaxWeight);
+ }
+
+ return ret;
+}
+
+
+float GreedyAllocator::GetMaximumConflictingWeight(unsigned reg_id,
+ const LiveRangeGroup* group,
+ float group_weight) const {
+ float ret = LiveRange::kInvalidWeight;
+
+ for (LiveRange* member : group->ranges()) {
+ float member_conflict_weight =
+ GetMaximumConflictingWeight(reg_id, member, group_weight);
+ if (member_conflict_weight == LiveRange::kMaxWeight) {
+ return LiveRange::kMaxWeight;
+ }
+ if (member_conflict_weight > group_weight) return LiveRange::kMaxWeight;
+ ret = Max(member_conflict_weight, ret);
}
return ret;
@@ -335,11 +505,11 @@ void GreedyAllocator::EnsureValidRangeWeight(LiveRange* range) {
// unallocated.
if (range->weight() != LiveRange::kInvalidWeight) return;
- if (range->IsFixed()) {
+ if (range->TopLevel()->IsFixed()) {
range->set_weight(LiveRange::kMaxWeight);
return;
}
- if (!IsProgressPossible(range, code())) {
+ if (!IsProgressPossible(range)) {
range->set_weight(LiveRange::kMaxWeight);
return;
}
@@ -361,10 +531,111 @@ void GreedyAllocator::SpillRangeAsLastResort(LiveRange* range) {
}
+LiveRange* GreedyAllocator::GetRemainderAfterSplittingAroundFirstCall(
+ LiveRange* range) {
+ LiveRange* ret = range;
+ for (UseInterval* interval = range->first_interval(); interval != nullptr;
+ interval = interval->next()) {
+ LifetimePosition start = interval->start();
+ LifetimePosition end = interval->end();
+ // If the interval starts at instruction end, then the first instruction
+ // in the interval is the next one.
+ int first_full_instruction = (start.IsGapPosition() || start.IsStart())
+ ? start.ToInstructionIndex()
+ : start.ToInstructionIndex() + 1;
+ // If the interval ends in a gap or at instruction start, then the last
+ // instruction is the previous one.
+ int last_full_instruction = (end.IsGapPosition() || end.IsStart())
+ ? end.ToInstructionIndex() - 1
+ : end.ToInstructionIndex();
+
+ for (int instruction_index = first_full_instruction;
+ instruction_index <= last_full_instruction; ++instruction_index) {
+ if (!code()->InstructionAt(instruction_index)->IsCall()) continue;
+
+ LifetimePosition before =
+ GetSplitPositionForInstruction(range, instruction_index);
+ LiveRange* second_part =
+ before.IsValid() ? Split(range, data(), before) : range;
+
+ if (range != second_part) scheduler().Schedule(range);
+
+ LifetimePosition after =
+ FindSplitPositionAfterCall(second_part, instruction_index);
+
+ if (after.IsValid()) {
+ ret = Split(second_part, data(), after);
+ } else {
+ ret = nullptr;
+ }
+ Spill(second_part);
+ return ret;
+ }
+ }
+ return ret;
+}
+
+
+bool GreedyAllocator::TrySplitAroundCalls(LiveRange* range) {
+ bool modified = false;
+
+ while (range != nullptr) {
+ LiveRange* remainder = GetRemainderAfterSplittingAroundFirstCall(range);
+ // If we performed no modification, we're done.
+ if (remainder == range) {
+ break;
+ }
+ // We performed a modification.
+ modified = true;
+ range = remainder;
+ }
+ // If we have a remainder and we made modifications, it means the remainder
+ // has no calls and we should schedule it for further processing. If we made
+ // no modifications, we will just return false, because we want the algorithm
+ // to make progress by trying some other heuristic.
+ if (modified && range != nullptr) {
+ DCHECK(!range->spilled());
+ DCHECK(!range->HasRegisterAssigned());
+ scheduler().Schedule(range);
+ }
+ return modified;
+}
+
+
+LifetimePosition GreedyAllocator::FindSplitPositionAfterCall(
+ const LiveRange* range, int call_index) {
+ LifetimePosition after_call =
+ Max(range->Start(),
+ LifetimePosition::GapFromInstructionIndex(call_index + 1));
+ UsePosition* next_use = range->NextRegisterPosition(after_call);
+ if (!next_use) return LifetimePosition::Invalid();
+
+ LifetimePosition split_pos = FindOptimalSplitPos(after_call, next_use->pos());
+ split_pos =
+ GetSplitPositionForInstruction(range, split_pos.ToInstructionIndex());
+ return split_pos;
+}
+
+
+LifetimePosition GreedyAllocator::FindSplitPositionBeforeLoops(
+ LiveRange* range) {
+ LifetimePosition end = range->End();
+ if (end.ToInstructionIndex() >= code()->LastInstructionIndex()) {
+ end =
+ LifetimePosition::GapFromInstructionIndex(end.ToInstructionIndex() - 1);
+ }
+ LifetimePosition pos = FindOptimalSplitPos(range->Start(), end);
+ pos = GetSplitPositionForInstruction(range, pos.ToInstructionIndex());
+ return pos;
+}
+
+
void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
- // TODO(mtrofin): replace the call below with the entry point selecting
- // heuristics, once they exist, out of which GLRSP is the last one.
- auto pos = GetLastResortSplitPosition(range, code());
+ if (TrySplitAroundCalls(range)) return;
+
+ LifetimePosition pos = FindSplitPositionBeforeLoops(range);
+
+ if (!pos.IsValid()) pos = GetLastResortSplitPosition(range);
if (pos.IsValid()) {
LiveRange* tail = Split(range, data(), pos);
DCHECK(tail != range);
@@ -376,6 +647,31 @@ void GreedyAllocator::SplitOrSpillBlockedRange(LiveRange* range) {
}
+// Basic heuristic for advancing the algorithm, used when all other splitting
+// heuristics have failed.
+LifetimePosition GreedyAllocator::GetLastResortSplitPosition(
+ const LiveRange* range) {
+ LifetimePosition previous = range->Start();
+ for (UsePosition *pos = range->NextRegisterPosition(previous); pos != nullptr;
+ previous = previous.NextFullStart(),
+ pos = range->NextRegisterPosition(previous)) {
+ LifetimePosition optimal = FindOptimalSplitPos(previous, pos->pos());
+ LifetimePosition before =
+ GetSplitPositionForInstruction(range, optimal.ToInstructionIndex());
+ if (before.IsValid()) return before;
+ LifetimePosition after = GetSplitPositionForInstruction(
+ range, pos->pos().ToInstructionIndex() + 1);
+ if (after.IsValid()) return after;
+ }
+ return LifetimePosition::Invalid();
+}
+
+
+bool GreedyAllocator::IsProgressPossible(const LiveRange* range) {
+ return range->CanBeSpilled(range->Start()) ||
+ GetLastResortSplitPosition(range).IsValid();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
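
Note on GetMaximumConflictingWeight: it now takes the competing weight and returns kMaxWeight as soon as any conflict is at least that heavy, so the caller can abandon a register without scanning all of its conflicts. A self-contained sketch of the short-circuit, with invented weight constants:

#include <algorithm>
#include <cassert>
#include <vector>

const float kInvalidWeight = -1.0f;
const float kMaxWeight = 1e30f;

// Returns kInvalidWeight if there are no conflicts; kMaxWeight as soon as
// a conflict is too heavy to evict; otherwise the heaviest conflict.
float MaxConflictingWeight(const std::vector<float>& conflicts,
                           float competing_weight) {
  float ret = kInvalidWeight;
  for (float w : conflicts) {
    if (competing_weight <= w) return kMaxWeight;  // eviction cannot pay off
    ret = std::max(ret, w);
  }
  return ret;
}

int main() {
  assert(MaxConflictingWeight({}, 5.0f) == kInvalidWeight);
  assert(MaxConflictingWeight({1.0f, 3.0f}, 5.0f) == 3.0f);  // all evictable
  assert(MaxConflictingWeight({1.0f, 7.0f}, 5.0f) == kMaxWeight);
  return 0;
}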
diff --git a/deps/v8/src/compiler/greedy-allocator.h b/deps/v8/src/compiler/greedy-allocator.h
index c4e330eb97..45bbd87da8 100644
--- a/deps/v8/src/compiler/greedy-allocator.h
+++ b/deps/v8/src/compiler/greedy-allocator.h
@@ -18,21 +18,45 @@ namespace compiler {
// we may extend this to groups of LiveRanges. It has to be comparable.
class AllocationCandidate {
public:
- explicit AllocationCandidate(LiveRange* range) : range_(range) {}
+ explicit AllocationCandidate(LiveRange* range)
+ : is_group_(false), size_(range->GetSize()) {
+ candidate_.range_ = range;
+ }
+
+ explicit AllocationCandidate(LiveRangeGroup* ranges)
+ : is_group_(true), size_(CalculateGroupSize(ranges)) {
+ candidate_.group_ = ranges;
+ }
// Strict ordering operators
bool operator<(const AllocationCandidate& other) const {
- return range_->GetSize() < other.range_->GetSize();
+ return size() < other.size();
}
bool operator>(const AllocationCandidate& other) const {
- return range_->GetSize() > other.range_->GetSize();
+ return size() > other.size();
}
- LiveRange* live_range() const { return range_; }
+ bool is_group() const { return is_group_; }
+ LiveRange* live_range() const { return candidate_.range_; }
+ LiveRangeGroup* group() const { return candidate_.group_; }
private:
- LiveRange* range_;
+ unsigned CalculateGroupSize(LiveRangeGroup* group) {
+ unsigned ret = 0;
+ for (LiveRange* range : group->ranges()) {
+ ret += range->GetSize();
+ }
+ return ret;
+ }
+
+ unsigned size() const { return size_; }
+ bool is_group_;
+ unsigned size_;
+ union {
+ LiveRange* range_;
+ LiveRangeGroup* group_;
+ } candidate_;
};
@@ -41,6 +65,7 @@ class AllocationScheduler final : ZoneObject {
public:
explicit AllocationScheduler(Zone* zone) : queue_(zone) {}
void Schedule(LiveRange* range);
+ void Schedule(LiveRangeGroup* group);
AllocationCandidate GetNext();
bool empty() const { return queue_.empty(); }
@@ -85,12 +110,15 @@ class GreedyAllocator final : public RegisterAllocator {
}
Zone* local_zone() const { return local_zone_; }
+ ZoneVector<LiveRangeGroup*>& groups() { return groups_; }
+ const ZoneVector<LiveRangeGroup*>& groups() const { return groups_; }
// Insert fixed ranges.
void PreallocateFixedRanges();
+ void GroupLiveRanges();
+
// Schedule unassigned live ranges for allocation.
- // TODO(mtrofin): groups.
void ScheduleAllocationCandidates();
void AllocateRegisterToRange(unsigned reg_id, LiveRange* range) {
@@ -106,6 +134,7 @@ class GreedyAllocator final : public RegisterAllocator {
void TryAllocateCandidate(const AllocationCandidate& candidate);
void TryAllocateLiveRange(LiveRange* range);
+ void TryAllocateGroup(LiveRangeGroup* group);
bool CanProcessRange(LiveRange* range) const {
return range != nullptr && !range->IsEmpty() && range->kind() == mode();
@@ -119,12 +148,47 @@ class GreedyAllocator final : public RegisterAllocator {
// Returns kInvalidWeight if there are no conflicts, or the largest weight of
// a range conflicting with the given range, at the given register.
+ float GetMaximumConflictingWeight(unsigned reg_id, const LiveRange* range,
+ float competing_weight) const;
+
+ // Returns kInvalidWeight if there are no conflicts, or the largest weight of
+ // a range conflicting with the given range, at the given register.
float GetMaximumConflictingWeight(unsigned reg_id,
- const LiveRange* range) const;
+ const LiveRangeGroup* group,
+ float group_weight) const;
// This is the extension point for splitting heuristics.
void SplitOrSpillBlockedRange(LiveRange* range);
+ // Find a good position at which to fill, after a range was spilled around a call.
+ LifetimePosition FindSplitPositionAfterCall(const LiveRange* range,
+ int call_index);
+ // Split a range around all calls it passes over. Returns true if any changes
+ // were made, or false if no calls were found.
+ bool TrySplitAroundCalls(LiveRange* range);
+
+ // Find a split position at the outermost loop.
+ LifetimePosition FindSplitPositionBeforeLoops(LiveRange* range);
+
+ // Finds the first call instruction in the path of this range. Splits the
+ // range before the call and requeues the head segment (if any), spills the
+ // section covering the call, and returns the section after the call. The
+ // return value is:
+ // - the same range, if no call was found;
+ // - nullptr, if the range ends at the call and there is no "after the
+ //   call" portion;
+ // - otherwise, the portion after the call.
+ LiveRange* GetRemainderAfterSplittingAroundFirstCall(LiveRange* range);
+
+ // While we attempt to merge spill ranges later on in the allocation pipeline,
+ // we want to ensure group elements get merged. Waiting until later may hinder
+ // merge-ability, since the pipeline merger (being naive) may create conflicts
+ // between spill ranges of group members.
+ void TryReuseSpillRangesForGroups();
+
+ LifetimePosition GetLastResortSplitPosition(const LiveRange* range);
+
+ bool IsProgressPossible(const LiveRange* range);
+
// Necessary heuristic: spill when all else failed.
void SpillRangeAsLastResort(LiveRange* range);
@@ -133,6 +197,8 @@ class GreedyAllocator final : public RegisterAllocator {
Zone* local_zone_;
ZoneVector<CoalescedLiveRanges*> allocations_;
AllocationScheduler scheduler_;
+ ZoneVector<LiveRangeGroup*> groups_;
+
DISALLOW_COPY_AND_ASSIGN(GreedyAllocator);
};
} // namespace compiler
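
Note on AllocationCandidate: it becomes a tagged union over a single range or a group, ordered by total size so both kinds share one scheduling queue. A reduced sketch using std::priority_queue; the types are illustrative and the real queue lives in AllocationScheduler:

#include <cassert>
#include <numeric>
#include <queue>
#include <vector>

struct Range { unsigned size; };
using Group = std::vector<Range*>;

class Candidate {
 public:
  explicit Candidate(Range* r) : is_group_(false), size_(r->size) {
    u_.range = r;
  }
  explicit Candidate(Group* g)
      : is_group_(true),
        size_(std::accumulate(
            g->begin(), g->end(), 0u,
            [](unsigned s, Range* r) { return s + r->size; })) {
    u_.group = g;
  }
  // Ordering by total size drives the allocation order.
  bool operator<(const Candidate& o) const { return size_ < o.size_; }
  bool is_group() const { return is_group_; }
  unsigned size() const { return size_; }

 private:
  bool is_group_;
  unsigned size_;
  union { Range* range; Group* group; } u_;
};

int main() {
  Range a{4}, b{2}, c{3};
  Group g{&b, &c};                   // total size 5
  std::priority_queue<Candidate> q;  // largest candidate first
  q.push(Candidate(&a));
  q.push(Candidate(&g));
  assert(q.top().is_group() && q.top().size() == 5);
  return 0;
}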
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 4241a5e982..d4fe21505c 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -840,6 +840,20 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ movss(operand, i.InputDoubleRegister(index));
}
break;
+ case kIA32BitcastFI:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ mov(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kIA32BitcastIF:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
case kIA32Lea: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation
@@ -954,6 +968,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(esp, Operand::StaticVariable(stack_limit));
break;
}
+ case kCheckedLoadWord64:
+ case kCheckedStoreWord64:
+ UNREACHABLE(); // currently unsupported checked int64 load/store.
+ break;
}
} // NOLINT(readability/fn_size)
@@ -1008,6 +1026,9 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
case kNotOverflow:
__ j(no_overflow, tlabel);
break;
+ default:
+ UNREACHABLE();
+ break;
}
// Add a jump if not falling through to the next block.
if (!branch->fallthru) __ jmp(flabel);
@@ -1078,6 +1099,9 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case kNotOverflow:
cc = no_overflow;
break;
+ default:
+ UNREACHABLE();
+ break;
}
__ bind(&check);
if (reg.is_byte_register()) {
@@ -1518,15 +1542,17 @@ void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
- }
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
}
}
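
Note on kIA32BitcastFI/kIA32BitcastIF: they move 32 bits between a general-purpose and an XMM register (movd/movss) with no numeric conversion. The portable C++ analogue of that bit-pattern transfer, shown here via memcpy:

#include <cassert>
#include <cstdint>
#include <cstring>

// Bitcast: reinterpret the 32 bits of a float as an int32 and back, the
// portable equivalent of the movd register-to-register move above.
uint32_t BitcastFloatToInt32(float f) {
  uint32_t bits;
  static_assert(sizeof bits == sizeof f, "32-bit float expected");
  std::memcpy(&bits, &f, sizeof bits);
  return bits;
}

float BitcastInt32ToFloat(uint32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof f);
  return f;
}

int main() {
  assert(BitcastFloatToInt32(1.0f) == 0x3f800000u);  // IEEE-754 encoding of 1.0
  assert(BitcastInt32ToFloat(0x3f800000u) == 1.0f);
  return 0;
}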
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index 4002a6776d..2119947e94 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -89,6 +89,8 @@ namespace compiler {
V(IA32Movl) \
V(IA32Movss) \
V(IA32Movsd) \
+ V(IA32BitcastFI) \
+ V(IA32BitcastIF) \
V(IA32Lea) \
V(IA32Push) \
V(IA32Poke) \
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 1ea8dd6201..792d1d5a47 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -36,9 +36,9 @@ class IA32OperandGenerator final : public OperandGenerator {
case IrOpcode::kHeapConstant: {
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
- Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
- Isolate* isolate = value.handle()->GetIsolate();
- return !isolate->heap()->InNewSpace(*value.handle());
+ Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
+ Isolate* isolate = value->GetIsolate();
+ return !isolate->heap()->InNewSpace(*value);
}
default:
return false;
@@ -687,6 +687,18 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRROFloat(this, node, kAVXFloat32Add, kSSEFloat32Add);
}
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index a9db281fd3..cb47be6446 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -56,11 +56,13 @@ namespace compiler {
V(CheckedLoadInt16) \
V(CheckedLoadUint16) \
V(CheckedLoadWord32) \
+ V(CheckedLoadWord64) \
V(CheckedLoadFloat32) \
V(CheckedLoadFloat64) \
V(CheckedStoreWord8) \
V(CheckedStoreWord16) \
V(CheckedStoreWord32) \
+ V(CheckedStoreWord64) \
V(CheckedStoreFloat32) \
V(CheckedStoreFloat64) \
TARGET_ARCH_OPCODE_LIST(V)
@@ -112,6 +114,14 @@ enum FlagsCondition {
kUnsignedGreaterThanOrEqual,
kUnsignedLessThanOrEqual,
kUnsignedGreaterThan,
+ kFloatLessThanOrUnordered,
+ kFloatGreaterThanOrEqual,
+ kFloatLessThanOrEqual,
+ kFloatGreaterThanOrUnordered,
+ kFloatLessThan,
+ kFloatGreaterThanOrEqualOrUnordered,
+ kFloatLessThanOrEqualOrUnordered,
+ kFloatGreaterThan,
kUnorderedEqual,
kUnorderedNotEqual,
kOverflow,
@@ -122,6 +132,8 @@ inline FlagsCondition NegateFlagsCondition(FlagsCondition condition) {
return static_cast<FlagsCondition>(condition ^ 1);
}
+FlagsCondition CommuteFlagsCondition(FlagsCondition condition);
+
std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc);
// The InstructionCode is an opaque, target-specific integer that encodes
@@ -137,8 +149,8 @@ typedef int32_t InstructionCode;
typedef BitField<ArchOpcode, 0, 8> ArchOpcodeField;
typedef BitField<AddressingMode, 8, 5> AddressingModeField;
typedef BitField<FlagsMode, 13, 2> FlagsModeField;
-typedef BitField<FlagsCondition, 15, 4> FlagsConditionField;
-typedef BitField<int, 19, 13> MiscField;
+typedef BitField<FlagsCondition, 15, 5> FlagsConditionField;
+typedef BitField<int, 20, 12> MiscField;
} // namespace compiler
} // namespace internal
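
Note on the field layout change: the eight new float conditions grow FlagsCondition past 16 values, so FlagsConditionField widens from 4 to 5 bits and MiscField narrows from 13 to 12 to keep the encoding within 32 bits. A static sketch of that bit budget; the condition count of 22 below is only what is visible in this patch, not a confirmed total:

// Field layout of an InstructionCode, mirroring the BitField typedefs above:
// [0,8) arch opcode, [8,13) addressing mode, [13,15) flags mode,
// [15,20) flags condition (widened to 5 bits), [20,32) misc (narrowed to 12).
constexpr int kArchOpcodeBits = 8;
constexpr int kAddressingModeBits = 5;
constexpr int kFlagsModeBits = 2;
constexpr int kFlagsConditionBits = 5;  // was 4
constexpr int kMiscBits = 12;           // was 13

// 22 = the FlagsCondition values visible in this patch (an assumption, the
// full enum is not shown); anything past 16 no longer fits in 4 bits.
constexpr int kConditionCount = 22;

static_assert(kArchOpcodeBits + kAddressingModeBits + kFlagsModeBits +
                  kFlagsConditionBits + kMiscBits == 32,
              "fields must pack into a 32-bit InstructionCode");
static_assert(kConditionCount > (1 << 4), "4 bits no longer suffice");
static_assert(kConditionCount <= (1 << 5), "5 bits do");

int main() { return 0; }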
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 50a94342c9..88283d4898 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -183,7 +183,7 @@ class OperandGenerator {
case IrOpcode::kExternalConstant:
return Constant(OpParameter<ExternalReference>(node));
case IrOpcode::kHeapConstant:
- return Constant(OpParameter<Unique<HeapObject> >(node).handle());
+ return Constant(OpParameter<Handle<HeapObject>>(node));
default:
break;
}
@@ -291,41 +291,7 @@ class FlagsContinuation final {
void Commute() {
DCHECK(!IsNone());
- switch (condition_) {
- case kEqual:
- case kNotEqual:
- case kOverflow:
- case kNotOverflow:
- return;
- case kSignedLessThan:
- condition_ = kSignedGreaterThan;
- return;
- case kSignedGreaterThanOrEqual:
- condition_ = kSignedLessThanOrEqual;
- return;
- case kSignedLessThanOrEqual:
- condition_ = kSignedGreaterThanOrEqual;
- return;
- case kSignedGreaterThan:
- condition_ = kSignedLessThan;
- return;
- case kUnsignedLessThan:
- condition_ = kUnsignedGreaterThan;
- return;
- case kUnsignedGreaterThanOrEqual:
- condition_ = kUnsignedLessThanOrEqual;
- return;
- case kUnsignedLessThanOrEqual:
- condition_ = kUnsignedGreaterThanOrEqual;
- return;
- case kUnsignedGreaterThan:
- condition_ = kUnsignedLessThan;
- return;
- case kUnorderedEqual:
- case kUnorderedNotEqual:
- return;
- }
- UNREACHABLE();
+ condition_ = CommuteFlagsCondition(condition_);
}
void OverwriteAndNegateIfEqual(FlagsCondition condition) {
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 0ae1c63fea..7200bf0e7a 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -360,7 +360,8 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
Node* frame_state =
call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
AddFrameStateInputs(frame_state, &buffer->instruction_args,
- buffer->frame_state_descriptor);
+ buffer->frame_state_descriptor,
+ FrameStateInputKind::kStackSlot);
}
DCHECK(1 + buffer->frame_state_value_count() ==
buffer->instruction_args.size());
@@ -698,6 +699,14 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsWord32(node), VisitTruncateFloat64ToInt32(node);
case IrOpcode::kTruncateInt64ToInt32:
return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
+ case IrOpcode::kBitcastFloat32ToInt32:
+ return MarkAsWord32(node), VisitBitcastFloat32ToInt32(node);
+ case IrOpcode::kBitcastFloat64ToInt64:
+ return MarkAsWord64(node), VisitBitcastFloat64ToInt64(node);
+ case IrOpcode::kBitcastInt32ToFloat32:
+ return MarkAsFloat32(node), VisitBitcastInt32ToFloat32(node);
+ case IrOpcode::kBitcastInt64ToFloat64:
+ return MarkAsFloat64(node), VisitBitcastInt64ToFloat64(node);
case IrOpcode::kFloat32Add:
return MarkAsFloat32(node), VisitFloat32Add(node);
case IrOpcode::kFloat32Sub:
@@ -903,6 +912,16 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
UNIMPLEMENTED();
}
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ UNIMPLEMENTED();
+}
+
#endif // V8_TARGET_ARCH_32_BIT
@@ -1016,7 +1035,7 @@ void InstructionSelector::VisitDeoptimize(Node* value) {
sequence()->AddFrameStateDescriptor(desc);
args.push_back(g.TempImmediate(state_id.ToInt()));
- AddFrameStateInputs(value, &args, desc);
+ AddFrameStateInputs(value, &args, desc, FrameStateInputKind::kAny);
DCHECK_EQ(args.size(), arg_count);
@@ -1059,7 +1078,8 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
}
-static InstructionOperand SlotOrImmediate(OperandGenerator* g, Node* input) {
+InstructionOperand InstructionSelector::OperandForDeopt(
+ OperandGenerator* g, Node* input, FrameStateInputKind kind) {
switch (input->opcode()) {
case IrOpcode::kInt32Constant:
case IrOpcode::kNumberConstant:
@@ -1067,19 +1087,27 @@ static InstructionOperand SlotOrImmediate(OperandGenerator* g, Node* input) {
case IrOpcode::kHeapConstant:
return g->UseImmediate(input);
default:
- return g->UseUniqueSlot(input);
+ switch (kind) {
+ case FrameStateInputKind::kStackSlot:
+ return g->UseUniqueSlot(input);
+ case FrameStateInputKind::kAny:
+ return g->Use(input);
+ }
+ UNREACHABLE();
+ return InstructionOperand();
}
}
-void InstructionSelector::AddFrameStateInputs(
- Node* state, InstructionOperandVector* inputs,
- FrameStateDescriptor* descriptor) {
+void InstructionSelector::AddFrameStateInputs(Node* state,
+ InstructionOperandVector* inputs,
+ FrameStateDescriptor* descriptor,
+ FrameStateInputKind kind) {
DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
if (descriptor->outer_state()) {
AddFrameStateInputs(state->InputAt(kFrameStateOuterStateInput), inputs,
- descriptor->outer_state());
+ descriptor->outer_state(), kind);
}
Node* parameters = state->InputAt(kFrameStateParametersInput);
@@ -1098,23 +1126,23 @@ void InstructionSelector::AddFrameStateInputs(
OperandGenerator g(this);
size_t value_index = 0;
- inputs->push_back(SlotOrImmediate(&g, function));
+ inputs->push_back(OperandForDeopt(&g, function, kind));
descriptor->SetType(value_index++, kMachAnyTagged);
for (StateValuesAccess::TypedNode input_node :
StateValuesAccess(parameters)) {
- inputs->push_back(SlotOrImmediate(&g, input_node.node));
+ inputs->push_back(OperandForDeopt(&g, input_node.node, kind));
descriptor->SetType(value_index++, input_node.type);
}
if (descriptor->HasContext()) {
- inputs->push_back(SlotOrImmediate(&g, context));
+ inputs->push_back(OperandForDeopt(&g, context, kind));
descriptor->SetType(value_index++, kMachAnyTagged);
}
for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
- inputs->push_back(SlotOrImmediate(&g, input_node.node));
+ inputs->push_back(OperandForDeopt(&g, input_node.node, kind));
descriptor->SetType(value_index++, input_node.type);
}
for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
- inputs->push_back(SlotOrImmediate(&g, input_node.node));
+ inputs->push_back(OperandForDeopt(&g, input_node.node, kind));
descriptor->SetType(value_index++, input_node.type);
}
DCHECK(value_index == descriptor->GetSize());
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 9a0744720a..b8354fcfd1 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -22,6 +22,7 @@ class BasicBlock;
struct CallBuffer; // TODO(bmeurer): Remove this.
class FlagsContinuation;
class Linkage;
+class OperandGenerator;
struct SwitchInfo;
typedef ZoneVector<InstructionOperand> InstructionOperandVector;
@@ -173,8 +174,13 @@ class InstructionSelector final {
bool call_address_immediate);
FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
+
+ enum class FrameStateInputKind { kAny, kStackSlot };
void AddFrameStateInputs(Node* state, InstructionOperandVector* inputs,
- FrameStateDescriptor* descriptor);
+ FrameStateDescriptor* descriptor,
+ FrameStateInputKind kind);
+ static InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
+ FrameStateInputKind kind);
// ===========================================================================
// ============= Architecture-specific graph covering methods. ===============
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index 9aebb9a17a..0fbb94979e 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -11,6 +11,54 @@ namespace v8 {
namespace internal {
namespace compiler {
+
+FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
+ switch (condition) {
+ case kSignedLessThan:
+ return kSignedGreaterThan;
+ case kSignedGreaterThanOrEqual:
+ return kSignedLessThanOrEqual;
+ case kSignedLessThanOrEqual:
+ return kSignedGreaterThanOrEqual;
+ case kSignedGreaterThan:
+ return kSignedLessThan;
+ case kUnsignedLessThan:
+ return kUnsignedGreaterThan;
+ case kUnsignedGreaterThanOrEqual:
+ return kUnsignedLessThanOrEqual;
+ case kUnsignedLessThanOrEqual:
+ return kUnsignedGreaterThanOrEqual;
+ case kUnsignedGreaterThan:
+ return kUnsignedLessThan;
+ case kFloatLessThanOrUnordered:
+ return kFloatGreaterThanOrUnordered;
+ case kFloatGreaterThanOrEqual:
+ return kFloatLessThanOrEqual;
+ case kFloatLessThanOrEqual:
+ return kFloatGreaterThanOrEqual;
+ case kFloatGreaterThanOrUnordered:
+ return kFloatLessThanOrUnordered;
+ case kFloatLessThan:
+ return kFloatGreaterThan;
+ case kFloatGreaterThanOrEqualOrUnordered:
+ return kFloatLessThanOrEqualOrUnordered;
+ case kFloatLessThanOrEqualOrUnordered:
+ return kFloatGreaterThanOrEqualOrUnordered;
+ case kFloatGreaterThan:
+ return kFloatLessThan;
+ case kEqual:
+ case kNotEqual:
+ case kOverflow:
+ case kNotOverflow:
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ return condition;
+ }
+ UNREACHABLE();
+ return condition;
+}
+
+
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionOperand& printable) {
const InstructionOperand& op = printable.op_;
@@ -300,6 +348,22 @@ std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc) {
return os << "unsigned less than or equal";
case kUnsignedGreaterThan:
return os << "unsigned greater than";
+ case kFloatLessThanOrUnordered:
+ return os << "less than or unordered (FP)";
+ case kFloatGreaterThanOrEqual:
+ return os << "greater than or equal (FP)";
+ case kFloatLessThanOrEqual:
+ return os << "less than or equal (FP)";
+ case kFloatGreaterThanOrUnordered:
+ return os << "greater than or unordered (FP)";
+ case kFloatLessThan:
+ return os << "less than (FP)";
+ case kFloatGreaterThanOrEqualOrUnordered:
+ return os << "greater than, equal or unordered (FP)";
+ case kFloatLessThanOrEqualOrUnordered:
+ return os << "less than, equal or unordered (FP)";
+ case kFloatGreaterThan:
+ return os << "greater than (FP)";
case kUnorderedEqual:
return os << "unordered equal";
case kUnorderedNotEqual:
@@ -417,7 +481,8 @@ InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
handler_(handler),
needs_frame_(false),
must_construct_frame_(false),
- must_deconstruct_frame_(false) {}
+ must_deconstruct_frame_(false),
+ last_deferred_(RpoNumber::Invalid()) {}
size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
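CommuteFlagsCondition returns the condition that preserves a comparison's truth value when its operands are swapped: the mapping mirrors rather than negates. Illustrative spot checks against the table above:

    // cmp(a, b) under cond  ==  cmp(b, a) under CommuteFlagsCondition(cond).
    // Mirroring is not negation: (a < b) == (b > a), not (b <= a).
    DCHECK_EQ(kSignedGreaterThan, CommuteFlagsCondition(kSignedLessThan));
    // Symmetric conditions map to themselves.
    DCHECK_EQ(kEqual, CommuteFlagsCondition(kEqual));
    // Float conditions keep their unordered half across the swap:
    // (a < b or unordered) == (b > a or unordered).
    DCHECK_EQ(kFloatGreaterThanOrUnordered,
              CommuteFlagsCondition(kFloatLessThanOrUnordered));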
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index 4f6a515f11..a0718f3c21 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -778,9 +778,13 @@ class RpoNumber final {
return other.index_ == this->index_ + 1;
}
- bool operator==(RpoNumber other) const {
- return this->index_ == other.index_;
- }
+ // Comparison operators.
+ bool operator==(RpoNumber other) const { return index_ == other.index_; }
+ bool operator!=(RpoNumber other) const { return index_ != other.index_; }
+ bool operator>(RpoNumber other) const { return index_ > other.index_; }
+ bool operator<(RpoNumber other) const { return index_ < other.index_; }
+ bool operator<=(RpoNumber other) const { return index_ <= other.index_; }
+ bool operator>=(RpoNumber other) const { return index_ >= other.index_; }
private:
explicit RpoNumber(int32_t index) : index_(index) {}
@@ -992,6 +996,9 @@ class InstructionBlock final : public ZoneObject {
bool must_deconstruct_frame() const { return must_deconstruct_frame_; }
void mark_must_deconstruct_frame() { must_deconstruct_frame_ = true; }
+ void set_last_deferred(RpoNumber last) { last_deferred_ = last; }
+ RpoNumber last_deferred() const { return last_deferred_; }
+
private:
Successors successors_;
Predecessors predecessors_;
@@ -1007,6 +1014,7 @@ class InstructionBlock final : public ZoneObject {
bool needs_frame_;
bool must_construct_frame_;
bool must_deconstruct_frame_;
+ RpoNumber last_deferred_;
};
typedef ZoneDeque<Constant> ConstantDeque;
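The new ordering operators on RpoNumber exist to support range tests like the last_deferred_ marker added above. A hypothetical use, not taken from this patch:

    // Hypothetical helper: is `block` inside a contiguous deferred range
    // that the scheduler recorded via set_last_deferred()?
    bool InDeferredRange(RpoNumber block, RpoNumber first, RpoNumber last) {
      return first <= block && block <= last;
    }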
diff --git a/deps/v8/src/compiler/interpreter-assembler.cc b/deps/v8/src/compiler/interpreter-assembler.cc
index 47e014ba39..1f5c0a26a5 100644
--- a/deps/v8/src/compiler/interpreter-assembler.cc
+++ b/deps/v8/src/compiler/interpreter-assembler.cc
@@ -6,6 +6,7 @@
#include <ostream>
+#include "src/code-factory.h"
#include "src/compiler/graph.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
@@ -14,6 +15,7 @@
#include "src/compiler/raw-machine-assembler.h"
#include "src/compiler/schedule.h"
#include "src/frames.h"
+#include "src/interface-descriptors.h"
#include "src/interpreter/bytecodes.h"
#include "src/macro-assembler.h"
#include "src/zone.h"
@@ -30,7 +32,7 @@ InterpreterAssembler::InterpreterAssembler(Isolate* isolate, Zone* zone,
isolate, new (zone) Graph(zone),
Linkage::GetInterpreterDispatchDescriptor(zone), kMachPtr,
InstructionSelector::SupportedMachineOperatorFlags())),
- end_node_(nullptr),
+ end_nodes_(zone),
accumulator_(
raw_assembler_->Parameter(Linkage::kInterpreterAccumulatorParameter)),
code_generated_(false) {}
@@ -44,15 +46,17 @@ Handle<Code> InterpreterAssembler::GenerateCode() {
End();
+ const char* bytecode_name = interpreter::Bytecodes::ToString(bytecode_);
Schedule* schedule = raw_assembler_->Export();
// TODO(rmcilroy): use a non-testing code generator.
- Handle<Code> code = Pipeline::GenerateCodeForTesting(
- isolate(), raw_assembler_->call_descriptor(), graph(), schedule);
+ Handle<Code> code = Pipeline::GenerateCodeForInterpreter(
+ isolate(), raw_assembler_->call_descriptor(), graph(), schedule,
+ bytecode_name);
#ifdef ENABLE_DISASSEMBLER
if (FLAG_trace_ignition_codegen) {
OFStream os(stdout);
- code->Disassemble(interpreter::Bytecodes::ToString(bytecode_), os);
+ code->Disassemble(bytecode_name, os);
os << std::flush;
}
#endif
@@ -72,6 +76,11 @@ void InterpreterAssembler::SetAccumulator(Node* value) {
}
+Node* InterpreterAssembler::ContextTaggedPointer() {
+ return raw_assembler_->Parameter(Linkage::kInterpreterContextParameter);
+}
+
+
Node* InterpreterAssembler::RegisterFileRawPointer() {
return raw_assembler_->Parameter(Linkage::kInterpreterRegisterFileParameter);
}
@@ -94,18 +103,23 @@ Node* InterpreterAssembler::DispatchTableRawPointer() {
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
- return raw_assembler_->WordShl(index, Int32Constant(kPointerSizeLog2));
+ return WordShl(index, kPointerSizeLog2);
+}
+
+
+Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
+ return IntPtrAdd(RegisterFileRawPointer(), RegisterFrameOffset(reg_index));
}
Node* InterpreterAssembler::LoadRegister(Node* reg_index) {
- return raw_assembler_->Load(kMachPtr, RegisterFileRawPointer(),
+ return raw_assembler_->Load(kMachAnyTagged, RegisterFileRawPointer(),
RegisterFrameOffset(reg_index));
}
Node* InterpreterAssembler::StoreRegister(Node* value, Node* reg_index) {
- return raw_assembler_->Store(kMachPtr, RegisterFileRawPointer(),
+ return raw_assembler_->Store(kMachAnyTagged, RegisterFileRawPointer(),
RegisterFrameOffset(reg_index), value);
}
@@ -114,8 +128,7 @@ Node* InterpreterAssembler::BytecodeOperand(int operand_index) {
DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
return raw_assembler_->Load(
kMachUint8, BytecodeArrayTaggedPointer(),
- raw_assembler_->IntPtrAdd(BytecodeOffset(),
- Int32Constant(1 + operand_index)));
+ IntPtrAdd(BytecodeOffset(), Int32Constant(1 + operand_index)));
}
@@ -123,8 +136,7 @@ Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
DCHECK_LT(operand_index, interpreter::Bytecodes::NumberOfOperands(bytecode_));
Node* load = raw_assembler_->Load(
kMachInt8, BytecodeArrayTaggedPointer(),
- raw_assembler_->IntPtrAdd(BytecodeOffset(),
- Int32Constant(1 + operand_index)));
+ IntPtrAdd(BytecodeOffset(), Int32Constant(1 + operand_index)));
// Ensure that we sign-extend to the full pointer size.
if (kPointerSize == 8) {
load = raw_assembler_->ChangeInt32ToInt64(load);
@@ -133,6 +145,13 @@ Node* InterpreterAssembler::BytecodeOperandSignExtended(int operand_index) {
}
+Node* InterpreterAssembler::BytecodeOperandCount(int operand_index) {
+ DCHECK_EQ(interpreter::OperandType::kCount,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperand(operand_index);
+}
+
+
Node* InterpreterAssembler::BytecodeOperandImm8(int operand_index) {
DCHECK_EQ(interpreter::OperandType::kImm8,
interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -140,6 +159,13 @@ Node* InterpreterAssembler::BytecodeOperandImm8(int operand_index) {
}
+Node* InterpreterAssembler::BytecodeOperandIdx(int operand_index) {
+ DCHECK_EQ(interpreter::OperandType::kIdx,
+ interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
+ return BytecodeOperand(operand_index);
+}
+
+
Node* InterpreterAssembler::BytecodeOperandReg(int operand_index) {
DCHECK_EQ(interpreter::OperandType::kReg,
interpreter::Bytecodes::GetOperandType(bytecode_, operand_index));
@@ -152,16 +178,26 @@ Node* InterpreterAssembler::Int32Constant(int value) {
}
+Node* InterpreterAssembler::IntPtrConstant(intptr_t value) {
+ return raw_assembler_->IntPtrConstant(value);
+}
+
+
Node* InterpreterAssembler::NumberConstant(double value) {
return raw_assembler_->NumberConstant(value);
}
-Node* InterpreterAssembler::HeapConstant(Unique<HeapObject> object) {
+Node* InterpreterAssembler::HeapConstant(Handle<HeapObject> object) {
return raw_assembler_->HeapConstant(object);
}
+Node* InterpreterAssembler::BooleanConstant(bool value) {
+ return raw_assembler_->BooleanConstant(value);
+}
+
+
Node* InterpreterAssembler::SmiShiftBitsConstant() {
return Int32Constant(kSmiShiftSize + kSmiTagSize);
}
@@ -177,32 +213,180 @@ Node* InterpreterAssembler::SmiUntag(Node* value) {
}
+Node* InterpreterAssembler::IntPtrAdd(Node* a, Node* b) {
+ return raw_assembler_->IntPtrAdd(a, b);
+}
+
+
+Node* InterpreterAssembler::IntPtrSub(Node* a, Node* b) {
+ return raw_assembler_->IntPtrSub(a, b);
+}
+
+
+Node* InterpreterAssembler::WordShl(Node* value, int shift) {
+ return raw_assembler_->WordShl(value, Int32Constant(shift));
+}
+
+
+Node* InterpreterAssembler::LoadConstantPoolEntry(Node* index) {
+ Node* constant_pool = LoadObjectField(BytecodeArrayTaggedPointer(),
+ BytecodeArray::kConstantPoolOffset);
+ Node* entry_offset =
+ IntPtrAdd(IntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ WordShl(index, kPointerSizeLog2));
+ return raw_assembler_->Load(kMachAnyTagged, constant_pool, entry_offset);
+}
+
+
+Node* InterpreterAssembler::LoadObjectField(Node* object, int offset) {
+ return raw_assembler_->Load(kMachAnyTagged, object,
+ IntPtrConstant(offset - kHeapObjectTag));
+}
+
+
+Node* InterpreterAssembler::LoadContextSlot(Node* context, int slot_index) {
+ return raw_assembler_->Load(kMachAnyTagged, context,
+ IntPtrConstant(Context::SlotOffset(slot_index)));
+}
+
+
+Node* InterpreterAssembler::LoadContextSlot(int slot_index) {
+ return LoadContextSlot(ContextTaggedPointer(), slot_index);
+}
+
+
+Node* InterpreterAssembler::LoadTypeFeedbackVector() {
+ Node* function = raw_assembler_->Load(
+ kMachAnyTagged, RegisterFileRawPointer(),
+ IntPtrConstant(InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ Node* shared_info =
+ LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
+ Node* vector =
+ LoadObjectField(shared_info, SharedFunctionInfo::kFeedbackVectorOffset);
+ return vector;
+}
+
+
+Node* InterpreterAssembler::CallJS(Node* function, Node* first_arg,
+ Node* arg_count) {
+ Callable builtin = CodeFactory::PushArgsAndCall(isolate());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), builtin.descriptor(), 0, CallDescriptor::kNoFlags);
+
+ Node* code_target = HeapConstant(builtin.code());
+
+ Node** args = zone()->NewArray<Node*>(4);
+ args[0] = arg_count;
+ args[1] = first_arg;
+ args[2] = function;
+ args[3] = ContextTaggedPointer();
+
+ return raw_assembler_->CallN(descriptor, code_target, args);
+}
+
+
+Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
+ Node* target, Node** args) {
+ CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), descriptor, 0, CallDescriptor::kNoFlags);
+ return raw_assembler_->CallN(call_descriptor, target, args);
+}
+
+
+Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
+ Node* target, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4) {
+ Node** args = zone()->NewArray<Node*>(5);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = ContextTaggedPointer();
+ return CallIC(descriptor, target, args);
+}
+
+
+Node* InterpreterAssembler::CallIC(CallInterfaceDescriptor descriptor,
+ Node* target, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5) {
+ Node** args = zone()->NewArray<Node*>(6);
+ args[0] = arg1;
+ args[1] = arg2;
+ args[2] = arg3;
+ args[3] = arg4;
+ args[4] = arg5;
+ args[5] = ContextTaggedPointer();
+ return CallIC(descriptor, target, args);
+}
+
+
+Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* arg1) {
+ return raw_assembler_->CallRuntime1(function_id, arg1,
+ ContextTaggedPointer());
+}
+
+
+Node* InterpreterAssembler::CallRuntime(Runtime::FunctionId function_id,
+ Node* arg1, Node* arg2) {
+ return raw_assembler_->CallRuntime2(function_id, arg1, arg2,
+ ContextTaggedPointer());
+}
+
+
void InterpreterAssembler::Return() {
Node* exit_trampoline_code_object =
- HeapConstant(Unique<HeapObject>::CreateImmovable(
- isolate()->builtins()->InterpreterExitTrampoline()));
+ HeapConstant(isolate()->builtins()->InterpreterExitTrampoline());
// If you change the order of the parameters, you need to change the call signature below.
STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
STATIC_ASSERT(1 == Linkage::kInterpreterRegisterFileParameter);
STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
- Node* tail_call = raw_assembler_->TailCallInterpreterDispatch(
- call_descriptor(), exit_trampoline_code_object, GetAccumulator(),
- RegisterFileRawPointer(), BytecodeOffset(), BytecodeArrayTaggedPointer(),
- DispatchTableRawPointer());
+ STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
+ Node* args[] = { GetAccumulator(),
+ RegisterFileRawPointer(),
+ BytecodeOffset(),
+ BytecodeArrayTaggedPointer(),
+ DispatchTableRawPointer(),
+ ContextTaggedPointer() };
+ Node* tail_call = raw_assembler_->TailCallN(
+ call_descriptor(), exit_trampoline_code_object, args);
// This should always be the end node.
- SetEndInput(tail_call);
+ AddEndInput(tail_call);
}
Node* InterpreterAssembler::Advance(int delta) {
- return raw_assembler_->IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
+ return IntPtrAdd(BytecodeOffset(), Int32Constant(delta));
+}
+
+
+Node* InterpreterAssembler::Advance(Node* delta) {
+ return raw_assembler_->IntPtrAdd(BytecodeOffset(), delta);
+}
+
+
+void InterpreterAssembler::Jump(Node* delta) { DispatchTo(Advance(delta)); }
+
+
+void InterpreterAssembler::JumpIfWordEqual(Node* lhs, Node* rhs, Node* delta) {
+ RawMachineAssembler::Label match, no_match;
+ Node* condition = raw_assembler_->WordEqual(lhs, rhs);
+ raw_assembler_->Branch(condition, &match, &no_match);
+ raw_assembler_->Bind(&match);
+ DispatchTo(Advance(delta));
+ raw_assembler_->Bind(&no_match);
+ Dispatch();
}
void InterpreterAssembler::Dispatch() {
- Node* new_bytecode_offset = Advance(interpreter::Bytecodes::Size(bytecode_));
+ DispatchTo(Advance(interpreter::Bytecodes::Size(bytecode_)));
+}
+
+
+void InterpreterAssembler::DispatchTo(Node* new_bytecode_offset) {
Node* target_bytecode = raw_assembler_->Load(
kMachUint8, BytecodeArrayTaggedPointer(), new_bytecode_offset);
@@ -219,25 +403,31 @@ void InterpreterAssembler::Dispatch() {
STATIC_ASSERT(2 == Linkage::kInterpreterBytecodeOffsetParameter);
STATIC_ASSERT(3 == Linkage::kInterpreterBytecodeArrayParameter);
STATIC_ASSERT(4 == Linkage::kInterpreterDispatchTableParameter);
- Node* tail_call = raw_assembler_->TailCallInterpreterDispatch(
- call_descriptor(), target_code_object, GetAccumulator(),
- RegisterFileRawPointer(), new_bytecode_offset,
- BytecodeArrayTaggedPointer(), DispatchTableRawPointer());
+ STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
+ Node* args[] = { GetAccumulator(),
+ RegisterFileRawPointer(),
+ new_bytecode_offset,
+ BytecodeArrayTaggedPointer(),
+ DispatchTableRawPointer(),
+ ContextTaggedPointer() };
+ Node* tail_call =
+ raw_assembler_->TailCallN(call_descriptor(), target_code_object, args);
// This should always be the end node.
- SetEndInput(tail_call);
+ AddEndInput(tail_call);
}
-void InterpreterAssembler::SetEndInput(Node* input) {
- DCHECK(!end_node_);
- end_node_ = input;
+void InterpreterAssembler::AddEndInput(Node* input) {
+ DCHECK_NOT_NULL(input);
+ end_nodes_.push_back(input);
}
void InterpreterAssembler::End() {
- DCHECK(end_node_);
- // TODO(rmcilroy): Support more than 1 end input.
- Node* end = graph()->NewNode(raw_assembler_->common()->End(1), end_node_);
+ DCHECK(!end_nodes_.empty());
+ int end_count = static_cast<int>(end_nodes_.size());
+ Node* end = graph()->NewNode(raw_assembler_->common()->End(end_count),
+ end_count, &end_nodes_[0]);
graph()->SetEnd(end);
}
@@ -259,6 +449,8 @@ Schedule* InterpreterAssembler::schedule() {
}
+Zone* InterpreterAssembler::zone() { return raw_assembler_->zone(); }
+
} // namespace interpreter
} // namespace internal
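The helpers added to InterpreterAssembler are already sufficient to express a whole bytecode handler. A hypothetical register-load handler (the name and access control are illustrative, not part of this patch):

    // Sketch: read operand 0 as a register index, move that register's value
    // into the accumulator, then dispatch to the next bytecode's handler.
    void DoLdarSketch(InterpreterAssembler* assembler) {
      Node* reg_index = assembler->BytecodeOperandReg(0);
      assembler->SetAccumulator(assembler->LoadRegister(reg_index));
      assembler->Dispatch();  // ends the handler with a dispatch-table tail call
    }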
diff --git a/deps/v8/src/compiler/interpreter-assembler.h b/deps/v8/src/compiler/interpreter-assembler.h
index 4662fc8042..67ab9cc2a9 100644
--- a/deps/v8/src/compiler/interpreter-assembler.h
+++ b/deps/v8/src/compiler/interpreter-assembler.h
@@ -9,13 +9,16 @@
// Do not include anything from src/compiler here!
#include "src/allocation.h"
#include "src/base/smart-pointers.h"
+#include "src/builtins.h"
#include "src/frames.h"
#include "src/interpreter/bytecodes.h"
-#include "src/unique.h"
+#include "src/runtime/runtime.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
+class CallInterfaceDescriptor;
class Isolate;
class Zone;
@@ -36,6 +39,12 @@ class InterpreterAssembler {
Handle<Code> GenerateCode();
+ // Returns the count immediate for bytecode operand |operand_index| in the
+ // current bytecode.
+ Node* BytecodeOperandCount(int operand_index);
+ // Returns the index immediate for bytecode operand |operand_index| in the
+ // current bytecode.
+ Node* BytecodeOperandIdx(int operand_index);
// Returns the Imm8 immediate for bytecode operand |operand_index| in the
// current bytecode.
Node* BytecodeOperandImm8(int operand_index);
@@ -51,15 +60,62 @@ class InterpreterAssembler {
Node* LoadRegister(Node* reg_index);
Node* StoreRegister(Node* value, Node* reg_index);
+ // Returns the location in memory of the register |reg_index| in the
+ // interpreter register file.
+ Node* RegisterLocation(Node* reg_index);
+
// Constants.
Node* Int32Constant(int value);
+ Node* IntPtrConstant(intptr_t value);
Node* NumberConstant(double value);
- Node* HeapConstant(Unique<HeapObject> object);
+ Node* HeapConstant(Handle<HeapObject> object);
+ Node* BooleanConstant(bool value);
// Tag and untag Smi values.
Node* SmiTag(Node* value);
Node* SmiUntag(Node* value);
+ // Basic arithmetic operations.
+ Node* IntPtrAdd(Node* a, Node* b);
+ Node* IntPtrSub(Node* a, Node* b);
+ Node* WordShl(Node* value, int shift);
+
+ // Load constant at |index| in the constant pool.
+ Node* LoadConstantPoolEntry(Node* index);
+
+ // Load a field from an object on the heap.
+ Node* LoadObjectField(Node* object, int offset);
+
+ // Load |slot_index| from a context.
+ Node* LoadContextSlot(Node* context, int slot_index);
+
+ // Load |slot_index| from the current context.
+ Node* LoadContextSlot(int slot_index);
+
+ // Load the TypeFeedbackVector for the current function.
+ Node* LoadTypeFeedbackVector();
+
+ // Call JSFunction or Callable |function| with |arg_count| (not including
+ // receiver) and the first argument located at |first_arg|.
+ Node* CallJS(Node* function, Node* first_arg, Node* arg_count);
+
+ // Call an IC code stub.
+ Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4);
+ Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node* arg1,
+ Node* arg2, Node* arg3, Node* arg4, Node* arg5);
+
+ // Call runtime function.
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1);
+ Node* CallRuntime(Runtime::FunctionId function_id, Node* arg1, Node* arg2);
+
+ // Jump relative to the current bytecode by |jump_offset|.
+ void Jump(Node* jump_offset);
+
+ // Jump relative to the current bytecode by |jump_offset| if the
+ // word values |lhs| and |rhs| are equal.
+ void JumpIfWordEqual(Node* lhs, Node* rhs, Node* jump_offset);
+
// Returns from the function.
void Return();
@@ -81,8 +137,10 @@ class InterpreterAssembler {
Node* BytecodeArrayTaggedPointer();
// Returns the offset from the BytecodeArrayPointer of the current bytecode.
Node* BytecodeOffset();
- // Returns a pointer to first entry in the interpreter dispatch table.
+  // Returns a raw pointer to the first entry in the interpreter dispatch table.
Node* DispatchTableRawPointer();
+ // Returns a tagged pointer to the current context.
+ Node* ContextTaggedPointer();
// Returns the offset of register |index| relative to RegisterFilePointer().
Node* RegisterFrameOffset(Node* index);
@@ -91,20 +149,29 @@ class InterpreterAssembler {
Node* BytecodeOperand(int operand_index);
Node* BytecodeOperandSignExtended(int operand_index);
+ Node* CallIC(CallInterfaceDescriptor descriptor, Node* target, Node** args);
+ Node* CallJSBuiltin(int context_index, Node* receiver, Node** js_args,
+ int js_arg_count);
+
// Returns BytecodeOffset() advanced by delta bytecodes. Note: this does not
// update BytecodeOffset() itself.
Node* Advance(int delta);
+ Node* Advance(Node* delta);
+
+ // Starts next instruction dispatch at |new_bytecode_offset|.
+ void DispatchTo(Node* new_bytecode_offset);
- // Sets the end node of the graph.
- void SetEndInput(Node* input);
+  // Adds an input to the end node of the graph.
+ void AddEndInput(Node* input);
// Private helpers which delegate to RawMachineAssembler.
Isolate* isolate();
Schedule* schedule();
+ Zone* zone();
interpreter::Bytecode bytecode_;
base::SmartPointer<RawMachineAssembler> raw_assembler_;
- Node* end_node_;
+ ZoneVector<Node*> end_nodes_;
Node* accumulator_;
bool code_generated_;
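Replacing the single end_node_ with end_nodes_ is what makes JumpIfWordEqual possible: each branch arm ends in its own dispatch tail call, and the graph's End node must collect all of them. Condensed from End() in interpreter-assembler.cc above:

    // With N recorded tail calls (N == 2 after a JumpIfWordEqual), build a
    // variadic End node instead of asserting exactly one input.
    int end_count = static_cast<int>(end_nodes_.size());
    graph()->SetEnd(graph()->NewNode(raw_assembler_->common()->End(end_count),
                                     end_count, &end_nodes_[0]));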
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index 9c45a043ee..49ed031182 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -7,6 +7,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
+#include "src/objects-inl.h"
#include "src/types.h"
namespace v8 {
@@ -25,8 +26,8 @@ class JSCallReduction {
bool HasBuiltinFunctionId() {
if (node_->opcode() != IrOpcode::kJSCallFunction) return false;
HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
- if (!m.HasValue() || !m.Value().handle()->IsJSFunction()) return false;
- Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+ if (!m.HasValue() || !m.Value()->IsJSFunction()) return false;
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
return function->shared()->HasBuiltinFunctionId();
}
@@ -34,7 +35,7 @@ class JSCallReduction {
BuiltinFunctionId GetBuiltinFunctionId() {
DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
- Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
return function->shared()->builtin_function_id();
}
@@ -44,20 +45,20 @@ class JSCallReduction {
// Determines whether the call takes one input of the given type.
bool InputsMatchOne(Type* t1) {
return GetJSCallArity() == 1 &&
- NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1);
+ NodeProperties::GetType(GetJSCallInput(0))->Is(t1);
}
// Determines whether the call takes two inputs of the given types.
bool InputsMatchTwo(Type* t1, Type* t2) {
return GetJSCallArity() == 2 &&
- NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1) &&
- NodeProperties::GetBounds(GetJSCallInput(1)).upper->Is(t2);
+ NodeProperties::GetType(GetJSCallInput(0))->Is(t1) &&
+ NodeProperties::GetType(GetJSCallInput(1))->Is(t2);
}
// Determines whether the call takes inputs all of the given type.
bool InputsMatchAll(Type* t) {
for (int i = 0; i < GetJSCallArity(); i++) {
- if (!NodeProperties::GetBounds(GetJSCallInput(i)).upper->Is(t)) {
+ if (!NodeProperties::GetType(GetJSCallInput(i))->Is(t)) {
return false;
}
}
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 07746fa98b..0ad25e179d 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -10,6 +10,7 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/contexts.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -35,8 +36,7 @@ MaybeHandle<Context> JSContextSpecialization::GetSpecializationContext(
Node* const object = NodeProperties::GetValueInput(node, 0);
switch (object->opcode()) {
case IrOpcode::kHeapConstant:
- return Handle<Context>::cast(
- OpParameter<Unique<HeapObject>>(object).handle());
+ return Handle<Context>::cast(OpParameter<Handle<HeapObject>>(object));
case IrOpcode::kParameter: {
Node* const start = NodeProperties::GetValueInput(object, 0);
DCHECK_EQ(IrOpcode::kStart, start->opcode());
@@ -77,8 +77,8 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
}
const Operator* op = jsgraph_->javascript()->LoadContext(
0, access.index(), access.immutable());
- node->set_op(op);
node->ReplaceInput(0, jsgraph_->Constant(context));
+ NodeProperties::ChangeOp(node, op);
return Changed(node);
}
Handle<Object> value =
@@ -119,8 +119,8 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
context = handle(context->previous(), isolate());
}
- node->set_op(javascript()->StoreContext(0, access.index()));
node->ReplaceInput(0, jsgraph_->Constant(context));
+ NodeProperties::ChangeOp(node, javascript()->StoreContext(0, access.index()));
return Changed(node);
}
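A pattern repeated throughout this patch: node->set_op(op) becomes NodeProperties::ChangeOp(node, op), and the call consistently moves to after the inputs have been rewired. Presumably ChangeOp can then validate the operator against the node's final shape; sketched for the reduction above (the validation behaviour is an assumption, not stated in the diff):

    // 1. Rewire the inputs to their specialized values first.
    node->ReplaceInput(0, jsgraph_->Constant(context));
    // 2. Only then swap the operator, so any checks inside ChangeOp see the
    //    finished node (replaces the old eager node->set_op(op)).
    NodeProperties::ChangeOp(node, op);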
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index bec199e0e3..eac0565786 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -11,7 +11,6 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
-#include "src/unique.h"
namespace v8 {
namespace internal {
@@ -86,40 +85,41 @@ REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
#undef REPLACE_BINARY_OP_IC_CALL
-#define REPLACE_COMPARE_IC_CALL(op, token) \
- void JSGenericLowering::Lower##op(Node* node) { \
- ReplaceWithCompareIC(node, token); \
+// These ops are not language mode dependent; we arbitrarily pass Strength::WEAK
+// here.
+#define REPLACE_COMPARE_IC_CALL(op, token) \
+ void JSGenericLowering::Lower##op(Node* node) { \
+ ReplaceWithCompareIC(node, token, Strength::WEAK); \
}
REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ)
REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE)
REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT)
REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT)
-REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT)
-REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT)
-REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE)
-REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE)
#undef REPLACE_COMPARE_IC_CALL
+#define REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(op, token) \
+ void JSGenericLowering::Lower##op(Node* node) { \
+ ReplaceWithCompareIC(node, token, \
+ strength(OpParameter<LanguageMode>(node))); \
+ }
+REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSLessThan, Token::LT)
+REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSGreaterThan, Token::GT)
+REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSLessThanOrEqual, Token::LTE)
+REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(JSGreaterThanOrEqual, Token::GTE)
+#undef REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE
+
+
#define REPLACE_RUNTIME_CALL(op, fun) \
void JSGenericLowering::Lower##op(Node* node) { \
ReplaceWithRuntimeCall(node, fun); \
}
-REPLACE_RUNTIME_CALL(JSCreate, Runtime::kAbort)
REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
-REPLACE_RUNTIME_CALL(JSCreateBlockContext, Runtime::kPushBlockContext)
REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
-REPLACE_RUNTIME_CALL(JSCreateScriptContext, Runtime::kNewScriptContext)
#undef REPLACE_RUNTIME_CALL
-#define REPLACE_UNIMPLEMENTED(op) \
- void JSGenericLowering::Lower##op(Node* node) { UNIMPLEMENTED(); }
-REPLACE_UNIMPLEMENTED(JSYield)
-#undef REPLACE_UNIMPLEMENTED
-
-
static CallDescriptor::Flags FlagsForNode(Node* node) {
CallDescriptor::Flags result = CallDescriptor::kNoFlags;
if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
@@ -129,9 +129,9 @@ static CallDescriptor::Flags FlagsForNode(Node* node) {
}
-void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
- Callable callable = CodeFactory::CompareIC(
- isolate(), token, strength(OpParameter<LanguageMode>(node)));
+void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
+ Strength str) {
+ Callable callable = CodeFactory::CompareIC(isolate(), token, str);
// Create a new call node asking a CompareIC for help.
NodeVector inputs(zone());
@@ -140,17 +140,19 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
inputs.push_back(NodeProperties::GetValueInput(node, 0));
inputs.push_back(NodeProperties::GetValueInput(node, 1));
inputs.push_back(NodeProperties::GetContextInput(node));
- if (node->op()->HasProperty(Operator::kPure)) {
- // A pure (strict) comparison doesn't have an effect, control or frame
- // state. But for the graph, we need to add control and effect inputs.
- DCHECK(OperatorProperties::GetFrameStateInputCount(node->op()) == 0);
- inputs.push_back(graph()->start());
- inputs.push_back(graph()->start());
- } else {
+ // Some comparisons (StrictEqual) don't have an effect, control or frame
+ // state inputs, so handle those cases here.
+ if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
inputs.push_back(NodeProperties::GetFrameStateInput(node, 0));
- inputs.push_back(NodeProperties::GetEffectInput(node));
- inputs.push_back(NodeProperties::GetControlInput(node));
}
+ Node* effect = (node->op()->EffectInputCount() > 0)
+ ? NodeProperties::GetEffectInput(node)
+ : graph()->start();
+ inputs.push_back(effect);
+ Node* control = (node->op()->ControlInputCount() > 0)
+ ? NodeProperties::GetControlInput(node)
+ : graph()->start();
+ inputs.push_back(control);
CallDescriptor* desc_compare = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 0,
CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node),
@@ -199,7 +201,7 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
node->ReplaceInput(0, booleanize);
node->ReplaceInput(1, true_value);
node->ReplaceInput(2, false_value);
- node->set_op(common()->Select(kMachAnyTagged));
+ NodeProperties::ChangeOp(node, common()->Select(kMachAnyTagged));
}
@@ -210,40 +212,7 @@ void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
isolate(), zone(), callable.descriptor(), 0, flags, properties);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(zone(), 0, stub_code);
- node->set_op(common()->Call(desc));
-}
-
-
-void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
- Builtins::JavaScript id,
- int nargs) {
- Node* context_input = NodeProperties::GetContextInput(node);
- Node* effect_input = NodeProperties::GetEffectInput(node);
-
- CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- Operator::Properties properties = node->op()->properties();
- Callable callable =
- CodeFactory::CallFunction(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), nargs, flags, properties);
- Node* global_object =
- graph()->NewNode(machine()->Load(kMachAnyTagged), context_input,
- jsgraph()->IntPtrConstant(
- Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)),
- effect_input, graph()->start());
- Node* builtins_object = graph()->NewNode(
- machine()->Load(kMachAnyTagged), global_object,
- jsgraph()->IntPtrConstant(GlobalObject::kBuiltinsOffset - kHeapObjectTag),
- effect_input, graph()->start());
- Node* function = graph()->NewNode(
- machine()->Load(kMachAnyTagged), builtins_object,
- jsgraph()->IntPtrConstant(JSBuiltinsObject::OffsetOfFunctionWithId(id) -
- kHeapObjectTag),
- effect_input, graph()->start());
- Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 1, function);
- node->set_op(common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
@@ -260,7 +229,7 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
node->InsertInput(zone(), 0, jsgraph()->CEntryStubConstant(fun->result_size));
node->InsertInput(zone(), nargs + 1, ref);
node->InsertInput(zone(), nargs + 2, arity);
- node->set_op(common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
@@ -297,12 +266,14 @@ void JSGenericLowering::LowerJSToNumber(Node* node) {
void JSGenericLowering::LowerJSToString(Node* node) {
- ReplaceWithBuiltinCall(node, Builtins::TO_STRING, 1);
+ CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
+ Callable callable = CodeFactory::ToString(isolate());
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSToName(Node* node) {
- ReplaceWithBuiltinCall(node, Builtins::TO_NAME, 1);
+ ReplaceWithRuntimeCall(node, Runtime::kToName);
}
@@ -360,7 +331,7 @@ void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
const StorePropertyParameters& p = StorePropertyParametersOf(node->op());
- LanguageMode language_mode = OpParameter<LanguageMode>(node);
+ LanguageMode language_mode = p.language_mode();
// We have a special case where we do keyed stores but don't have a type
// feedback vector slot allocated to support it. In this case, install
// the megamorphic keyed store stub which needs neither vector nor slot.
@@ -437,16 +408,13 @@ void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
void JSGenericLowering::LowerJSHasProperty(Node* node) {
- ReplaceWithBuiltinCall(node, Builtins::IN, 2);
+ ReplaceWithRuntimeCall(node, Runtime::kHasProperty);
}
void JSGenericLowering::LowerJSInstanceOf(Node* node) {
CallDescriptor::Flags flags = AdjustFrameStatesForCall(node);
- InstanceofStub::Flags stub_flags = static_cast<InstanceofStub::Flags>(
- InstanceofStub::kReturnTrueFalseObject |
- InstanceofStub::kArgsInRegisters);
- Callable callable = CodeFactory::Instanceof(isolate(), stub_flags);
+ Callable callable = CodeFactory::InstanceOf(isolate());
ReplaceWithStubCall(node, callable, flags);
}
@@ -465,7 +433,7 @@ void JSGenericLowering::LowerJSLoadContext(Node* node) {
node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
static_cast<int>(access.index()))));
node->AppendInput(zone(), graph()->start());
- node->set_op(machine()->Load(kMachAnyTagged));
+ NodeProperties::ChangeOp(node, machine()->Load(kMachAnyTagged));
}
@@ -483,8 +451,8 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
static_cast<int>(access.index()))));
- node->set_op(
- machine()->Store(StoreRepresentation(kMachAnyTagged, kFullWriteBarrier)));
+ NodeProperties::ChangeOp(node, machine()->Store(StoreRepresentation(
+ kMachAnyTagged, kFullWriteBarrier)));
}
@@ -514,11 +482,31 @@ void JSGenericLowering::LowerJSLoadDynamicContext(Node* node) {
}
+void JSGenericLowering::LowerJSCreate(Node* node) { UNIMPLEMENTED(); }
+
+
+void JSGenericLowering::LowerJSCreateArguments(Node* node) {
+ const CreateArgumentsParameters& p = CreateArgumentsParametersOf(node->op());
+ switch (p.type()) {
+ case CreateArgumentsParameters::kMappedArguments:
+ ReplaceWithRuntimeCall(node, Runtime::kNewSloppyArguments_Generic);
+ break;
+ case CreateArgumentsParameters::kUnmappedArguments:
+ ReplaceWithRuntimeCall(node, Runtime::kNewStrictArguments_Generic);
+ break;
+ case CreateArgumentsParameters::kRestArray:
+ UNIMPLEMENTED();
+ break;
+ }
+}
+
+
void JSGenericLowering::LowerJSCreateClosure(Node* node) {
CreateClosureParameters p = CreateClosureParametersOf(node->op());
- node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.shared_info()));
- node->InsertInput(zone(), 2, jsgraph()->BooleanConstant(p.pretenure()));
- ReplaceWithRuntimeCall(node, Runtime::kNewClosure);
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.shared_info()));
+ ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
+ ? Runtime::kNewClosure_Tenured
+ : Runtime::kNewClosure);
}
@@ -537,12 +525,26 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
void JSGenericLowering::LowerJSCreateCatchContext(Node* node) {
- Unique<String> name = OpParameter<Unique<String>>(node);
+ Handle<String> name = OpParameter<Handle<String>>(node);
node->InsertInput(zone(), 0, jsgraph()->HeapConstant(name));
ReplaceWithRuntimeCall(node, Runtime::kPushCatchContext);
}
+void JSGenericLowering::LowerJSCreateBlockContext(Node* node) {
+ Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+ node->InsertInput(zone(), 0, jsgraph()->HeapConstant(scope_info));
+ ReplaceWithRuntimeCall(node, Runtime::kPushBlockContext);
+}
+
+
+void JSGenericLowering::LowerJSCreateScriptContext(Node* node) {
+ Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+ node->InsertInput(zone(), 1, jsgraph()->HeapConstant(scope_info));
+ ReplaceWithRuntimeCall(node, Runtime::kNewScriptContext);
+}
+
+
void JSGenericLowering::LowerJSCallConstruct(Node* node) {
int arity = OpParameter<int>(node);
CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
@@ -559,7 +561,7 @@ void JSGenericLowering::LowerJSCallConstruct(Node* node) {
node->InsertInput(zone(), 2, actual_construct);
node->InsertInput(zone(), 3, original_construct);
node->InsertInput(zone(), 4, jsgraph()->UndefinedConstant());
- node->set_op(common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
@@ -576,7 +578,7 @@ void JSGenericLowering::LowerJSCallFunction(Node* node) {
isolate(), zone(), d, static_cast<int>(p.arity() - 1), flags);
Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
node->InsertInput(zone(), 0, stub_code);
- node->set_op(common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
@@ -792,6 +794,9 @@ void JSGenericLowering::LowerJSForInStep(Node* node) {
}
+void JSGenericLowering::LowerJSYield(Node* node) { UNIMPLEMENTED(); }
+
+
void JSGenericLowering::LowerJSStackCheck(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
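The comparison lowerings are now split by whether they carry a language mode. For those that do, the new macro expands to a one-line forwarder; written out by hand for JSLessThan:

    // Hand expansion of REPLACE_COMPARE_IC_CALL_WITH_LANGUAGE_MODE(
    //     JSLessThan, Token::LT): Strength is recovered from the operator's
    //     LanguageMode parameter instead of being fixed at Strength::WEAK.
    void JSGenericLowering::LowerJSLessThan(Node* node) {
      ReplaceWithCompareIC(node, Token::LT,
                           strength(OpParameter<LanguageMode>(node)));
    }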
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index 9811ba8451..ffce9126df 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -36,9 +36,8 @@ class JSGenericLowering final : public Reducer {
#undef DECLARE_LOWER
// Helpers to replace existing nodes with a generic call.
- void ReplaceWithCompareIC(Node* node, Token::Value token);
+ void ReplaceWithCompareIC(Node* node, Token::Value token, Strength strength);
void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
- void ReplaceWithBuiltinCall(Node* node, Builtins::JavaScript id, int args);
void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
Zone* zone() const;
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 84fcf82c84..9b6b187c6a 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -12,8 +12,7 @@ namespace internal {
namespace compiler {
Node* JSGraph::ImmovableHeapConstant(Handle<HeapObject> object) {
- Unique<HeapObject> unique = Unique<HeapObject>::CreateImmovable(object);
- return graph()->NewNode(common()->HeapConstant(unique));
+ return graph()->NewNode(common()->HeapConstant(object));
}
@@ -74,23 +73,12 @@ Node* JSGraph::NaNConstant() {
}
-Node* JSGraph::HeapConstant(Unique<HeapObject> value) {
- // TODO(turbofan): canonicalize heap constants using Unique<T>
- return graph()->NewNode(common()->HeapConstant(value));
-}
-
-
Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
+ // TODO(turbofan): canonicalize heap constants using <magic approach>.
// TODO(titzer): We could also match against the addresses of immortal
// immovables here, even without access to the heap, thus always
// canonicalizing references to them.
- // return HeapConstant(Unique<Object>::CreateUninitialized(value));
- // TODO(turbofan): This is a work-around to make Unique::HashCode() work for
- // value numbering. We need some sane way to compute a unique hash code for
- // arbitrary handles here.
- Unique<HeapObject> unique(reinterpret_cast<Address>(*value.location()),
- value);
- return HeapConstant(unique);
+ return graph()->NewNode(common()->HeapConstant(value));
}
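With Unique<HeapObject> gone, heap constants are built directly from handles, and the operator parameters later in this patch compare and hash by Handle::location(). That check is conservative by design; a sketch of the reasoning, not code from the diff:

    // location() is the address of the slot holding the object pointer, so
    // equal locations imply the same object, while two handles to one object
    // may compare unequal. For canonicalization that is safe: a miss only
    // costs a duplicate node, never a wrong hit.
    bool DefinitelySameObject(Handle<HeapObject> a, Handle<HeapObject> b) {
      return a.location() == b.location();
    }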
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index c7f07d46db..4f23773259 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -11,6 +11,7 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
@@ -45,10 +46,6 @@ class JSGraph : public ZoneObject {
Node* OneConstant();
Node* NaNConstant();
- // Creates a HeapConstant node, possibly canonicalized, without inspecting the
- // object.
- Node* HeapConstant(Unique<HeapObject> value);
-
// Creates a HeapConstant node, possibly canonicalized, and may access the
// heap to inspect the object.
Node* HeapConstant(Handle<HeapObject> value);
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index a4f3e1d03e..0b7c78979c 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -6,6 +6,7 @@
#include "src/ast.h"
#include "src/ast-numbering.h"
+#include "src/compiler.h"
#include "src/compiler/all-nodes.h"
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/common-operator.h"
@@ -14,6 +15,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/rewriter.h"
#include "src/scopes.h"
@@ -136,18 +138,17 @@ Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
switch (use->opcode()) {
case IrOpcode::kParameter: {
int index = 1 + ParameterIndexOf(use->op());
+ DCHECK_LE(index, inlinee_context_index);
if (index < inliner_inputs && index < inlinee_context_index) {
// There is an input from the call, and the index is a value
// projection but not the context, so rewire the input.
Replace(use, call->InputAt(index));
} else if (index == inlinee_context_index) {
+ // The projection is requesting the inlinee function context.
Replace(use, context);
- } else if (index < inlinee_context_index) {
+ } else {
// Call has fewer arguments than required, fill with undefined.
Replace(use, jsgraph_->UndefinedConstant());
- } else {
- // We got too many arguments, discard for now.
- // TODO(sigurds): Fix to treat arguments array correctly.
}
break;
}
@@ -178,33 +179,37 @@ Reduction JSInliner::InlineCall(Node* call, Node* context, Node* frame_state,
case IrOpcode::kDeoptimize:
case IrOpcode::kTerminate:
case IrOpcode::kThrow:
- jsgraph_->graph()->end()->AppendInput(jsgraph_->zone(), input);
- jsgraph_->graph()->end()->set_op(
- jsgraph_->common()->End(jsgraph_->graph()->end()->InputCount()));
+ NodeProperties::MergeControlToEnd(jsgraph_->graph(), jsgraph_->common(),
+ input);
break;
default:
UNREACHABLE();
break;
}
}
- DCHECK_NE(0u, values.size());
DCHECK_EQ(values.size(), effects.size());
DCHECK_EQ(values.size(), controls.size());
- int const input_count = static_cast<int>(controls.size());
- Node* control_output = jsgraph_->graph()->NewNode(
- jsgraph_->common()->Merge(input_count), input_count, &controls.front());
- values.push_back(control_output);
- effects.push_back(control_output);
- Node* value_output = jsgraph_->graph()->NewNode(
- jsgraph_->common()->Phi(kMachAnyTagged, input_count),
- static_cast<int>(values.size()), &values.front());
- Node* effect_output = jsgraph_->graph()->NewNode(
- jsgraph_->common()->EffectPhi(input_count),
- static_cast<int>(effects.size()), &effects.front());
-
- ReplaceWithValue(call, value_output, effect_output, control_output);
-
- return Changed(value_output);
+
+ // Depending on whether the inlinee produces a value, we either replace value
+ // uses with said value or kill value uses if no value can be returned.
+ if (values.size() > 0) {
+ int const input_count = static_cast<int>(controls.size());
+ Node* control_output = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->Merge(input_count), input_count, &controls.front());
+ values.push_back(control_output);
+ effects.push_back(control_output);
+ Node* value_output = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->Phi(kMachAnyTagged, input_count),
+ static_cast<int>(values.size()), &values.front());
+ Node* effect_output = jsgraph_->graph()->NewNode(
+ jsgraph_->common()->EffectPhi(input_count),
+ static_cast<int>(effects.size()), &effects.front());
+ ReplaceWithValue(call, value_output, effect_output, control_output);
+ return Changed(value_output);
+ } else {
+ ReplaceWithValue(call, call, call, jsgraph_->Dead());
+ return Changed(call);
+ }
}
@@ -243,9 +248,8 @@ Reduction JSInliner::Reduce(Node* node) {
HeapObjectMatcher match(call.jsfunction());
if (!match.HasValue()) return NoChange();
- if (!match.Value().handle()->IsJSFunction()) return NoChange();
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(match.Value().handle());
+ if (!match.Value()->IsJSFunction()) return NoChange();
+ Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
if (mode_ == kRestrictedInlining && !function->shared()->force_inline()) {
return NoChange();
}
@@ -296,12 +300,18 @@ Reduction JSInliner::Reduce(Node* node) {
CompilationInfo info(&parse_info);
if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
- if (!Compiler::ParseAndAnalyze(info.parse_info())) return NoChange();
- if (!Compiler::EnsureDeoptimizationSupport(&info)) return NoChange();
+ if (!Compiler::ParseAndAnalyze(info.parse_info())) {
+ TRACE("Not inlining %s into %s because parsing failed\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
+ if (info_->isolate()->has_pending_exception()) {
+ info_->isolate()->clear_pending_exception();
+ }
+ return NoChange();
+ }
- if (info.scope()->arguments() != NULL && is_sloppy(info.language_mode())) {
- // For now do not inline functions that use their arguments array.
- TRACE("Not inlining %s into %s because inlinee uses arguments array\n",
+ if (!Compiler::EnsureDeoptimizationSupport(&info)) {
+ TRACE("Not inlining %s into %s because deoptimization support failed\n",
function->shared()->DebugName()->ToCString().get(),
info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
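InlineCall now distinguishes inlinees with at least one normal return from those whose every path throws, deoptimizes, or terminates; only the former produce a value to merge. Condensed from the function above:

    // values/effects/controls were collected from the inlinee's return paths.
    if (values.empty()) {
      // Every path ended in Throw/Deoptimize/Terminate; those were already
      // merged into the graph's End, so the call's value uses are dead.
      ReplaceWithValue(call, call, call, jsgraph_->Dead());
    } else {
      // Merge the returning controls, Phi the values and effects, and
      // substitute the resulting Phi for the call node, as in the diff above.
    }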
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index b075024dd3..21057e61e4 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -10,8 +10,13 @@
namespace v8 {
namespace internal {
+
+// Forward declarations.
+class CompilationInfo;
+
namespace compiler {
+// Forward declarations.
class JSCallFunctionAccessor;
class JSInliner final : public AdvancedReducer {
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index e82ac205cc..219a452a7d 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -11,6 +11,8 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
+#include "src/counters.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -52,8 +54,6 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
case Runtime::kInlineIsFunction:
return ReduceIsInstanceType(node, JS_FUNCTION_TYPE);
- case Runtime::kInlineIsNonNegativeSmi:
- return ReduceIsNonNegativeSmi(node);
case Runtime::kInlineIsRegExp:
return ReduceIsInstanceType(node, JS_REGEXP_TYPE);
case Runtime::kInlineIsSmi:
@@ -149,8 +149,8 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
graph()->NewNode(common()->Deoptimize(), frame_state, effect, control);
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- node->set_op(common()->Dead());
node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Dead());
return Changed(node);
}
@@ -177,11 +177,11 @@ Reduction JSIntrinsicLowering::ReduceHeapObjectGetMap(Node* node) {
Reduction JSIntrinsicLowering::ReduceIncrementStatsCounter(Node* node) {
if (!FLAG_native_code_counters) return ChangeToUndefined(node);
HeapObjectMatcher m(NodeProperties::GetValueInput(node, 0));
- if (!m.HasValue() || !m.Value().handle()->IsString()) {
+ if (!m.HasValue() || !m.Value()->IsString()) {
return ChangeToUndefined(node);
}
base::SmartArrayPointer<char> name =
- Handle<String>::cast(m.Value().handle())->ToCString();
+ Handle<String>::cast(m.Value())->ToCString();
StatsCounter counter(jsgraph()->isolate(), name.get());
if (!counter.Enabled()) return ChangeToUndefined(node);
@@ -239,11 +239,6 @@ Reduction JSIntrinsicLowering::ReduceIsInstanceType(
}
-Reduction JSIntrinsicLowering::ReduceIsNonNegativeSmi(Node* node) {
- return Change(node, simplified()->ObjectIsNonNegativeSmi());
-}
-
-
Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
return Change(node, simplified()->ObjectIsSmi());
}
@@ -288,12 +283,13 @@ Reduction JSIntrinsicLowering::ReduceSeqStringGetChar(
Node* node, String::Encoding encoding) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- node->set_op(
- simplified()->LoadElement(AccessBuilder::ForSeqStringChar(encoding)));
+ RelaxControls(node);
node->ReplaceInput(2, effect);
node->ReplaceInput(3, control);
node->TrimInputCount(4);
- RelaxControls(node);
+ NodeProperties::ChangeOp(
+ node,
+ simplified()->LoadElement(AccessBuilder::ForSeqStringChar(encoding)));
return Changed(node);
}
@@ -306,16 +302,17 @@ Reduction JSIntrinsicLowering::ReduceSeqStringSetChar(
Node* string = NodeProperties::GetValueInput(node, 2);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- node->set_op(
- simplified()->StoreElement(AccessBuilder::ForSeqStringChar(encoding)));
+ ReplaceWithValue(node, string, node);
+ NodeProperties::RemoveType(node);
node->ReplaceInput(0, string);
node->ReplaceInput(1, index);
node->ReplaceInput(2, chr);
node->ReplaceInput(3, effect);
node->ReplaceInput(4, control);
node->TrimInputCount(5);
- NodeProperties::RemoveBounds(node);
- ReplaceWithValue(node, string, node);
+ NodeProperties::ChangeOp(
+ node,
+ simplified()->StoreElement(AccessBuilder::ForSeqStringChar(encoding)));
return Changed(node);
}
@@ -342,7 +339,7 @@ Reduction JSIntrinsicLowering::ReduceUnLikely(Node* node, BranchHint hint) {
nodes_to_visit.push(use);
} else if (use->opcode() == IrOpcode::kBranch) {
// Actually set the hint on any branch using the intrinsic node.
- use->set_op(common()->Branch(hint));
+ NodeProperties::ChangeOp(use, common()->Branch(hint));
}
}
}
@@ -422,7 +419,7 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
// Remove the inputs corresponding to context, effect and control.
NodeProperties::RemoveNonValueInputs(node);
// Finally update the operator to the new one.
- node->set_op(op);
+ NodeProperties::ChangeOp(node, op);
return Changed(node);
}
@@ -524,14 +521,14 @@ Reduction JSIntrinsicLowering::ReduceThrowNotDateError(Node* node) {
graph()->NewNode(common()->Deoptimize(), frame_state, effect, control);
NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
- node->set_op(common()->Dead());
node->TrimInputCount(0);
+ NodeProperties::ChangeOp(node, common()->Dead());
return Changed(node);
}
Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
- node->set_op(javascript()->ToObject());
+ NodeProperties::ChangeOp(node, javascript()->ToObject());
return Changed(node);
}
@@ -539,50 +536,52 @@ Reduction JSIntrinsicLowering::ReduceToObject(Node* node) {
Reduction JSIntrinsicLowering::ReduceCallFunction(Node* node) {
CallRuntimeParameters params = OpParameter<CallRuntimeParameters>(node->op());
size_t arity = params.arity();
- node->set_op(javascript()->CallFunction(arity, NO_CALL_FUNCTION_FLAGS, STRICT,
- VectorSlotPair(), ALLOW_TAIL_CALLS));
Node* function = node->InputAt(static_cast<int>(arity - 1));
while (--arity != 0) {
node->ReplaceInput(static_cast<int>(arity),
node->InputAt(static_cast<int>(arity - 1)));
}
node->ReplaceInput(0, function);
+ NodeProperties::ChangeOp(
+ node,
+ javascript()->CallFunction(params.arity(), NO_CALL_FUNCTION_FLAGS, STRICT,
+ VectorSlotPair(), ALLOW_TAIL_CALLS));
return Changed(node);
}
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b) {
- node->set_op(op);
+ RelaxControls(node);
node->ReplaceInput(0, a);
node->ReplaceInput(1, b);
node->TrimInputCount(2);
- RelaxControls(node);
+ NodeProperties::ChangeOp(node, op);
return Changed(node);
}
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b, Node* c) {
- node->set_op(op);
+ RelaxControls(node);
node->ReplaceInput(0, a);
node->ReplaceInput(1, b);
node->ReplaceInput(2, c);
node->TrimInputCount(3);
- RelaxControls(node);
+ NodeProperties::ChangeOp(node, op);
return Changed(node);
}
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
Node* b, Node* c, Node* d) {
- node->set_op(op);
+ RelaxControls(node);
node->ReplaceInput(0, a);
node->ReplaceInput(1, b);
node->ReplaceInput(2, c);
node->ReplaceInput(3, d);
node->TrimInputCount(4);
- RelaxControls(node);
+ NodeProperties::ChangeOp(node, op);
return Changed(node);
}
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index c14882c734..15e9b4053e 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -41,7 +41,6 @@ class JSIntrinsicLowering final : public AdvancedReducer {
Reduction ReduceIncrementStatsCounter(Node* node);
Reduction ReduceIsMinusZero(Node* node);
Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
- Reduction ReduceIsNonNegativeSmi(Node* node);
Reduction ReduceIsSmi(Node* node);
Reduction ReduceJSValueGetValue(Node* node);
Reduction ReduceMapGetInstanceType(Node* node);
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 6a5bdfd692..37369f6970 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -9,13 +9,24 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/type-feedback-vector-inl.h"
namespace v8 {
namespace internal {
namespace compiler {
+VectorSlotPair::VectorSlotPair() : slot_(FeedbackVectorICSlot::Invalid()) {}
+
+
+int VectorSlotPair::index() const {
+ return vector_.is_null() ? -1 : vector_->GetIndex(slot_);
+}
+
+
bool operator==(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
- return lhs.slot() == rhs.slot() && lhs.vector() == rhs.vector();
+ return lhs.slot() == rhs.slot() &&
+ lhs.vector().location() == rhs.vector().location();
}
@@ -25,7 +36,7 @@ bool operator!=(VectorSlotPair const& lhs, VectorSlotPair const& rhs) {
size_t hash_value(VectorSlotPair const& p) {
- return base::hash_combine(p.slot(), p.vector());
+ return base::hash_combine(p.slot(), p.vector().location());
}
@@ -196,7 +207,7 @@ DynamicContextAccess const& DynamicContextAccessOf(Operator const* op) {
bool operator==(LoadNamedParameters const& lhs,
LoadNamedParameters const& rhs) {
- return lhs.name() == rhs.name() &&
+ return lhs.name().location() == rhs.name().location() &&
lhs.language_mode() == rhs.language_mode() &&
lhs.feedback() == rhs.feedback();
}
@@ -209,12 +220,13 @@ bool operator!=(LoadNamedParameters const& lhs,
size_t hash_value(LoadNamedParameters const& p) {
- return base::hash_combine(p.name(), p.language_mode(), p.feedback());
+ return base::hash_combine(p.name().location(), p.language_mode(),
+ p.feedback());
}
std::ostream& operator<<(std::ostream& os, LoadNamedParameters const& p) {
- return os << Brief(*p.name().handle()) << ", " << p.language_mode();
+ return os << Brief(*p.name()) << ", " << p.language_mode();
}
@@ -255,7 +267,8 @@ const LoadNamedParameters& LoadNamedParametersOf(const Operator* op) {
bool operator==(LoadGlobalParameters const& lhs,
LoadGlobalParameters const& rhs) {
- return lhs.name() == rhs.name() && lhs.feedback() == rhs.feedback() &&
+ return lhs.name().location() == rhs.name().location() &&
+ lhs.feedback() == rhs.feedback() &&
lhs.typeof_mode() == rhs.typeof_mode() &&
lhs.slot_index() == rhs.slot_index();
}
@@ -268,12 +281,13 @@ bool operator!=(LoadGlobalParameters const& lhs,
size_t hash_value(LoadGlobalParameters const& p) {
- return base::hash_combine(p.name(), p.typeof_mode(), p.slot_index());
+ return base::hash_combine(p.name().location(), p.typeof_mode(),
+ p.slot_index());
}
std::ostream& operator<<(std::ostream& os, LoadGlobalParameters const& p) {
- return os << Brief(*p.name().handle()) << ", " << p.typeof_mode()
+ return os << Brief(*p.name()) << ", " << p.typeof_mode()
<< ", slot: " << p.slot_index();
}
@@ -287,7 +301,8 @@ const LoadGlobalParameters& LoadGlobalParametersOf(const Operator* op) {
bool operator==(StoreGlobalParameters const& lhs,
StoreGlobalParameters const& rhs) {
return lhs.language_mode() == rhs.language_mode() &&
- lhs.name() == rhs.name() && lhs.feedback() == rhs.feedback() &&
+ lhs.name().location() == rhs.name().location() &&
+ lhs.feedback() == rhs.feedback() &&
lhs.slot_index() == rhs.slot_index();
}
@@ -299,13 +314,13 @@ bool operator!=(StoreGlobalParameters const& lhs,
size_t hash_value(StoreGlobalParameters const& p) {
- return base::hash_combine(p.language_mode(), p.name(), p.feedback(),
- p.slot_index());
+ return base::hash_combine(p.language_mode(), p.name().location(),
+ p.feedback(), p.slot_index());
}
std::ostream& operator<<(std::ostream& os, StoreGlobalParameters const& p) {
- return os << p.language_mode() << ", " << Brief(*p.name().handle())
+ return os << p.language_mode() << ", " << Brief(*p.name())
<< ", slot: " << p.slot_index();
}
@@ -319,7 +334,8 @@ const StoreGlobalParameters& StoreGlobalParametersOf(const Operator* op) {
bool operator==(StoreNamedParameters const& lhs,
StoreNamedParameters const& rhs) {
return lhs.language_mode() == rhs.language_mode() &&
- lhs.name() == rhs.name() && lhs.feedback() == rhs.feedback();
+ lhs.name().location() == rhs.name().location() &&
+ lhs.feedback() == rhs.feedback();
}
@@ -330,12 +346,13 @@ bool operator!=(StoreNamedParameters const& lhs,
size_t hash_value(StoreNamedParameters const& p) {
- return base::hash_combine(p.language_mode(), p.name(), p.feedback());
+ return base::hash_combine(p.language_mode(), p.name().location(),
+ p.feedback());
}
std::ostream& operator<<(std::ostream& os, StoreNamedParameters const& p) {
- return os << p.language_mode() << ", " << Brief(*p.name().handle());
+ return os << p.language_mode() << ", " << Brief(*p.name());
}
@@ -374,6 +391,35 @@ const StorePropertyParameters& StorePropertyParametersOf(const Operator* op) {
}
+bool operator==(CreateArgumentsParameters const& lhs,
+ CreateArgumentsParameters const& rhs) {
+ return lhs.type() == rhs.type() && lhs.start_index() == rhs.start_index();
+}
+
+
+bool operator!=(CreateArgumentsParameters const& lhs,
+ CreateArgumentsParameters const& rhs) {
+ return !(lhs == rhs);
+}
+
+
+size_t hash_value(CreateArgumentsParameters const& p) {
+ return base::hash_combine(p.type(), p.start_index());
+}
+
+
+std::ostream& operator<<(std::ostream& os, CreateArgumentsParameters const& p) {
+ return os << p.type() << ", " << p.start_index();
+}
+
+
+const CreateArgumentsParameters& CreateArgumentsParametersOf(
+ const Operator* op) {
+ DCHECK_EQ(IrOpcode::kJSCreateArguments, op->opcode());
+ return OpParameter<CreateArgumentsParameters>(op);
+}
+
+
bool operator==(CreateClosureParameters const& lhs,
CreateClosureParameters const& rhs) {
return lhs.pretenure() == rhs.pretenure() &&
@@ -408,10 +454,10 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
#define CACHED_OP_LIST(V) \
V(Equal, Operator::kNoProperties, 2, 1) \
V(NotEqual, Operator::kNoProperties, 2, 1) \
- V(StrictEqual, Operator::kPure, 2, 1) \
- V(StrictNotEqual, Operator::kPure, 2, 1) \
- V(UnaryNot, Operator::kPure, 1, 1) \
- V(ToBoolean, Operator::kPure, 1, 1) \
+ V(StrictEqual, Operator::kNoThrow, 2, 1) \
+ V(StrictNotEqual, Operator::kNoThrow, 2, 1) \
+ V(UnaryNot, Operator::kEliminatable, 1, 1) \
+ V(ToBoolean, Operator::kEliminatable, 1, 1) \
V(ToNumber, Operator::kNoProperties, 1, 1) \
V(ToString, Operator::kNoProperties, 1, 1) \
V(ToName, Operator::kNoProperties, 1, 1) \
@@ -419,7 +465,7 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
V(Yield, Operator::kNoProperties, 1, 1) \
V(Create, Operator::kEliminatable, 0, 1) \
V(HasProperty, Operator::kNoProperties, 2, 1) \
- V(TypeOf, Operator::kPure, 1, 1) \
+ V(TypeOf, Operator::kEliminatable, 1, 1) \
V(InstanceOf, Operator::kNoProperties, 2, 1) \
V(ForInDone, Operator::kPure, 2, 1) \
V(ForInNext, Operator::kNoProperties, 4, 1) \
@@ -428,9 +474,7 @@ const CreateClosureParameters& CreateClosureParametersOf(const Operator* op) {
V(StackCheck, Operator::kNoProperties, 0, 0) \
V(CreateFunctionContext, Operator::kNoProperties, 1, 1) \
V(CreateWithContext, Operator::kNoProperties, 2, 1) \
- V(CreateBlockContext, Operator::kNoProperties, 2, 1) \
- V(CreateModuleContext, Operator::kNoProperties, 2, 1) \
- V(CreateScriptContext, Operator::kNoProperties, 2, 1)
+ V(CreateModuleContext, Operator::kNoProperties, 2, 1)
#define CACHED_OP_LIST_WITH_LANGUAGE_MODE(V) \
@@ -559,7 +603,7 @@ const Operator* JSOperatorBuilder::CallConstruct(int arguments) {
}
-const Operator* JSOperatorBuilder::LoadNamed(const Unique<Name>& name,
+const Operator* JSOperatorBuilder::LoadNamed(const Handle<Name>& name,
const VectorSlotPair& feedback,
LanguageMode language_mode) {
LoadNamedParameters parameters(name, feedback, language_mode);
@@ -583,7 +627,7 @@ const Operator* JSOperatorBuilder::LoadProperty(const VectorSlotPair& feedback,
const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
- const Unique<Name>& name,
+ const Handle<Name>& name,
const VectorSlotPair& feedback) {
StoreNamedParameters parameters(language_mode, feedback, name);
return new (zone()) Operator1<StoreNamedParameters>( // --
@@ -614,7 +658,7 @@ const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
}
-const Operator* JSOperatorBuilder::LoadGlobal(const Unique<Name>& name,
+const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name,
const VectorSlotPair& feedback,
TypeofMode typeof_mode,
int slot_index) {
@@ -628,7 +672,7 @@ const Operator* JSOperatorBuilder::LoadGlobal(const Unique<Name>& name,
const Operator* JSOperatorBuilder::StoreGlobal(LanguageMode language_mode,
- const Unique<Name>& name,
+ const Handle<Name>& name,
const VectorSlotPair& feedback,
int slot_index) {
StoreGlobalParameters parameters(language_mode, feedback, name, slot_index);
@@ -688,13 +732,25 @@ const Operator* JSOperatorBuilder::LoadDynamicContext(
}
+const Operator* JSOperatorBuilder::CreateArguments(
+ CreateArgumentsParameters::Type type, int start_index) {
+ DCHECK_IMPLIES(start_index, type == CreateArgumentsParameters::kRestArray);
+ CreateArgumentsParameters parameters(type, start_index);
+ return new (zone()) Operator1<CreateArgumentsParameters>( // --
+ IrOpcode::kJSCreateArguments, Operator::kNoThrow, // opcode
+ "JSCreateArguments", // name
+ 1, 1, 1, 1, 1, 0, // counts
+ parameters); // parameter
+}
+
+
const Operator* JSOperatorBuilder::CreateClosure(
Handle<SharedFunctionInfo> shared_info, PretenureFlag pretenure) {
CreateClosureParameters parameters(shared_info, pretenure);
return new (zone()) Operator1<CreateClosureParameters>( // --
IrOpcode::kJSCreateClosure, Operator::kNoThrow, // opcode
"JSCreateClosure", // name
- 1, 1, 1, 1, 1, 0, // counts
+ 0, 1, 1, 1, 1, 0, // counts
parameters); // parameter
}
@@ -718,14 +774,37 @@ const Operator* JSOperatorBuilder::CreateLiteralObject(int literal_flags) {
const Operator* JSOperatorBuilder::CreateCatchContext(
- const Unique<String>& name) {
- return new (zone()) Operator1<Unique<String>>( // --
+ const Handle<String>& name) {
+ return new (zone()) Operator1<Handle<String>, Handle<String>::equal_to,
+ Handle<String>::hash>( // --
IrOpcode::kJSCreateCatchContext, Operator::kNoProperties, // opcode
"JSCreateCatchContext", // name
2, 1, 1, 1, 1, 2, // counts
name); // parameter
}
+
+const Operator* JSOperatorBuilder::CreateBlockContext(
+ const Handle<ScopeInfo>& scope_info) {
+ return new (zone()) Operator1<Handle<ScopeInfo>, Handle<ScopeInfo>::equal_to,
+ Handle<ScopeInfo>::hash>( // --
+ IrOpcode::kJSCreateBlockContext, Operator::kNoProperties, // opcode
+ "JSCreateBlockContext", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ scope_info); // parameter
+}
+
+
+const Operator* JSOperatorBuilder::CreateScriptContext(
+ const Handle<ScopeInfo>& scope_info) {
+ return new (zone()) Operator1<Handle<ScopeInfo>, Handle<ScopeInfo>::equal_to,
+ Handle<ScopeInfo>::hash>( // --
+ IrOpcode::kJSCreateScriptContext, Operator::kNoProperties, // opcode
+ "JSCreateScriptContext", // name
+ 1, 1, 1, 1, 1, 2, // counts
+ scope_info); // parameter
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
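
Note on the js-operator.cc hunks above: operator parameters drop Unique<Name> for plain Handle<Name>, so parameter equality and hashing switch to Handle<T>::location(), the address of the handle's backing slot. A hedged sketch of that identity (assuming location() yields the slot address; the pointed-to objects are never dereferenced for comparison):

    template <typename T>
    bool SameLocation(Handle<T> a, Handle<T> b) {
      return a.location() == b.location();  // do they alias the same slot?
    }
    template <typename T>
    size_t LocationHash(Handle<T> h) {
      return base::hash_combine(h.location());  // hash the slot address
    }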
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index 5afbfdf6fa..88b2dd304e 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -6,7 +6,6 @@
#define V8_COMPILER_JS_OPERATOR_H_
#include "src/runtime/runtime.h"
-#include "src/unique.h"
namespace v8 {
namespace internal {
@@ -21,22 +20,19 @@ struct JSOperatorGlobalCache;
// is used to access the type feedback for a certain {Node}.
class VectorSlotPair {
public:
- VectorSlotPair() : slot_(FeedbackVectorICSlot::Invalid()) {}
+ VectorSlotPair();
VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
: vector_(vector), slot_(slot) {}
bool IsValid() const { return !vector_.is_null(); }
- MaybeHandle<TypeFeedbackVector> vector() const { return vector_; }
+ Handle<TypeFeedbackVector> vector() const { return vector_; }
FeedbackVectorICSlot slot() const { return slot_; }
- int index() const {
- Handle<TypeFeedbackVector> vector;
- return vector_.ToHandle(&vector) ? vector->GetIndex(slot_) : -1;
- }
+ int index() const;
private:
- const MaybeHandle<TypeFeedbackVector> vector_;
+ const Handle<TypeFeedbackVector> vector_;
const FeedbackVectorICSlot slot_;
};
@@ -236,17 +232,17 @@ DynamicContextAccess const& DynamicContextAccessOf(Operator const*);
// used as a parameter by JSLoadNamed operators.
class LoadNamedParameters final {
public:
- LoadNamedParameters(const Unique<Name>& name, const VectorSlotPair& feedback,
+ LoadNamedParameters(const Handle<Name>& name, const VectorSlotPair& feedback,
LanguageMode language_mode)
: name_(name), feedback_(feedback), language_mode_(language_mode) {}
- const Unique<Name>& name() const { return name_; }
+ const Handle<Name>& name() const { return name_; }
LanguageMode language_mode() const { return language_mode_; }
const VectorSlotPair& feedback() const { return feedback_; }
private:
- const Unique<Name> name_;
+ const Handle<Name> name_;
const VectorSlotPair feedback_;
const LanguageMode language_mode_;
};
@@ -265,14 +261,14 @@ const LoadNamedParameters& LoadNamedParametersOf(const Operator* op);
// used as a parameter by JSLoadGlobal operator.
class LoadGlobalParameters final {
public:
- LoadGlobalParameters(const Unique<Name>& name, const VectorSlotPair& feedback,
+ LoadGlobalParameters(const Handle<Name>& name, const VectorSlotPair& feedback,
TypeofMode typeof_mode, int slot_index)
: name_(name),
feedback_(feedback),
typeof_mode_(typeof_mode),
slot_index_(slot_index) {}
- const Unique<Name>& name() const { return name_; }
+ const Handle<Name>& name() const { return name_; }
TypeofMode typeof_mode() const { return typeof_mode_; }
const VectorSlotPair& feedback() const { return feedback_; }
@@ -280,7 +276,7 @@ class LoadGlobalParameters final {
int slot_index() const { return slot_index_; }
private:
- const Unique<Name> name_;
+ const Handle<Name> name_;
const VectorSlotPair feedback_;
const TypeofMode typeof_mode_;
const int slot_index_;
@@ -302,7 +298,7 @@ class StoreGlobalParameters final {
public:
StoreGlobalParameters(LanguageMode language_mode,
const VectorSlotPair& feedback,
- const Unique<Name>& name, int slot_index)
+ const Handle<Name>& name, int slot_index)
: language_mode_(language_mode),
name_(name),
feedback_(feedback),
@@ -310,12 +306,12 @@ class StoreGlobalParameters final {
LanguageMode language_mode() const { return language_mode_; }
const VectorSlotPair& feedback() const { return feedback_; }
- const Unique<Name>& name() const { return name_; }
+ const Handle<Name>& name() const { return name_; }
int slot_index() const { return slot_index_; }
private:
const LanguageMode language_mode_;
- const Unique<Name> name_;
+ const Handle<Name> name_;
const VectorSlotPair feedback_;
int slot_index_;
};
@@ -362,16 +358,16 @@ const LoadPropertyParameters& LoadPropertyParametersOf(const Operator* op);
class StoreNamedParameters final {
public:
StoreNamedParameters(LanguageMode language_mode,
- const VectorSlotPair& feedback, const Unique<Name>& name)
+ const VectorSlotPair& feedback, const Handle<Name>& name)
: language_mode_(language_mode), name_(name), feedback_(feedback) {}
LanguageMode language_mode() const { return language_mode_; }
const VectorSlotPair& feedback() const { return feedback_; }
- const Unique<Name>& name() const { return name_; }
+ const Handle<Name>& name() const { return name_; }
private:
const LanguageMode language_mode_;
- const Unique<Name> name_;
+ const Handle<Name> name_;
const VectorSlotPair feedback_;
};
@@ -411,6 +407,35 @@ std::ostream& operator<<(std::ostream&, StorePropertyParameters const&);
const StorePropertyParameters& StorePropertyParametersOf(const Operator* op);
+// Defines specifics about arguments object or rest parameter creation. This is
+// used as a parameter by JSCreateArguments operators.
+class CreateArgumentsParameters final {
+ public:
+ enum Type { kMappedArguments, kUnmappedArguments, kRestArray };
+ CreateArgumentsParameters(Type type, int start_index)
+ : type_(type), start_index_(start_index) {}
+
+ Type type() const { return type_; }
+ int start_index() const { return start_index_; }
+
+ private:
+ const Type type_;
+ const int start_index_;
+};
+
+bool operator==(CreateArgumentsParameters const&,
+ CreateArgumentsParameters const&);
+bool operator!=(CreateArgumentsParameters const&,
+ CreateArgumentsParameters const&);
+
+size_t hash_value(CreateArgumentsParameters const&);
+
+std::ostream& operator<<(std::ostream&, CreateArgumentsParameters const&);
+
+const CreateArgumentsParameters& CreateArgumentsParametersOf(
+ const Operator* op);
+
+
// Defines shared information for the closure that should be created. This is
// used as a parameter by JSCreateClosure operators.
class CreateClosureParameters final {
@@ -473,6 +498,8 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* Yield();
const Operator* Create();
+ const Operator* CreateArguments(CreateArgumentsParameters::Type type,
+ int start_index);
const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
PretenureFlag pretenure);
const Operator* CreateLiteralArray(int literal_flags);
@@ -488,26 +515,26 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* LoadProperty(const VectorSlotPair& feedback,
LanguageMode language_mode);
- const Operator* LoadNamed(const Unique<Name>& name,
+ const Operator* LoadNamed(const Handle<Name>& name,
const VectorSlotPair& feedback,
LanguageMode language_mode);
const Operator* StoreProperty(LanguageMode language_mode,
const VectorSlotPair& feedback);
const Operator* StoreNamed(LanguageMode language_mode,
- const Unique<Name>& name,
+ const Handle<Name>& name,
const VectorSlotPair& feedback);
const Operator* DeleteProperty(LanguageMode language_mode);
const Operator* HasProperty();
- const Operator* LoadGlobal(const Unique<Name>& name,
+ const Operator* LoadGlobal(const Handle<Name>& name,
const VectorSlotPair& feedback,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF,
int slot_index = -1);
const Operator* StoreGlobal(LanguageMode language_mode,
- const Unique<Name>& name,
+ const Handle<Name>& name,
const VectorSlotPair& feedback,
int slot_index = -1);
@@ -532,13 +559,12 @@ class JSOperatorBuilder final : public ZoneObject {
const Operator* StackCheck();
- // TODO(titzer): nail down the static parts of each of these context flavors.
const Operator* CreateFunctionContext();
- const Operator* CreateCatchContext(const Unique<String>& name);
+ const Operator* CreateCatchContext(const Handle<String>& name);
const Operator* CreateWithContext();
- const Operator* CreateBlockContext();
+ const Operator* CreateBlockContext(const Handle<ScopeInfo>& scope_info);
const Operator* CreateModuleContext();
- const Operator* CreateScriptContext();
+ const Operator* CreateScriptContext(const Handle<ScopeInfo>& scope_info);
private:
Zone* zone() const { return zone_; }
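
Note on the js-operator.h hunks above: CreateBlockContext and CreateScriptContext leave the cached-operator list because they now carry a Handle<ScopeInfo> parameter, so every call allocates a fresh Operator1 in the zone. A hedged usage sketch (scope_info is assumed to be in scope; start_index is only meaningful for kRestArray, per the DCHECK_IMPLIES in js-operator.cc):

    const Operator* block_ctx = javascript()->CreateBlockContext(scope_info);
    const Operator* rest_args = javascript()->CreateArguments(
        CreateArgumentsParameters::kRestArray, /* start_index */ 1);
    // For kMappedArguments/kUnmappedArguments, start_index must stay 0.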
diff --git a/deps/v8/src/compiler/js-type-feedback-lowering.cc b/deps/v8/src/compiler/js-type-feedback-lowering.cc
index 2522a7af07..d97a305d08 100644
--- a/deps/v8/src/compiler/js-type-feedback-lowering.cc
+++ b/deps/v8/src/compiler/js-type-feedback-lowering.cc
@@ -7,6 +7,8 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/type-feedback-vector.h"
namespace v8 {
namespace internal {
@@ -34,17 +36,16 @@ Reduction JSTypeFeedbackLowering::Reduce(Node* node) {
Reduction JSTypeFeedbackLowering::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
- Type* receiver_type = NodeProperties::GetBounds(receiver).upper;
+ Type* receiver_type = NodeProperties::GetType(receiver);
Node* frame_state = NodeProperties::GetFrameStateInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
// We need to make optimistic assumptions to continue.
if (!(flags() & kDeoptimizationEnabled)) return NoChange();
LoadNamedParameters const& p = LoadNamedParametersOf(node->op());
- Handle<TypeFeedbackVector> vector;
- if (!p.feedback().vector().ToHandle(&vector)) return NoChange();
- if (p.name().handle().is_identical_to(factory()->length_string())) {
- LoadICNexus nexus(vector, p.feedback().slot());
+ if (p.feedback().vector().is_null()) return NoChange();
+ if (p.name().is_identical_to(factory()->length_string())) {
+ LoadICNexus nexus(p.feedback().vector(), p.feedback().slot());
MapHandleList maps;
if (nexus.ExtractMaps(&maps) > 0) {
for (Handle<Map> map : maps) {
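
Note on the js-type-feedback-lowering.cc hunk above: with VectorSlotPair::vector() now returning a plain Handle<TypeFeedbackVector>, the MaybeHandle::ToHandle round-trip collapses into a null check. Before/after, lifted from the hunk:

    // before: MaybeHandle round-trip
    Handle<TypeFeedbackVector> vector;
    if (!p.feedback().vector().ToHandle(&vector)) return NoChange();
    // after: validity is a plain null check
    if (p.feedback().vector().is_null()) return NoChange();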
diff --git a/deps/v8/src/compiler/js-type-feedback.cc b/deps/v8/src/compiler/js-type-feedback.cc
index 432e2d0366..395a7dccca 100644
--- a/deps/v8/src/compiler/js-type-feedback.cc
+++ b/deps/v8/src/compiler/js-type-feedback.cc
@@ -146,7 +146,6 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamed(Node* node) {
if (frame_state_before == nullptr) return NoChange();
const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
- Handle<Name> name = p.name().handle();
SmallMapList maps;
FeedbackVectorICSlot slot = js_type_feedback_->FindFeedbackVectorICSlot(node);
@@ -155,7 +154,7 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamed(Node* node) {
// No type feedback ids or the load is uninitialized.
return NoChange();
}
- oracle()->PropertyReceiverTypes(slot, name, &maps);
+ oracle()->PropertyReceiverTypes(slot, p.name(), &maps);
Node* receiver = node->InputAt(0);
Node* effect = NodeProperties::GetEffectInput(node);
@@ -165,7 +164,7 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamed(Node* node) {
Handle<Map> map = maps.first();
FieldAccess field_access;
- if (!GetInObjectFieldAccess(LOAD, map, name, &field_access)) {
+ if (!GetInObjectFieldAccess(LOAD, map, p.name(), &field_access)) {
return NoChange();
}
@@ -191,7 +190,7 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamed(Node* node) {
Reduction JSTypeFeedbackSpecializer::ReduceJSLoadGlobal(Node* node) {
DCHECK(node->opcode() == IrOpcode::kJSLoadGlobal);
Handle<String> name =
- Handle<String>::cast(LoadGlobalParametersOf(node->op()).name().handle());
+ Handle<String>::cast(LoadGlobalParametersOf(node->op()).name());
// Try to optimize loads from the global object.
Handle<Object> constant_value =
jsgraph()->isolate()->factory()->GlobalConstantFor(name);
@@ -267,7 +266,6 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSStoreNamed(Node* node) {
if (frame_state_before == nullptr) return NoChange();
const StoreNamedParameters& p = StoreNamedParametersOf(node->op());
- Handle<Name> name = p.name().handle();
SmallMapList maps;
TypeFeedbackId id = js_type_feedback_->FindTypeFeedbackId(node);
if (id.IsNone() || oracle()->StoreIsUninitialized(id) == UNINITIALIZED) {
@@ -275,7 +273,7 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSStoreNamed(Node* node) {
// TODO(titzer): no feedback from vector ICs from stores.
return NoChange();
} else {
- oracle()->AssignmentReceiverTypes(id, name, &maps);
+ oracle()->AssignmentReceiverTypes(id, p.name(), &maps);
}
Node* receiver = node->InputAt(0);
@@ -287,7 +285,7 @@ Reduction JSTypeFeedbackSpecializer::ReduceJSStoreNamed(Node* node) {
Handle<Map> map = maps.first();
FieldAccess field_access;
- if (!GetInObjectFieldAccess(STORE, map, name, &field_access)) {
+ if (!GetInObjectFieldAccess(STORE, map, p.name(), &field_access)) {
return NoChange();
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 628e7e07b6..7c25afcfaf 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -61,7 +61,8 @@ class AllocationBuilder final {
void AllocateArray(int length, Handle<Map> map) {
Allocate(FixedArray::SizeFor(length));
Store(AccessBuilder::ForMap(), map);
- Store(AccessBuilder::ForFixedArrayLength(), jsgraph()->Constant(length));
+ Store(AccessBuilder::ForFixedArrayLength(graph()->zone()),
+ jsgraph()->Constant(length));
}
// Compound store of a constant into a field.
@@ -162,12 +163,12 @@ class JSBinopReduction final {
// Remove the inputs corresponding to context, effect, and control.
NodeProperties::RemoveNonValueInputs(node_);
// Finally, update the operator to the new one.
- node_->set_op(op);
+ NodeProperties::ChangeOp(node_, op);
// TODO(jarin): Replace the explicit typing hack with a call to some method
// that encapsulates changing the operator and re-typing.
- Bounds const bounds = NodeProperties::GetBounds(node_);
- NodeProperties::SetBounds(node_, Bounds::NarrowUpper(bounds, type, zone()));
+ Type* node_type = NodeProperties::GetType(node_);
+ NodeProperties::SetType(node_, Type::Intersect(node_type, type, zone()));
if (invert) {
// Insert a boolean-not to invert the value.
@@ -180,6 +181,32 @@ class JSBinopReduction final {
return lowering_->Changed(node_);
}
+ Reduction ChangeToStringComparisonOperator(const Operator* op,
+ bool invert = false) {
+ if (node_->op()->ControlInputCount() > 0) {
+ lowering_->RelaxControls(node_);
+ }
+ // String comparison operators need effect and control inputs, so copy them
+ // over.
+ Node* effect = NodeProperties::GetEffectInput(node_);
+ Node* control = NodeProperties::GetControlInput(node_);
+ node_->ReplaceInput(2, effect);
+ node_->ReplaceInput(3, control);
+
+ node_->TrimInputCount(4);
+ NodeProperties::ChangeOp(node_, op);
+
+ if (invert) {
+ // Insert a boolean-not to invert the value.
+ Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
+ node_->ReplaceUses(value);
+ // Note: ReplaceUses() smashes all uses, so smash it back here.
+ value->ReplaceInput(0, node_);
+ return lowering_->Replace(value);
+ }
+ return lowering_->Changed(node_);
+ }
+
Reduction ChangeToPureOperator(const Operator* op, Type* type) {
return ChangeToPureOperator(op, false, type);
}
@@ -205,12 +232,8 @@ class JSBinopReduction final {
Node* context() { return NodeProperties::GetContextInput(node_); }
Node* left() { return NodeProperties::GetValueInput(node_, 0); }
Node* right() { return NodeProperties::GetValueInput(node_, 1); }
- Type* left_type() {
- return NodeProperties::GetBounds(node_->InputAt(0)).upper;
- }
- Type* right_type() {
- return NodeProperties::GetBounds(node_->InputAt(1)).upper;
- }
+ Type* left_type() { return NodeProperties::GetType(node_->InputAt(0)); }
+ Type* right_type() { return NodeProperties::GetType(node_->InputAt(1)); }
SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
Graph* graph() const { return lowering_->graph(); }
@@ -304,7 +327,7 @@ class JSBinopReduction final {
}
Node* ConvertPlainPrimitiveToNumber(Node* node) {
- DCHECK(NodeProperties::GetBounds(node).upper->Is(Type::PlainPrimitive()));
+ DCHECK(NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
// Avoid inserting too many eager ToNumber() operations.
Reduction const reduction = lowering_->ReduceJSToNumberInput(node);
if (reduction.Changed()) return reduction.replacement();
@@ -315,7 +338,7 @@ class JSBinopReduction final {
}
Node* ConvertSingleInputToNumber(Node* node, Node* frame_state) {
- DCHECK(!NodeProperties::GetBounds(node).upper->Is(Type::PlainPrimitive()));
+ DCHECK(!NodeProperties::GetType(node)->Is(Type::PlainPrimitive()));
Node* const n = graph()->NewNode(javascript()->ToNumber(), node, context(),
frame_state, effect(), control());
NodeProperties::ReplaceUses(node_, node_, node_, n, n);
@@ -362,10 +385,10 @@ class JSBinopReduction final {
if (NodeProperties::IsEffectEdge(edge)) edge.UpdateTo(exception_effect);
if (NodeProperties::IsValueEdge(edge)) edge.UpdateTo(exception_value);
}
- NodeProperties::RemoveBounds(exception_merge);
+ NodeProperties::RemoveType(exception_merge);
exception_merge->ReplaceInput(0, left_exception);
exception_merge->ReplaceInput(1, right_exception);
- exception_merge->set_op(common()->Merge(2));
+ NodeProperties::ChangeOp(exception_merge, common()->Merge(2));
*left_result = left_conv;
*right_result = right_conv;
@@ -373,7 +396,7 @@ class JSBinopReduction final {
Node* ConvertToUI32(Node* node, Signedness signedness) {
// Avoid introducing too many eager NumberToXXnt32() operations.
- Type* type = NodeProperties::GetBounds(node).upper;
+ Type* type = NodeProperties::GetType(node);
if (signedness == kSigned) {
if (!type->Is(Type::Signed32())) {
node = graph()->NewNode(simplified()->NumberToInt32(), node);
@@ -416,7 +439,7 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
node->RemoveInput(NodeProperties::FirstFrameStateIndex(node) + 1);
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->set_op(common()->Call(desc));
+ NodeProperties::ChangeOp(node, common()->Call(desc));
return Changed(node);
}
return NoChange();
@@ -506,7 +529,8 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
default:
return NoChange();
}
- return r.ChangeToPureOperator(stringOp);
+ r.ChangeToStringComparisonOperator(stringOp);
+ return Changed(node);
}
if (r.OneInputCannotBe(Type::StringOrReceiver())) {
const Operator* less_than;
@@ -560,7 +584,8 @@ Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
}
if (r.BothInputsAre(Type::String())) {
- return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+ return r.ChangeToStringComparisonOperator(simplified()->StringEqual(),
+ invert);
}
if (r.BothInputsAre(Type::Receiver())) {
return r.ChangeToPureOperator(
@@ -579,7 +604,7 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
// x === x is always true if x != NaN
if (!r.left_type()->Maybe(Type::NaN())) {
Node* replacement = jsgraph()->BooleanConstant(!invert);
- Replace(node, replacement);
+ ReplaceWithValue(node, replacement);
return Replace(replacement);
}
}
@@ -588,7 +613,7 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
// empty type intersection means the values cannot be strictly equal.
if (!r.left_type()->Maybe(r.right_type())) {
Node* replacement = jsgraph()->BooleanConstant(invert);
- Replace(node, replacement);
+ ReplaceWithValue(node, replacement);
return Replace(replacement);
}
}
@@ -617,7 +642,8 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
invert);
}
if (r.BothInputsAre(Type::String())) {
- return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+ return r.ChangeToStringComparisonOperator(simplified()->StringEqual(),
+ invert);
}
if (r.BothInputsAre(Type::Number())) {
return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
@@ -629,17 +655,19 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
Reduction JSTypedLowering::ReduceJSUnaryNot(Node* node) {
Node* const input = node->InputAt(0);
- Type* const input_type = NodeProperties::GetBounds(input).upper;
+ Type* const input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::Boolean())) {
// JSUnaryNot(x:boolean) => BooleanNot(x)
- node->set_op(simplified()->BooleanNot());
+ RelaxEffectsAndControls(node);
node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
return Changed(node);
} else if (input_type->Is(Type::OrderedNumber())) {
// JSUnaryNot(x:number) => NumberEqual(x,#0)
- node->set_op(simplified()->NumberEqual());
+ RelaxEffectsAndControls(node);
node->ReplaceInput(1, jsgraph()->ZeroConstant());
- DCHECK_EQ(2, node->InputCount());
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, simplified()->NumberEqual());
return Changed(node);
} else if (input_type->Is(Type::String())) {
// JSUnaryNot(x:string) => NumberEqual(x.length,#0)
@@ -648,11 +676,11 @@ Reduction JSTypedLowering::ReduceJSUnaryNot(Node* node) {
// chain) because we assume String::length to be immutable.
Node* length = graph()->NewNode(simplified()->LoadField(access), input,
graph()->start(), graph()->start());
- node->set_op(simplified()->NumberEqual());
+ ReplaceWithValue(node, node, length);
node->ReplaceInput(0, length);
node->ReplaceInput(1, jsgraph()->ZeroConstant());
- ReplaceWithValue(node, node, length);
- DCHECK_EQ(2, node->InputCount());
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, simplified()->NumberEqual());
return Changed(node);
}
return NoChange();
@@ -661,16 +689,18 @@ Reduction JSTypedLowering::ReduceJSUnaryNot(Node* node) {
Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
Node* const input = node->InputAt(0);
- Type* const input_type = NodeProperties::GetBounds(input).upper;
+ Type* const input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::Boolean())) {
// JSToBoolean(x:boolean) => x
+ ReplaceWithValue(node, input);
return Replace(input);
} else if (input_type->Is(Type::OrderedNumber())) {
// JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
- node->set_op(simplified()->BooleanNot());
+ RelaxEffectsAndControls(node);
node->ReplaceInput(0, graph()->NewNode(simplified()->NumberEqual(), input,
jsgraph()->ZeroConstant()));
node->TrimInputCount(1);
+ NodeProperties::ChangeOp(node, simplified()->BooleanNot());
return Changed(node);
} else if (input_type->Is(Type::String())) {
// JSToBoolean(x:string) => NumberLessThan(#0,x.length)
@@ -679,10 +709,11 @@ Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
// chain) because we assume String::length to be immutable.
Node* length = graph()->NewNode(simplified()->LoadField(access), input,
graph()->start(), graph()->start());
- node->set_op(simplified()->NumberLessThan());
+ ReplaceWithValue(node, node, length);
node->ReplaceInput(0, jsgraph()->ZeroConstant());
node->ReplaceInput(1, length);
- DCHECK_EQ(2, node->InputCount());
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, simplified()->NumberLessThan());
return Changed(node);
}
return NoChange();
@@ -697,7 +728,7 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
return Changed(input); // JSToNumber(JSToNumber(x)) => JSToNumber(x)
}
// Check if we have a cached conversion.
- Type* input_type = NodeProperties::GetBounds(input).upper;
+ Type* input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::Number())) {
// JSToNumber(x:number) => x
return Changed(input);
@@ -727,7 +758,7 @@ Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
ReplaceWithValue(node, reduction.replacement());
return reduction;
}
- Type* const input_type = NodeProperties::GetBounds(input).upper;
+ Type* const input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::PlainPrimitive())) {
if (NodeProperties::GetContextInput(node) !=
jsgraph()->NoContextConstant() ||
@@ -756,7 +787,7 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
if (result.Changed()) return result;
return Changed(input); // JSToString(JSToString(x)) => JSToString(x)
}
- Type* input_type = NodeProperties::GetBounds(input).upper;
+ Type* input_type = NodeProperties::GetType(input);
if (input_type->Is(Type::String())) {
return Changed(input); // JSToString(x:string) => x
}
@@ -786,7 +817,7 @@ Reduction JSTypedLowering::ReduceJSToString(Node* node) {
Reduction JSTypedLowering::ReduceJSLoadGlobal(Node* node) {
// Optimize global constants like "undefined", "Infinity", and "NaN".
- Handle<Name> name = LoadGlobalParametersOf(node->op()).name().handle();
+ Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
Handle<Object> constant_value = factory()->GlobalConstantFor(name);
if (!constant_value.is_null()) {
Node* constant = jsgraph()->Constant(constant_value);
@@ -800,10 +831,10 @@ Reduction JSTypedLowering::ReduceJSLoadGlobal(Node* node) {
Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
Node* receiver = NodeProperties::GetValueInput(node, 0);
- Type* receiver_type = NodeProperties::GetBounds(receiver).upper;
+ Type* receiver_type = NodeProperties::GetType(receiver);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- Handle<Name> name = LoadNamedParametersOf(node->op()).name().handle();
+ Handle<Name> name = LoadNamedParametersOf(node->op()).name();
// Optimize "length" property of strings.
if (name.is_identical_to(factory()->length_string()) &&
receiver_type->Is(Type::String())) {
@@ -821,11 +852,11 @@ Reduction JSTypedLowering::ReduceJSLoadNamed(Node* node) {
Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
Node* key = NodeProperties::GetValueInput(node, 1);
Node* base = NodeProperties::GetValueInput(node, 0);
- Type* key_type = NodeProperties::GetBounds(key).upper;
+ Type* key_type = NodeProperties::GetType(key);
HeapObjectMatcher mbase(base);
- if (mbase.HasValue() && mbase.Value().handle()->IsJSTypedArray()) {
+ if (mbase.HasValue() && mbase.Value()->IsJSTypedArray()) {
Handle<JSTypedArray> const array =
- Handle<JSTypedArray>::cast(mbase.Value().handle());
+ Handle<JSTypedArray>::cast(mbase.Value());
if (!array->GetBuffer()->was_neutered()) {
array->GetBuffer()->set_is_neuterable(false);
BufferAccess const access(array->type());
@@ -866,12 +897,12 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
Node* key = NodeProperties::GetValueInput(node, 1);
Node* base = NodeProperties::GetValueInput(node, 0);
Node* value = NodeProperties::GetValueInput(node, 2);
- Type* key_type = NodeProperties::GetBounds(key).upper;
- Type* value_type = NodeProperties::GetBounds(value).upper;
+ Type* key_type = NodeProperties::GetType(key);
+ Type* value_type = NodeProperties::GetType(value);
HeapObjectMatcher mbase(base);
- if (mbase.HasValue() && mbase.Value().handle()->IsJSTypedArray()) {
+ if (mbase.HasValue() && mbase.Value()->IsJSTypedArray()) {
Handle<JSTypedArray> const array =
- Handle<JSTypedArray>::cast(mbase.Value().handle());
+ Handle<JSTypedArray>::cast(mbase.Value());
if (!array->GetBuffer()->was_neutered()) {
array->GetBuffer()->set_is_neuterable(false);
BufferAccess const access(array->type());
@@ -911,21 +942,23 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
}
// Check if we can avoid the bounds check.
if (key_type->Min() >= 0 && key_type->Max() < array->length_value()) {
- node->set_op(simplified()->StoreElement(
- AccessBuilder::ForTypedArrayElement(array->type(), true)));
+ RelaxControls(node);
node->ReplaceInput(0, buffer);
DCHECK_EQ(key, node->InputAt(1));
node->ReplaceInput(2, value);
node->ReplaceInput(3, effect);
node->ReplaceInput(4, control);
node->TrimInputCount(5);
- RelaxControls(node);
+ NodeProperties::ChangeOp(
+ node,
+ simplified()->StoreElement(
+ AccessBuilder::ForTypedArrayElement(array->type(), true)));
return Changed(node);
}
// Compute byte offset.
Node* offset = Word32Shl(key, static_cast<int>(k));
// Turn into a StoreBuffer operation.
- node->set_op(simplified()->StoreBuffer(access));
+ RelaxControls(node);
node->ReplaceInput(0, buffer);
node->ReplaceInput(1, offset);
node->ReplaceInput(2, length);
@@ -933,7 +966,7 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
node->ReplaceInput(4, effect);
node->ReplaceInput(5, control);
node->TrimInputCount(6);
- RelaxControls(node);
+ NodeProperties::ChangeOp(node, simplified()->StoreBuffer(access));
return Changed(node);
}
}
@@ -954,11 +987,11 @@ Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
NodeProperties::GetValueInput(node, 0), effect, control));
}
- node->set_op(
- simplified()->LoadField(AccessBuilder::ForContextSlot(access.index())));
node->ReplaceInput(1, effect);
node->ReplaceInput(2, control);
- DCHECK_EQ(3, node->InputCount());
+ NodeProperties::ChangeOp(
+ node,
+ simplified()->LoadField(AccessBuilder::ForContextSlot(access.index())));
return Changed(node);
}
@@ -975,10 +1008,10 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
NodeProperties::GetValueInput(node, 0), effect, control));
}
- node->set_op(
- simplified()->StoreField(AccessBuilder::ForContextSlot(access.index())));
node->RemoveInput(2);
- DCHECK_EQ(4, node->InputCount());
+ NodeProperties::ChangeOp(
+ node,
+ simplified()->StoreField(AccessBuilder::ForContextSlot(access.index())));
return Changed(node);
}
@@ -1010,18 +1043,19 @@ Reduction JSTypedLowering::ReduceJSLoadDynamicGlobal(Node* node) {
check_true);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- check_false->set_op(common()->Merge(check_false->InputCount() + 1));
check_false->AppendInput(graph()->zone(), if_false);
+ NodeProperties::ChangeOp(check_false,
+ common()->Merge(check_false->InputCount()));
check_true = if_true;
}
// Fast case, because variable is not shadowed. Perform global object load.
- Unique<Name> name = Unique<Name>::CreateUninitialized(access.name());
Node* global = graph()->NewNode(
javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true), context,
context, effect);
Node* fast = graph()->NewNode(
- javascript()->LoadGlobal(name, access.feedback(), access.typeof_mode()),
+ javascript()->LoadGlobal(access.name(), access.feedback(),
+ access.typeof_mode()),
context, global, vector, context, state1, state2, global, check_true);
// Slow case, because variable potentially shadowed. Perform dynamic lookup.
@@ -1069,8 +1103,9 @@ Reduction JSTypedLowering::ReduceJSLoadDynamicContext(Node* node) {
check_true);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
- check_false->set_op(common()->Merge(check_false->InputCount() + 1));
check_false->AppendInput(graph()->zone(), if_false);
+ NodeProperties::ChangeOp(check_false,
+ common()->Merge(check_false->InputCount()));
check_true = if_true;
}
@@ -1100,6 +1135,45 @@ Reduction JSTypedLowering::ReduceJSLoadDynamicContext(Node* node) {
}
+Reduction JSTypedLowering::ReduceJSCreateArguments(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateArguments, node->opcode());
+ CreateArgumentsParameters const& p = CreateArgumentsParametersOf(node->op());
+ Node* const frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* const outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+
+ // Use the ArgumentsAccessStub for materializing both mapped and unmapped
+ // arguments objects, but only for non-inlined (i.e. outermost) frames.
+ if (p.type() != CreateArgumentsParameters::kRestArray &&
+ outer_state->opcode() != IrOpcode::kFrameState) {
+ Handle<SharedFunctionInfo> shared;
+ Isolate* isolate = jsgraph()->isolate();
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ bool unmapped = p.type() == CreateArgumentsParameters::kUnmappedArguments;
+ Callable callable = CodeFactory::ArgumentsAccess(
+ isolate, unmapped, shared->has_duplicate_parameters());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate, graph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNeedsFrameState);
+ const Operator* new_op = common()->Call(desc);
+ int parameter_count = state_info.parameter_count() - 1;
+ int parameter_offset = parameter_count * kPointerSize;
+ int offset = StandardFrameConstants::kCallerSPOffset + parameter_offset;
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* parameter_pointer = graph()->NewNode(
+ machine()->IntAdd(), graph()->NewNode(machine()->LoadFramePointer()),
+ jsgraph()->IntPtrConstant(offset));
+ node->InsertInput(graph()->zone(), 0, stub_code);
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(parameter_count));
+ node->InsertInput(graph()->zone(), 3, parameter_pointer);
+ NodeProperties::ChangeOp(node, new_op);
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
+
Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
@@ -1116,9 +1190,9 @@ Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
CallDescriptor::kNoFlags);
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- node->ReplaceInput(0, jsgraph()->HeapConstant(shared));
node->InsertInput(graph()->zone(), 0, stub_code);
- node->set_op(new_op);
+ node->InsertInput(graph()->zone(), 1, jsgraph()->HeapConstant(shared));
+ NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
@@ -1129,7 +1203,7 @@ Reduction JSTypedLowering::ReduceJSCreateClosure(Node* node) {
Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateLiteralArray, node->opcode());
HeapObjectMatcher mconst(NodeProperties::GetValueInput(node, 2));
- int length = Handle<FixedArray>::cast(mconst.Value().handle())->length();
+ int length = Handle<FixedArray>::cast(mconst.Value())->length();
int flags = OpParameter<int>(node->op());
// Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
@@ -1148,7 +1222,7 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralArray(Node* node) {
const Operator* new_op = common()->Call(desc);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(graph()->zone(), 0, stub_code);
- node->set_op(new_op);
+ NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
@@ -1160,7 +1234,7 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateLiteralObject, node->opcode());
HeapObjectMatcher mconst(NodeProperties::GetValueInput(node, 2));
// Constants are pairs, see ObjectLiteral::properties_count().
- int length = Handle<FixedArray>::cast(mconst.Value().handle())->length() / 2;
+ int length = Handle<FixedArray>::cast(mconst.Value())->length() / 2;
int flags = OpParameter<int>(node->op());
// Use the FastCloneShallowObjectStub only for shallow boilerplates without
@@ -1178,7 +1252,7 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(flags));
node->InsertInput(graph()->zone(), 0, stub_code);
- node->set_op(new_op);
+ NodeProperties::ChangeOp(node, new_op);
return Changed(node);
}
@@ -1189,7 +1263,7 @@ Reduction JSTypedLowering::ReduceJSCreateLiteralObject(Node* node) {
Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateWithContext, node->opcode());
Node* const input = NodeProperties::GetValueInput(node, 0);
- Type* input_type = NodeProperties::GetBounds(input).upper;
+ Type* input_type = NodeProperties::GetType(input);
if (FLAG_turbo_allocate && input_type->Is(Type::Receiver())) {
// JSCreateWithContext(o:receiver, f)
Node* const effect = NodeProperties::GetEffectInput(node);
@@ -1208,12 +1282,12 @@ Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), input);
a.Store(AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX), load);
// TODO(mstarzinger): We could mutate {node} into the allocation instead.
- NodeProperties::SetBounds(a.allocation(), NodeProperties::GetBounds(node));
+ NodeProperties::SetType(a.allocation(), NodeProperties::GetType(node));
ReplaceWithValue(node, node, a.effect());
node->ReplaceInput(0, a.allocation());
node->ReplaceInput(1, a.effect());
- node->set_op(common()->Finish(1));
node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, common()->Finish(1));
return Changed(node);
}
return NoChange();
@@ -1222,17 +1296,15 @@ Reduction JSTypedLowering::ReduceJSCreateWithContext(Node* node) {
Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSCreateBlockContext, node->opcode());
- Node* const input = NodeProperties::GetValueInput(node, 0);
- HeapObjectMatcher minput(input);
- DCHECK(minput.HasValue()); // TODO(mstarzinger): Make ScopeInfo static.
- int context_length =
- Handle<ScopeInfo>::cast(minput.Value().handle())->ContextLength();
+ Handle<ScopeInfo> scope_info = OpParameter<Handle<ScopeInfo>>(node);
+ int context_length = scope_info->ContextLength();
if (FLAG_turbo_allocate && context_length < kBlockContextAllocationLimit) {
// JSCreateBlockContext(s:scope[length < limit], f)
Node* const effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
Node* const closure = NodeProperties::GetValueInput(node, 1);
Node* const context = NodeProperties::GetContextInput(node);
+ Node* const extension = jsgraph()->Constant(scope_info);
Node* const load = graph()->NewNode(
simplified()->LoadField(
AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX)),
@@ -1242,18 +1314,18 @@ Reduction JSTypedLowering::ReduceJSCreateBlockContext(Node* node) {
a.AllocateArray(context_length, factory()->block_context_map());
a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
- a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), input);
+ a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
a.Store(AccessBuilder::ForContextSlot(Context::GLOBAL_OBJECT_INDEX), load);
for (int i = Context::MIN_CONTEXT_SLOTS; i < context_length; ++i) {
a.Store(AccessBuilder::ForContextSlot(i), jsgraph()->TheHoleConstant());
}
// TODO(mstarzinger): We could mutate {node} into the allocation instead.
- NodeProperties::SetBounds(a.allocation(), NodeProperties::GetBounds(node));
+ NodeProperties::SetType(a.allocation(), NodeProperties::GetType(node));
ReplaceWithValue(node, node, a.effect());
node->ReplaceInput(0, a.allocation());
node->ReplaceInput(1, a.effect());
- node->set_op(common()->Finish(1));
node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, common()->Finish(1));
return Changed(node);
}
return NoChange();
@@ -1265,9 +1337,9 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
int const arity = static_cast<int>(p.arity() - 2);
Node* const function = NodeProperties::GetValueInput(node, 0);
- Type* const function_type = NodeProperties::GetBounds(function).upper;
+ Type* const function_type = NodeProperties::GetType(function);
Node* const receiver = NodeProperties::GetValueInput(node, 1);
- Type* const receiver_type = NodeProperties::GetBounds(receiver).upper;
+ Type* const receiver_type = NodeProperties::GetType(receiver);
Node* const effect = NodeProperties::GetEffectInput(node);
Node* const control = NodeProperties::GetControlInput(node);
@@ -1284,8 +1356,9 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
if (is_strict(p.language_mode())) {
flags |= CallDescriptor::kSupportsTailCalls;
}
- node->set_op(common()->Call(Linkage::GetJSCallDescriptor(
- graph()->zone(), false, 1 + arity, flags)));
+ NodeProperties::ChangeOp(node,
+ common()->Call(Linkage::GetJSCallDescriptor(
+ graph()->zone(), false, 1 + arity, flags)));
return Changed(node);
}
}
@@ -1295,8 +1368,8 @@ Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
Reduction JSTypedLowering::ReduceJSForInDone(Node* node) {
DCHECK_EQ(IrOpcode::kJSForInDone, node->opcode());
- node->set_op(machine()->Word32Equal());
node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Word32Equal());
return Changed(node);
}
@@ -1410,7 +1483,8 @@ Reduction JSTypedLowering::ReduceJSForInPrepare(Node* node) {
cache_array_false0 = cache_type;
cache_length_false0 = efalse0 = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+ simplified()->LoadField(
+ AccessBuilder::ForFixedArrayLength(graph()->zone())),
cache_array_false0, efalse0, if_false0);
}
@@ -1543,20 +1617,19 @@ Reduction JSTypedLowering::ReduceJSForInNext(Node* node) {
control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
ReplaceWithValue(node, node, effect, control);
- node->set_op(common()->Phi(kMachAnyTagged, 2));
node->ReplaceInput(0, vtrue0);
node->ReplaceInput(1, vfalse0);
node->ReplaceInput(2, control);
node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node, common()->Phi(kMachAnyTagged, 2));
return Changed(node);
}
Reduction JSTypedLowering::ReduceJSForInStep(Node* node) {
DCHECK_EQ(IrOpcode::kJSForInStep, node->opcode());
- node->set_op(machine()->Int32Add());
node->ReplaceInput(1, jsgraph()->Int32Constant(1));
- DCHECK_EQ(2, node->InputCount());
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
return Changed(node);
}
@@ -1566,7 +1639,7 @@ Reduction JSTypedLowering::Reduce(Node* node) {
// result value and can simply replace the node if it's eliminable.
if (!NodeProperties::IsConstant(node) && NodeProperties::IsTyped(node) &&
node->op()->HasProperty(Operator::kEliminatable)) {
- Type* upper = NodeProperties::GetBounds(node).upper;
+ Type* upper = NodeProperties::GetType(node);
if (upper->IsConstant()) {
Node* replacement = jsgraph()->Constant(upper->AsConstant()->Value());
ReplaceWithValue(node, replacement);
@@ -1654,6 +1727,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSLoadDynamicGlobal(node);
case IrOpcode::kJSLoadDynamicContext:
return ReduceJSLoadDynamicContext(node);
+ case IrOpcode::kJSCreateArguments:
+ return ReduceJSCreateArguments(node);
case IrOpcode::kJSCreateClosure:
return ReduceJSCreateClosure(node);
case IrOpcode::kJSCreateLiteralArray:
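
Note on the js-typed-lowering.cc hunks above: string equality and comparison lowerings stop being pure (matching the kNoThrow/kEliminatable downgrades in js-operator.cc), so ChangeToStringComparisonOperator keeps effect and control edges alive instead of stripping them. The node rewrite, as a sketch (input layout inferred from the ReplaceInput indices in the hunk):

    // JS comparison inputs: [left, right, context, frame states..., effect, control]
    node->ReplaceInput(2, NodeProperties::GetEffectInput(node));
    node->ReplaceInput(3, NodeProperties::GetControlInput(node));
    node->TrimInputCount(4);  // -> [left, right, effect, control]
    NodeProperties::ChangeOp(node, simplified()->StringEqual());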
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 920f644f28..c11f068e5b 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -57,6 +57,7 @@ class JSTypedLowering final : public AdvancedReducer {
Reduction ReduceJSToNumber(Node* node);
Reduction ReduceJSToStringInput(Node* input);
Reduction ReduceJSToString(Node* node);
+ Reduction ReduceJSCreateArguments(Node* node);
Reduction ReduceJSCreateClosure(Node* node);
Reduction ReduceJSCreateLiteralArray(Node* node);
Reduction ReduceJSCreateLiteralObject(Node* node);
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 80b5e2616f..af3decc5b5 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -225,10 +225,9 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
case Runtime::kForInDone:
case Runtime::kForInStep:
case Runtime::kGetOriginalConstructor:
- case Runtime::kNewArguments:
case Runtime::kNewClosure:
+ case Runtime::kNewClosure_Tenured:
case Runtime::kNewFunctionContext:
- case Runtime::kNewRestParamSlow:
case Runtime::kPushBlockContext:
case Runtime::kPushCatchContext:
case Runtime::kReThrow:
@@ -239,13 +238,23 @@ int Linkage::FrameStateInputCount(Runtime::FunctionId function) {
case Runtime::kTraceExit:
return 0;
case Runtime::kInlineArguments:
+ case Runtime::kInlineArgumentsLength:
+ case Runtime::kInlineCall:
case Runtime::kInlineCallFunction:
case Runtime::kInlineDefaultConstructorCallSuper:
case Runtime::kInlineGetCallerJSFunction:
case Runtime::kInlineGetPrototype:
case Runtime::kInlineRegExpExec:
case Runtime::kInlineSubString:
+ case Runtime::kInlineToInteger:
+ case Runtime::kInlineToLength:
+ case Runtime::kInlineToName:
+ case Runtime::kInlineToNumber:
case Runtime::kInlineToObject:
+ case Runtime::kInlineToPrimitive_Number:
+ case Runtime::kInlineToPrimitive_String:
+ case Runtime::kInlineToPrimitive:
+ case Runtime::kInlineToString:
return 1;
case Runtime::kInlineDeoptimizeNow:
case Runtime::kInlineThrowNotDateError:
@@ -276,7 +285,7 @@ bool CallDescriptor::UsesOnlyRegisters() const {
CallDescriptor* Linkage::GetRuntimeCallDescriptor(
Zone* zone, Runtime::FunctionId function_id, int js_parameter_count,
- Operator::Properties properties) {
+ Operator::Properties properties, bool needs_frame_state) {
const size_t function_count = 1;
const size_t num_args_count = 1;
const size_t context_count = 1;
@@ -319,9 +328,10 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
locations.AddParam(regloc(kContextRegister));
types.AddParam(kMachAnyTagged);
- CallDescriptor::Flags flags = Linkage::FrameStateInputCount(function_id) > 0
- ? CallDescriptor::kNeedsFrameState
- : CallDescriptor::kNoFlags;
+ CallDescriptor::Flags flags =
+ needs_frame_state && (Linkage::FrameStateInputCount(function_id) > 0)
+ ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
// The target for runtime calls is a code object.
MachineType target_type = kMachAnyTagged;
@@ -388,8 +398,8 @@ CallDescriptor* Linkage::GetJSCallDescriptor(Zone* zone, bool is_osr,
CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
- MachineSignature::Builder types(zone, 0, 5);
- LocationSignature::Builder locations(zone, 0, 5);
+ MachineSignature::Builder types(zone, 0, 6);
+ LocationSignature::Builder locations(zone, 0, 6);
// Add registers for fixed parameters passed via interpreter dispatch.
STATIC_ASSERT(0 == Linkage::kInterpreterAccumulatorParameter);
@@ -412,6 +422,15 @@ CallDescriptor* Linkage::GetInterpreterDispatchDescriptor(Zone* zone) {
types.AddParam(kMachPtr);
locations.AddParam(regloc(kInterpreterDispatchTableRegister));
+ STATIC_ASSERT(5 == Linkage::kInterpreterContextParameter);
+ types.AddParam(kMachAnyTagged);
+#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X87)
+ locations.AddParam(
+ LinkageLocation::ForCallerFrameSlot(kInterpreterContextSpillSlot));
+#else
+ locations.AddParam(regloc(kContextRegister));
+#endif
+
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index f5507a0594..b25fe413c9 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -273,7 +273,7 @@ class Linkage : public ZoneObject {
CallDescriptor::Flags flags);
static CallDescriptor* GetRuntimeCallDescriptor(
Zone* zone, Runtime::FunctionId function, int parameter_count,
- Operator::Properties properties);
+ Operator::Properties properties, bool needs_frame_state = true);
static CallDescriptor* GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
@@ -335,6 +335,7 @@ class Linkage : public ZoneObject {
static const int kInterpreterBytecodeOffsetParameter = 2;
static const int kInterpreterBytecodeArrayParameter = 3;
static const int kInterpreterDispatchTableParameter = 4;
+ static const int kInterpreterContextParameter = 5;
private:
CallDescriptor* const incoming_;
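
The new `needs_frame_state` default keeps every existing call site of GetRuntimeCallDescriptor working while letting new callers suppress the frame state input. A hypothetical call site, assuming the lowering can prove the call never lazily deoptimizes (the runtime function id and properties below are illustrative, not taken from this patch):

    // Descriptor for a runtime call that can never lazily deoptimize, so
    // the frame state input is suppressed even though
    // FrameStateInputCount() for this function id may report one.
    CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
        zone, Runtime::kInlineToNumber, 1, Operator::kNoProperties,
        false /* needs_frame_state */);
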
diff --git a/deps/v8/src/compiler/live-range-separator.cc b/deps/v8/src/compiler/live-range-separator.cc
new file mode 100644
index 0000000000..f29e4b4a20
--- /dev/null
+++ b/deps/v8/src/compiler/live-range-separator.cc
@@ -0,0 +1,172 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/live-range-separator.h"
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+ } while (false)
+
+
+namespace {
+
+// Starting from a deferred block, find the last consecutive deferred block.
+RpoNumber GetLastDeferredBlock(const InstructionBlock *block,
+ const InstructionSequence *code) {
+ DCHECK(block->IsDeferred());
+ RpoNumber first = block->rpo_number();
+
+ RpoNumber last = first;
+ for (int i = first.ToInt(); i < code->InstructionBlockCount(); ++i) {
+ RpoNumber at_i = RpoNumber::FromInt(i);
+ const InstructionBlock *block_at_i = code->InstructionBlockAt(at_i);
+ if (!block_at_i->IsDeferred()) break;
+ last = at_i;
+ }
+
+ return last;
+}
+
+
+// Delimits consecutive deferred block sequences.
+void AssociateDeferredBlockSequences(InstructionSequence *code) {
+ for (int blk_id = 0; blk_id < code->InstructionBlockCount(); ++blk_id) {
+ InstructionBlock *block =
+ code->InstructionBlockAt(RpoNumber::FromInt(blk_id));
+ if (!block->IsDeferred()) continue;
+ RpoNumber last = GetLastDeferredBlock(block, code);
+ block->set_last_deferred(last);
+ // We know last is still deferred, and that last + 1 is not (or is an
+ // invalid index). So skip over last + 1 and continue from last + 2. This
+ // way, we visit each block exactly once, and the total complexity of this
+ // function is O(n), n being the number of blocks.
+ blk_id = last.ToInt() + 1;
+ }
+}
+
+
+void CreateSplinter(TopLevelLiveRange *range, RegisterAllocationData *data,
+ LifetimePosition first_cut, LifetimePosition last_cut) {
+ DCHECK(!range->IsSplinter());
+ // We can ignore ranges that live solely in deferred blocks.
+ // If a range ends right at the end of a deferred block, it is marked by
+ // the range builder as ending at gap start of the next block - since the
+ // end is a position where the variable isn't live. We need to take that
+ // into consideration.
+ LifetimePosition max_allowed_end = last_cut.NextFullStart();
+
+ if (first_cut <= range->Start() && max_allowed_end >= range->End()) {
+ return;
+ }
+
+ LifetimePosition start = Max(first_cut, range->Start());
+ LifetimePosition end = Min(last_cut, range->End());
+
+ if (start < end) {
+ // Ensure the original range has a spill range associated, before it gets
+ // splintered. Splinters will point to it. This way, when attempting to
+ // reuse spill slots of splinters, during allocation, we avoid clobbering
+ // such slots.
+ if (range->MayRequireSpillRange()) {
+ data->CreateSpillRangeForLiveRange(range);
+ }
+ TopLevelLiveRange *result = data->NextLiveRange(range->machine_type());
+ DCHECK_NULL(data->live_ranges()[result->vreg()]);
+ data->live_ranges()[result->vreg()] = result;
+
+ Zone *zone = data->allocation_zone();
+ range->Splinter(start, end, result, zone);
+ }
+}
+
+
+// Splinter all ranges live inside successive deferred blocks.
+// No control flow analysis is performed. After the register allocation, we will
+// merge the splinters back into the original ranges, and then rely on the
+// range connector to properly connect them.
+void SplinterRangesInDeferredBlocks(RegisterAllocationData *data) {
+ InstructionSequence *code = data->code();
+ int code_block_count = code->InstructionBlockCount();
+ Zone *zone = data->allocation_zone();
+ ZoneVector<BitVector *> &in_sets = data->live_in_sets();
+
+ for (int i = 0; i < code_block_count; ++i) {
+ InstructionBlock *block = code->InstructionBlockAt(RpoNumber::FromInt(i));
+ if (!block->IsDeferred()) continue;
+
+ RpoNumber last_deferred = block->last_deferred();
+ // last_deferred + 1 is not deferred, so no point in visiting it.
+ i = last_deferred.ToInt() + 1;
+
+ LifetimePosition first_cut = LifetimePosition::GapFromInstructionIndex(
+ block->first_instruction_index());
+
+ LifetimePosition last_cut = LifetimePosition::GapFromInstructionIndex(
+ static_cast<int>(code->instructions().size()));
+
+ const BitVector *in_set = in_sets[block->rpo_number().ToInt()];
+ BitVector ranges_to_splinter(*in_set, zone);
+ InstructionBlock *last = code->InstructionBlockAt(last_deferred);
+ for (int deferred_id = block->rpo_number().ToInt();
+ deferred_id <= last->rpo_number().ToInt(); ++deferred_id) {
+ const BitVector *ins = in_sets[deferred_id];
+ ranges_to_splinter.Union(*ins);
+ const BitVector *outs = LiveRangeBuilder::ComputeLiveOut(
+ code->InstructionBlockAt(RpoNumber::FromInt(deferred_id)), data);
+ ranges_to_splinter.Union(*outs);
+ }
+
+ int last_index = last->last_instruction_index();
+ if (code->InstructionAt(last_index)->opcode() ==
+ ArchOpcode::kArchDeoptimize) {
+ ++last_index;
+ }
+ last_cut = LifetimePosition::GapFromInstructionIndex(last_index);
+
+ BitVector::Iterator iterator(&ranges_to_splinter);
+
+ while (!iterator.Done()) {
+ int range_id = iterator.Current();
+ iterator.Advance();
+
+ TopLevelLiveRange *range = data->live_ranges()[range_id];
+ CreateSplinter(range, data, first_cut, last_cut);
+ }
+ }
+}
+} // namespace
+
+
+void LiveRangeSeparator::Splinter() {
+ AssociateDeferredBlockSequences(data()->code());
+ SplinterRangesInDeferredBlocks(data());
+}
+
+
+void LiveRangeMerger::Merge() {
+ int live_range_count = static_cast<int>(data()->live_ranges().size());
+ for (int i = 0; i < live_range_count; ++i) {
+ TopLevelLiveRange *range = data()->live_ranges()[i];
+ if (range == nullptr || range->IsEmpty() || !range->IsSplinter()) {
+ continue;
+ }
+ TopLevelLiveRange *splinter_parent = range->splintered_from();
+
+ int to_remove = range->vreg();
+ splinter_parent->Merge(range, data()->allocation_zone());
+ data()->live_ranges()[to_remove] = nullptr;
+ }
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
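
To make the interval arithmetic in CreateSplinter concrete, here is a self-contained toy model; plain ints stand in for LifetimePositions, so this is a sketch rather than V8 code:

    #include <algorithm>
    #include <cstdio>

    struct Range { int start, end; };

    // Returns the slice of `r` overlapping the deferred region
    // [first_cut, last_cut), or an empty range if there is nothing to cut.
    Range SplinterInterval(Range r, int first_cut, int last_cut) {
      int start = std::max(first_cut, r.start);
      int end = std::min(last_cut, r.end);
      if (start >= end) return {0, 0};
      return {start, end};
    }

    int main() {
      // A range [10, 50) crossing a deferred region [20, 30) yields the
      // splinter [20, 30); CreateSplinter skips ranges living entirely
      // inside the region via its early return.
      Range s = SplinterInterval({10, 50}, 20, 30);
      std::printf("[%d, %d)\n", s.start, s.end);
      return 0;
    }
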
diff --git a/deps/v8/src/compiler/live-range-separator.h b/deps/v8/src/compiler/live-range-separator.h
new file mode 100644
index 0000000000..c8e6edc20b
--- /dev/null
+++ b/deps/v8/src/compiler/live-range-separator.h
@@ -0,0 +1,60 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIVE_RANGE_SEPARATOR_H_
+#define V8_LIVE_RANGE_SEPARATOR_H_
+
+
+#include "src/zone.h"
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class RegisterAllocationData;
+
+
+// A pair of register allocation transformations: splintering and merging live ranges.
+class LiveRangeSeparator final : public ZoneObject {
+ public:
+ LiveRangeSeparator(RegisterAllocationData* data, Zone* zone)
+ : data_(data), zone_(zone) {}
+
+ void Splinter();
+
+ private:
+ RegisterAllocationData* data() const { return data_; }
+ Zone* zone() const { return zone_; }
+
+ RegisterAllocationData* const data_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeSeparator);
+};
+
+
+class LiveRangeMerger final : public ZoneObject {
+ public:
+ LiveRangeMerger(RegisterAllocationData* data, Zone* zone)
+ : data_(data), zone_(zone) {}
+
+ void Merge();
+
+ private:
+ RegisterAllocationData* data() const { return data_; }
+ Zone* zone() const { return zone_; }
+
+ RegisterAllocationData* const data_;
+ Zone* const zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeMerger);
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+#endif // V8_LIVE_RANGE_SEPARATOR_H_
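
Assumed ordering inside the register allocation pipeline — splinter before allocation, merge afterwards. The allocator phase named below is a hypothetical stand-in; this patch does not show the pipeline wiring:

    LiveRangeSeparator separator(data, zone);
    separator.Splinter();               // carve deferred-block slices off ranges
    RunRegisterAllocationPhases(data);  // hypothetical allocator phases
    LiveRangeMerger merger(data, zone);
    merger.Merge();                     // fold splinters back into their parents
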
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 86e677d8ee..c174da2f7f 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -213,14 +213,14 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
return ReplaceInt32(m.left().Value() * m.right().Value());
}
if (m.right().Is(-1)) { // x * -1 => 0 - x
- node->set_op(machine()->Int32Sub());
node->ReplaceInput(0, Int32Constant(0));
node->ReplaceInput(1, m.left().node());
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
return Changed(node);
}
if (m.right().IsPowerOf2()) { // x * 2^n => x << n
- node->set_op(machine()->Word32Shl());
node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
+ NodeProperties::ChangeOp(node, machine()->Word32Shl());
Reduction reduction = ReduceWord32Shl(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -338,9 +338,9 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
case IrOpcode::kFloat64Mul: {
Float64BinopMatcher m(node);
if (m.right().Is(-1)) { // x * -1.0 => -0.0 - x
- node->set_op(machine()->Float64Sub());
node->ReplaceInput(0, Float64Constant(-0.0));
node->ReplaceInput(1, m.left().node());
+ NodeProperties::ChangeOp(node, machine()->Float64Sub());
return Changed(node);
}
if (m.right().Is(1)) return Replace(m.left().node()); // x * 1.0 => x
@@ -461,9 +461,9 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
if (m.left().IsInt32Sub()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.left().Is(0)) { // (0 - x) + y => y - x
- node->set_op(machine()->Int32Sub());
node->ReplaceInput(0, m.right().node());
node->ReplaceInput(1, mleft.right().node());
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
Reduction const reduction = ReduceInt32Sub(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -471,8 +471,8 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
if (m.right().IsInt32Sub()) {
Int32BinopMatcher mright(m.right().node());
if (mright.left().Is(0)) { // y + (0 - x) => y - x
- node->set_op(machine()->Int32Sub());
node->ReplaceInput(1, mright.right().node());
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
Reduction const reduction = ReduceInt32Sub(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -491,8 +491,8 @@ Reduction MachineOperatorReducer::ReduceInt32Sub(Node* node) {
}
if (m.LeftEqualsRight()) return ReplaceInt32(0); // x - x => 0
if (m.right().HasValue()) { // x - K => x + -K
- node->set_op(machine()->Int32Add());
node->ReplaceInput(1, Int32Constant(-m.right().Value()));
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
Reduction const reduction = ReduceInt32Add(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -514,10 +514,10 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
return Replace(Word32Equal(Word32Equal(m.left().node(), zero), zero));
}
if (m.right().Is(-1)) { // x / -1 => 0 - x
- node->set_op(machine()->Int32Sub());
node->ReplaceInput(0, Int32Constant(0));
node->ReplaceInput(1, m.left().node());
node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
return Changed(node);
}
if (m.right().HasValue()) {
@@ -536,10 +536,10 @@ Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
quotient = Int32Div(quotient, Abs(divisor));
}
if (divisor < 0) {
- node->set_op(machine()->Int32Sub());
node->ReplaceInput(0, Int32Constant(0));
node->ReplaceInput(1, quotient);
node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
return Changed(node);
}
return Replace(quotient);
@@ -565,9 +565,9 @@ Reduction MachineOperatorReducer::ReduceUint32Div(Node* node) {
Node* const dividend = m.left().node();
uint32_t const divisor = m.right().Value();
if (base::bits::IsPowerOfTwo32(divisor)) { // x / 2^n => x >> n
- node->set_op(machine()->Word32Shr());
node->ReplaceInput(1, Uint32Constant(WhichPowerOf2(m.right().Value())));
node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Word32Shr());
return Changed(node);
} else {
return Replace(Uint32Div(dividend, divisor));
@@ -594,18 +594,19 @@ Reduction MachineOperatorReducer::ReduceInt32Mod(Node* node) {
if (base::bits::IsPowerOfTwo32(divisor)) {
uint32_t const mask = divisor - 1;
Node* const zero = Int32Constant(0);
- node->set_op(common()->Select(kMachInt32, BranchHint::kFalse));
node->ReplaceInput(
0, graph()->NewNode(machine()->Int32LessThan(), dividend, zero));
node->ReplaceInput(
1, Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)));
node->ReplaceInput(2, Word32And(dividend, mask));
+ NodeProperties::ChangeOp(
+ node, common()->Select(kMachInt32, BranchHint::kFalse));
} else {
Node* quotient = Int32Div(dividend, divisor);
- node->set_op(machine()->Int32Sub());
DCHECK_EQ(dividend, node->InputAt(0));
node->ReplaceInput(1, Int32Mul(quotient, Int32Constant(divisor)));
node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
}
return Changed(node);
}
@@ -627,15 +628,16 @@ Reduction MachineOperatorReducer::ReduceUint32Mod(Node* node) {
Node* const dividend = m.left().node();
uint32_t const divisor = m.right().Value();
if (base::bits::IsPowerOfTwo32(divisor)) { // x % 2^n => x & 2^n-1
- node->set_op(machine()->Word32And());
node->ReplaceInput(1, Uint32Constant(m.right().Value() - 1));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Word32And());
} else {
Node* quotient = Uint32Div(dividend, divisor);
- node->set_op(machine()->Int32Sub());
DCHECK_EQ(dividend, node->InputAt(0));
node->ReplaceInput(1, Int32Mul(quotient, Uint32Constant(divisor)));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
}
- node->TrimInputCount(2);
return Changed(node);
}
return NoChange();
@@ -663,7 +665,8 @@ Reduction MachineOperatorReducer::ReduceTruncateFloat64ToInt32(Node* node) {
if (reduction.Changed()) input = reduction.replacement();
phi->ReplaceInput(i, input);
}
- phi->set_op(common()->Phi(kMachInt32, value_input_count));
+ NodeProperties::ChangeOp(phi,
+ common()->Phi(kMachInt32, value_input_count));
return Replace(phi);
}
}
@@ -776,10 +779,10 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
Int32BinopMatcher mleft(m.left().node());
if (mleft.right().Is(m.right().Value())) {
- node->set_op(machine()->Word32And());
node->ReplaceInput(0, mleft.left().node());
node->ReplaceInput(1,
Uint32Constant(~((1U << m.right().Value()) - 1U)));
+ NodeProperties::ChangeOp(node, machine()->Word32And());
Reduction reduction = ReduceWord32And(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -800,9 +803,9 @@ Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
if (mleft.left().IsComparison()) {
if (m.right().Is(31) && mleft.right().Is(31)) {
// Comparison << 31 >> 31 => 0 - Comparison
- node->set_op(machine()->Int32Sub());
node->ReplaceInput(0, Int32Constant(0));
node->ReplaceInput(1, mleft.left().node());
+ NodeProperties::ChangeOp(node, machine()->Int32Sub());
Reduction const reduction = ReduceInt32Sub(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -859,9 +862,9 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
if (mleft.right().HasValue() &&
(mleft.right().Value() & mask) == mleft.right().Value()) {
// (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
- node->set_op(machine()->Int32Add());
node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
node->ReplaceInput(1, mleft.right().node());
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
Reduction const reduction = ReduceInt32Add(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -869,10 +872,10 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
Int32BinopMatcher mleftleft(mleft.left().node());
if (mleftleft.right().IsMultipleOf(-mask)) {
// (y * (K << L) + x) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
- node->set_op(machine()->Int32Add());
node->ReplaceInput(0,
Word32And(mleft.right().node(), m.right().node()));
node->ReplaceInput(1, mleftleft.node());
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
Reduction const reduction = ReduceInt32Add(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -881,10 +884,10 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
Int32BinopMatcher mleftright(mleft.right().node());
if (mleftright.right().IsMultipleOf(-mask)) {
// (x + y * (K << L)) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
- node->set_op(machine()->Int32Add());
node->ReplaceInput(0,
Word32And(mleft.left().node(), m.right().node()));
node->ReplaceInput(1, mleftright.node());
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
Reduction const reduction = ReduceInt32Add(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -893,10 +896,10 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
Int32BinopMatcher mleftleft(mleft.left().node());
if (mleftleft.right().Is(base::bits::CountTrailingZeros32(mask))) {
// (y << L + x) & (-1 << L) => (x & (-1 << L)) + y << L
- node->set_op(machine()->Int32Add());
node->ReplaceInput(0,
Word32And(mleft.right().node(), m.right().node()));
node->ReplaceInput(1, mleftleft.node());
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
Reduction const reduction = ReduceInt32Add(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -905,10 +908,10 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
Int32BinopMatcher mleftright(mleft.right().node());
if (mleftright.right().Is(base::bits::CountTrailingZeros32(mask))) {
// (x + y << L) & (-1 << L) => (x & (-1 << L)) + y << L
- node->set_op(machine()->Int32Add());
node->ReplaceInput(0,
Word32And(mleft.left().node(), m.right().node()));
node->ReplaceInput(1, mleftright.node());
+ NodeProperties::ChangeOp(node, machine()->Int32Add());
Reduction const reduction = ReduceInt32Add(node);
return reduction.Changed() ? reduction : Changed(node);
}
@@ -975,9 +978,9 @@ Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
if (!msub.left().Is(32) || msub.right().node() != y) return NoChange();
}
- node->set_op(machine()->Word32Ror());
node->ReplaceInput(0, mshl.left().node());
node->ReplaceInput(1, mshr.right().node());
+ NodeProperties::ChangeOp(node, machine()->Word32Ror());
return Changed(node);
}
@@ -1008,31 +1011,57 @@ Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
}
+namespace {
+
+bool IsFloat64RepresentableAsFloat32(const Float64Matcher& m) {
+ if (m.HasValue()) {
+ double v = m.Value();
+ float fv = static_cast<float>(v);
+ return static_cast<double>(fv) == v;
+ }
+ return false;
+}
+
+} // namespace
+
+
Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
DCHECK((IrOpcode::kFloat64Equal == node->opcode()) ||
(IrOpcode::kFloat64LessThan == node->opcode()) ||
(IrOpcode::kFloat64LessThanOrEqual == node->opcode()));
// As all Float32 values have an exact representation in Float64, comparing
// two Float64 values both converted from Float32 is equivalent to comparing
- // the original Float32s, so we can ignore the conversions.
+ // the original Float32s, so we can ignore the conversions. We can also reduce
+ // comparisons of converted Float64 values against constants that can be
+ // represented exactly as Float32.
Float64BinopMatcher m(node);
- if (m.left().IsChangeFloat32ToFloat64() &&
- m.right().IsChangeFloat32ToFloat64()) {
+ if ((m.left().IsChangeFloat32ToFloat64() &&
+ m.right().IsChangeFloat32ToFloat64()) ||
+ (m.left().IsChangeFloat32ToFloat64() &&
+ IsFloat64RepresentableAsFloat32(m.right())) ||
+ (IsFloat64RepresentableAsFloat32(m.left()) &&
+ m.right().IsChangeFloat32ToFloat64())) {
switch (node->opcode()) {
case IrOpcode::kFloat64Equal:
- node->set_op(machine()->Float32Equal());
+ NodeProperties::ChangeOp(node, machine()->Float32Equal());
break;
case IrOpcode::kFloat64LessThan:
- node->set_op(machine()->Float32LessThan());
+ NodeProperties::ChangeOp(node, machine()->Float32LessThan());
break;
case IrOpcode::kFloat64LessThanOrEqual:
- node->set_op(machine()->Float32LessThanOrEqual());
+ NodeProperties::ChangeOp(node, machine()->Float32LessThanOrEqual());
break;
default:
return NoChange();
}
- node->ReplaceInput(0, m.left().InputAt(0));
- node->ReplaceInput(1, m.right().InputAt(0));
+ node->ReplaceInput(
+ 0, m.left().HasValue()
+ ? Float32Constant(static_cast<float>(m.left().Value()))
+ : m.left().InputAt(0));
+ node->ReplaceInput(
+ 1, m.right().HasValue()
+ ? Float32Constant(static_cast<float>(m.right().Value()))
+ : m.right().InputAt(0));
return Changed(node);
}
return NoChange();
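
The round-trip test in IsFloat64RepresentableAsFloat32 is the whole trick behind the widened comparison reduction; a standalone restatement of the same check:

    #include <cstdio>

    // Narrow to float and see whether widening back recovers the value.
    bool RepresentableAsFloat32(double v) {
      float f = static_cast<float>(v);
      return static_cast<double>(f) == v;
    }

    int main() {
      std::printf("%d\n", RepresentableAsFloat32(1.5));  // 1: exact in binary32
      std::printf("%d\n", RepresentableAsFloat32(0.1));  // 0: rounds when narrowed
      return 0;
    }
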
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 2e2229032c..38bb056157 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -136,6 +136,10 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 0, 1) \
V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 0, 1) \
V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastFloat32ToInt32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastFloat64ToInt64, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastInt32ToFloat32, Operator::kNoProperties, 1, 0, 1) \
+ V(BitcastInt64ToFloat64, Operator::kNoProperties, 1, 0, 1) \
V(Float32Abs, Operator::kNoProperties, 1, 0, 1) \
V(Float32Add, Operator::kCommutative, 2, 0, 1) \
V(Float32Sub, Operator::kNoProperties, 2, 0, 1) \
@@ -183,15 +187,8 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(MachUint32) \
V(MachInt64) \
V(MachUint64) \
- V(MachAnyTagged) \
- V(RepBit) \
- V(RepWord8) \
- V(RepWord16) \
- V(RepWord32) \
- V(RepWord64) \
- V(RepFloat32) \
- V(RepFloat64) \
- V(RepTagged)
+ V(MachPtr) \
+ V(MachAnyTagged)
struct MachineOperatorGlobalCache {
@@ -279,7 +276,7 @@ static base::LazyInstance<MachineOperatorGlobalCache>::type kCache =
MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone, MachineType word,
Flags flags)
- : zone_(zone), cache_(kCache.Get()), word_(word), flags_(flags) {
+ : cache_(kCache.Get()), word_(word), flags_(flags) {
DCHECK(word == kRepWord32 || word == kRepWord64);
}
@@ -322,10 +319,8 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
default:
break;
}
- // Uncached.
- return new (zone_) Operator1<LoadRepresentation>( // --
- IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, "Load", 2, 1, 1,
- 1, 1, 0, rep);
+ UNREACHABLE();
+ return nullptr;
}
@@ -346,10 +341,8 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
default:
break;
}
- // Uncached.
- return new (zone_) Operator1<StoreRepresentation>( // --
- IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, "Store", 3, 1,
- 1, 0, 1, 0, rep);
+ UNREACHABLE();
+ return nullptr;
}
@@ -364,10 +357,8 @@ const Operator* MachineOperatorBuilder::CheckedLoad(
default:
break;
}
- // Uncached.
- return new (zone_) Operator1<CheckedLoadRepresentation>(
- IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite,
- "CheckedLoad", 3, 1, 1, 1, 1, 0, rep);
+ UNREACHABLE();
+ return nullptr;
}
@@ -382,10 +373,8 @@ const Operator* MachineOperatorBuilder::CheckedStore(
default:
break;
}
- // Uncached.
- return new (zone_) Operator1<CheckedStoreRepresentation>(
- IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow,
- "CheckedStore", 4, 1, 1, 0, 1, 0, rep);
+ UNREACHABLE();
+ return nullptr;
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 0c055b8732..27abfb4acc 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -195,6 +195,13 @@ class MachineOperatorBuilder final : public ZoneObject {
const Operator* TruncateFloat64ToInt32(TruncationMode);
const Operator* TruncateInt64ToInt32();
+ // These operators reinterpret the bits of a floating point number as an
+ // integer and vice versa.
+ const Operator* BitcastFloat32ToInt32();
+ const Operator* BitcastFloat64ToInt64();
+ const Operator* BitcastInt32ToFloat32();
+ const Operator* BitcastInt64ToFloat64();
+
// Floating point operators always operate with IEEE 754 round-to-nearest
// (single-precision).
const Operator* Float32Add();
@@ -297,7 +304,6 @@ class MachineOperatorBuilder final : public ZoneObject {
#undef PSEUDO_OP_LIST
private:
- Zone* const zone_;
MachineOperatorGlobalCache const& cache_;
MachineType const word_;
Flags const flags_;
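
The semantics of the new bitcast operators can be modelled portably with memcpy; a sketch of one direction (the other three are analogous):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Portable model of BitcastFloat32ToInt32: same bits, new type. memcpy
    // is the standard-sanctioned way to express a bit reinterpretation.
    int32_t BitcastFloat32ToInt32(float f) {
      int32_t bits;
      std::memcpy(&bits, &f, sizeof bits);
      return bits;
    }

    int main() {
      std::printf("0x%08x\n",
                  static_cast<uint32_t>(BitcastFloat32ToInt32(1.0f)));
      // prints 0x3f800000
      return 0;
    }
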
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 560ef26692..7769b9e739 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -304,8 +304,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ addu(at, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(at, 0)); \
+ __ addu(kScratchReg, i.InputRegister(2), offset); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
__ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
@@ -322,8 +322,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ addu(at, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(at, 0)); \
+ __ addu(kScratchReg, i.InputRegister(2), offset); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
__ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
@@ -340,8 +340,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
auto offset = i.InputRegister(0); \
auto value = i.Input##width##Register(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ addu(at, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(at, 0)); \
+ __ addu(kScratchReg, i.InputRegister(3), offset); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
auto value = i.Input##width##Register(2); \
@@ -359,8 +359,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
auto offset = i.InputRegister(0); \
auto value = i.InputRegister(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ addu(at, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(at, 0)); \
+ __ addu(kScratchReg, i.InputRegister(3), offset); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
auto offset = i.InputOperand(0).immediate(); \
auto value = i.InputRegister(2); \
@@ -858,6 +858,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
break;
+ case kCheckedLoadWord64:
+ case kCheckedStoreWord64:
+ UNREACHABLE(); // currently unsupported checked int64 load/store.
+ break;
}
}
@@ -900,15 +904,12 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
Condition cc = kNoCondition;
-
// MIPS does not have condition code flags, so compare and branch are
// implemented differently than on the other arch's. The compare operations
// emit mips pseudo-instructions, which are handled here by branch
// instructions that do the actual comparison. Essential that the input
// registers to compare pseudo-op are not modified before this branch op, as
// they are tested here.
- // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
- // not separated by other instructions.
if (instr->arch_opcode() == kMipsTst) {
cc = FlagsConditionToConditionTst(branch->condition);
@@ -960,40 +961,104 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
DCHECK_NE(0u, instr->OutputCount());
Register result = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
-
// MIPS does not have condition code flags, so compare and branch are
// implemented differently than on the other arch's. The compare operations
// emit mips pseudo-instructions, which are checked and handled here.
- // For materializations, we use delay slot to set the result true, and
- // in the false case, where we fall thru the branch, we reset the result
- // false.
-
if (instr->arch_opcode() == kMipsTst) {
cc = FlagsConditionToConditionTst(condition);
- __ And(at, i.InputRegister(0), i.InputOperand(1));
- __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
- __ li(result, Operand(1)); // In delay slot.
-
+ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
+ __ xori(result, zero_reg, 1); // Create 1 for true.
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (cc == eq) {
+ __ seleqz(result, result, kScratchReg);
+ } else {
+ __ selnez(result, result, kScratchReg);
+ }
+ } else {
+ if (cc == eq) {
+ __ Movn(result, zero_reg, kScratchReg);
+ } else {
+ __ Movz(result, zero_reg, kScratchReg);
+ }
+ }
+ return;
} else if (instr->arch_opcode() == kMipsAddOvf ||
instr->arch_opcode() == kMipsSubOvf) {
// kMipsAddOvf, SubOvf emits negative result to 'kCompareReg' on overflow.
cc = FlagsConditionToConditionOvf(condition);
- __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
- __ li(result, Operand(1)); // In delay slot.
-
+ // Return 1 on overflow.
+ __ Slt(result, kCompareReg, Operand(zero_reg));
+ if (cc == ge) // Invert result when there is no overflow.
+ __ xori(result, result, 1);
+ return;
} else if (instr->arch_opcode() == kMipsCmp) {
- Register left = i.InputRegister(0);
- Operand right = i.InputOperand(1);
cc = FlagsConditionToConditionCmp(condition);
- __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
- __ li(result, Operand(1)); // In delay slot.
-
+ switch (cc) {
+ case eq:
+ case ne: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Subu(kScratchReg, left, right);
+ __ xori(result, zero_reg, 1);
+ if (IsMipsArchVariant(kMips32r6)) {
+ if (cc == eq) {
+ __ seleqz(result, result, kScratchReg);
+ } else {
+ __ selnez(result, result, kScratchReg);
+ }
+ } else {
+ if (cc == eq) {
+ __ Movn(result, zero_reg, kScratchReg);
+ } else {
+ __ Movz(result, zero_reg, kScratchReg);
+ }
+ }
+ } break;
+ case lt:
+ case ge: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Slt(result, left, right);
+ if (cc == ge) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case gt:
+ case le: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Slt(result, left, right);
+ if (cc == le) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case lo:
+ case hs: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Sltu(result, left, right);
+ if (cc == hs) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case hi:
+ case ls: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Sltu(result, left, right);
+ if (cc == ls) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ return;
} else if (instr->arch_opcode() == kMipsCmpD ||
instr->arch_opcode() == kMipsCmpS) {
FPURegister left = i.InputDoubleRegister(0);
FPURegister right = i.InputDoubleRegister(1);
-
bool predicate;
FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
if (!IsMipsArchVariant(kMips32r6)) {
@@ -1028,11 +1093,6 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
TRACE_UNIMPL();
UNIMPLEMENTED();
}
-
- // Fallthrough case is the false materialization.
- __ bind(&false_value);
- __ li(result, Operand(0));
- __ bind(&done);
}
@@ -1361,21 +1421,23 @@ void CodeGenerator::AddNopForSmiCodeInlining() {
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- // Block tramoline pool emission for duration of padding.
- v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= v8::internal::Assembler::kInstrSize;
- }
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ // Block trampoline pool emission for duration of padding.
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
}
}
}
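
A sketch of the padding arithmetic in the rewritten EnsureSpaceForLazyDeopt; the pc offsets and patch size below are illustrative sample values, not taken from this patch:

    #include <cstdio>

    int main() {
      const int kInstrSize = 4;  // MIPS instruction width
      int last_lazy_deopt_pc = 96;
      int space_needed = 24;     // stands in for Deoptimizer::patch_size()
      int current_pc = 104;
      if (current_pc < last_lazy_deopt_pc + space_needed) {
        int padding_size = last_lazy_deopt_pc + space_needed - current_pc;
        std::printf("emit %d nops\n", padding_size / kInstrSize);  // 4 nops
      }
      return 0;
    }
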
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index f95c82627b..3c4b378553 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -402,6 +402,19 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kMipsFloat64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kMipsAddS, node);
}
@@ -775,7 +788,6 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
- // TODO(plind): Revisit and test this path.
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
}
}
@@ -813,12 +825,32 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, opcode)) {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
- cont);
+ switch (cont->condition()) {
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
} else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
- VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
- cont);
+ switch (cont->condition()) {
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
} else {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
cont);
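
The selector now keeps a matched immediate only for the four conditions that map directly onto Slt/Sltu with a right-hand immediate; for the rest, notably the swapped gt/le/hi/ls forms in AssembleArchBoolean above, the immediate would land in a slot the materialization reads as a register. A hypothetical restatement of the filter (the enum is a stand-in for V8's FlagsCondition values):

    enum Cond { kSignedLessThan, kSignedGreaterThanOrEqual,
                kUnsignedLessThan, kUnsignedGreaterThanOrEqual, kOther };

    bool CanKeepImmediateOperand(Cond condition) {
      switch (condition) {
        case kSignedLessThan:
        case kSignedGreaterThanOrEqual:
        case kUnsignedLessThan:
        case kUnsignedGreaterThanOrEqual:
          return true;   // maps onto Slt/Sltu with a right-hand immediate
        default:
          return false;  // eq/ne and swapped forms fall back to registers
      }
    }
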
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index c72d9789b8..053434eec9 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -305,8 +305,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Daddu(at, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(at, 0)); \
+ __ Daddu(kScratchReg, i.InputRegister(2), offset); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
__ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
@@ -323,8 +323,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
if (instr->InputAt(0)->IsRegister()) { \
auto offset = i.InputRegister(0); \
__ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
- __ Daddu(at, i.InputRegister(2), offset); \
- __ asm_instr(result, MemOperand(at, 0)); \
+ __ Daddu(kScratchReg, i.InputRegister(2), offset); \
+ __ asm_instr(result, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
__ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset)); \
@@ -341,8 +341,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
auto offset = i.InputRegister(0); \
auto value = i.Input##width##Register(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Daddu(at, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(at, 0)); \
+ __ Daddu(kScratchReg, i.InputRegister(3), offset); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
auto value = i.Input##width##Register(2); \
@@ -360,8 +360,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool& predicate,
auto offset = i.InputRegister(0); \
auto value = i.InputRegister(2); \
__ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
- __ Daddu(at, i.InputRegister(3), offset); \
- __ asm_instr(value, MemOperand(at, 0)); \
+ __ Daddu(kScratchReg, i.InputRegister(3), offset); \
+ __ asm_instr(value, MemOperand(kScratchReg, 0)); \
} else { \
int offset = static_cast<int>(i.InputOperand(0).immediate()); \
auto value = i.InputRegister(2); \
@@ -799,6 +799,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
+ case kMips64BitcastDL:
+ __ dmfc1(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMips64BitcastLD:
+ __ dmtc1(i.InputRegister(0), i.OutputDoubleRegister());
+ break;
case kMips64Float64ExtractLowWord32:
__ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
break;
@@ -906,6 +912,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
break;
+ case kCheckedLoadWord64:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(ld);
+ break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
break;
@@ -921,6 +930,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(sw);
break;
+ case kCheckedStoreWord64:
+ ASSEMBLE_CHECKED_STORE_INTEGER(sd);
+ break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
break;
@@ -969,7 +981,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
Label* tlabel = branch->true_label;
Label* flabel = branch->false_label;
Condition cc = kNoCondition;
-
// MIPS does not have condition code flags, so compare and branch are
// implemented differently than on the other arch's. The compare operations
// emit mips pseudo-instructions, which are handled here by branch
@@ -1028,38 +1039,105 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
DCHECK_NE(0u, instr->OutputCount());
Register result = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = kNoCondition;
-
// MIPS does not have condition code flags, so compare and branch are
// implemented differently than on the other arch's. The compare operations
// emit mips pseudo-instructions, which are checked and handled here.
- // For materializations, we use delay slot to set the result true, and
- // in the false case, where we fall through the branch, we reset the result
- // false.
-
if (instr->arch_opcode() == kMips64Tst) {
cc = FlagsConditionToConditionTst(condition);
- __ And(at, i.InputRegister(0), i.InputOperand(1));
- __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
- __ li(result, Operand(1)); // In delay slot.
+ __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
+ __ xori(result, zero_reg, 1); // Create 1 for true.
+ if (kArchVariant == kMips64r6) {
+ if (cc == eq) {
+ __ seleqz(result, result, kScratchReg);
+ } else {
+ __ selnez(result, result, kScratchReg);
+ }
+ } else {
+ if (cc == eq) {
+ __ Movn(result, zero_reg, kScratchReg);
+ } else {
+ __ Movz(result, zero_reg, kScratchReg);
+ }
+ }
+ return;
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
cc = FlagsConditionToConditionOvf(condition);
- __ dsra32(kScratchReg, i.OutputRegister(), 0);
- __ sra(at, i.OutputRegister(), 31);
- __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(kScratchReg));
- __ li(result, Operand(1)); // In delay slot.
+ // The overflow check produces 1 or 0 in result.
+ __ dsrl32(kScratchReg, i.OutputRegister(), 31);
+ __ srl(at, i.OutputRegister(), 31);
+ __ xor_(result, kScratchReg, at);
+ if (cc == eq) // Toggle result when there is no overflow.
+ __ xori(result, result, 1);
+ return;
} else if (instr->arch_opcode() == kMips64Cmp) {
- Register left = i.InputRegister(0);
- Operand right = i.InputOperand(1);
cc = FlagsConditionToConditionCmp(condition);
- __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
- __ li(result, Operand(1)); // In delay slot.
+ switch (cc) {
+ case eq:
+ case ne: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Dsubu(kScratchReg, left, right);
+ __ xori(result, zero_reg, 1);
+ if (kArchVariant == kMips64r6) {
+ if (cc == eq) {
+ __ seleqz(result, result, kScratchReg);
+ } else {
+ __ selnez(result, result, kScratchReg);
+ }
+ } else {
+ if (cc == eq) {
+ __ Movn(result, zero_reg, kScratchReg);
+ } else {
+ __ Movz(result, zero_reg, kScratchReg);
+ }
+ }
+ } break;
+ case lt:
+ case ge: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Slt(result, left, right);
+ if (cc == ge) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case gt:
+ case le: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Slt(result, left, right);
+ if (cc == le) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case lo:
+ case hs: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Sltu(result, left, right);
+ if (cc == hs) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case hi:
+ case ls: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Sltu(result, left, right);
+ if (cc == ls) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ return;
} else if (instr->arch_opcode() == kMips64CmpD ||
instr->arch_opcode() == kMips64CmpS) {
FPURegister left = i.InputDoubleRegister(0);
FPURegister right = i.InputDoubleRegister(1);
-
bool predicate;
FPUCondition cc = FlagsConditionToConditionCmpFPU(predicate, condition);
if (kArchVariant != kMips64r6) {
@@ -1094,10 +1172,6 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
TRACE_UNIMPL();
UNIMPLEMENTED();
}
- // Fallthru case is the false materialization.
- __ bind(&false_value);
- __ li(result, Operand(static_cast<int64_t>(0)));
- __ bind(&done);
}
@@ -1428,21 +1502,23 @@ void CodeGenerator::AddNopForSmiCodeInlining() {
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- // Block tramoline pool emission for duration of padding.
- v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
- masm());
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= v8::internal::Assembler::kInstrSize;
- }
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ // Block trampoline pool emission for duration of padding.
+ v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+ masm());
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
}
}
}
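
The new overflow materialization for kMips64Dadd/Dsub comes down to one bit comparison: assuming both operands were sign-extended 32-bit values, the 32-bit operation overflowed exactly when bit 63 and bit 31 of the wide result disagree. A standalone model:

    #include <cstdint>
    #include <cstdio>

    int Overflowed(int64_t wide_result) {
      uint64_t r = static_cast<uint64_t>(wide_result);
      return static_cast<int>((r >> 63) ^ ((r >> 31) & 1));
    }

    int main() {
      int64_t ok = int64_t{1} + int64_t{2};           // 3: bits 63 and 31 agree
      int64_t bad = int64_t{INT32_MAX} + int64_t{1};  // 0x80000000: they differ
      std::printf("%d %d\n", Overflowed(ok), Overflowed(bad));  // 0 1
      return 0;
    }
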
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index ce95ad4e37..38e4c46485 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -88,6 +88,8 @@ namespace compiler {
V(Mips64Swc1) \
V(Mips64Ldc1) \
V(Mips64Sdc1) \
+ V(Mips64BitcastDL) \
+ V(Mips64BitcastLD) \
V(Mips64Float64ExtractLowWord32) \
V(Mips64Float64ExtractHighWord32) \
V(Mips64Float64InsertLowWord32) \
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 33d6f58c28..d20c1c72f6 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -286,6 +286,17 @@ void InstructionSelector::VisitWord32Sar(Node* node) {
void InstructionSelector::VisitWord64Shl(Node* node) {
+ Mips64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+ m.right().IsInRange(32, 63)) {
+ // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+ // 32 bits anyway.
+ Emit(kMips64Dshl, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
VisitRRO(this, kMips64Dshl, node);
}
@@ -529,6 +540,23 @@ void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
Mips64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord64Sar: {
+ Int64BinopMatcher m(value);
+ if (m.right().IsInRange(32, 63)) {
+ // After smi untagging there is no need to truncate; combine the sequence.
+ Emit(kMips64Dsar, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(kSmiShift));
+ return;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
g.TempImmediate(0), g.TempImmediate(32));
}
@@ -550,6 +578,29 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kMips64Float64ExtractLowWord32, node);
+}
+
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kMips64BitcastDL, node);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Float64InsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kMips64BitcastLD, node);
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kMips64AddS, node);
}
@@ -841,6 +892,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case kRepWord32:
opcode = kCheckedLoadWord32;
break;
+ case kRepWord64:
+ opcode = kCheckedLoadWord64;
+ break;
case kRepFloat32:
opcode = kCheckedLoadFloat32;
break;
@@ -885,6 +939,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case kRepWord32:
opcode = kCheckedStoreWord32;
break;
+ case kRepWord64:
+ opcode = kCheckedStoreWord64;
+ break;
case kRepFloat32:
opcode = kCheckedStoreFloat32;
break;
@@ -961,12 +1018,32 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
// Match immediates on left or right side of comparison.
if (g.CanBeImmediate(right, opcode)) {
- VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
- cont);
+ switch (cont->condition()) {
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
} else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
- VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
- cont);
+ switch (cont->condition()) {
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
} else {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
cont);
@@ -1277,16 +1354,12 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64Float64ExtractLowWord32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMips64Float64ExtractLowWord32, node);
}
void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
- Mips64OperandGenerator g(this);
- Emit(kMips64Float64ExtractHighWord32, g.DefineAsRegister(node),
- g.UseRegister(node->InputAt(0)));
+ VisitRR(this, kMips64Float64ExtractHighWord32, node);
}
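
The VisitWord64Shl combine above rests on a simple equivalence: once the shift amount is in [32, 63], every surviving result bit comes from the low 32 bits of the input, so the sign/zero extension can be skipped. A self-contained check:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t x = 0x90000000u;  // negative when read as int32_t
      for (int s = 32; s <= 63; ++s) {
        uint64_t sext = static_cast<uint64_t>(
                            static_cast<int64_t>(static_cast<int32_t>(x)))
                        << s;
        uint64_t zext = static_cast<uint64_t>(x) << s;
        if (sext != zext) std::printf("mismatch at shift %d\n", s);
      }
      std::printf("done\n");  // expected: no mismatches
      return 0;
    }
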
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index d543425fca..bafe3daa68 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -11,7 +11,6 @@
#include "src/assembler.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
-#include "src/unique.h"
namespace v8 {
namespace internal {
@@ -62,14 +61,6 @@ struct ValueMatcher : public NodeMatcher {
return value_;
}
- bool Is(const T& value) const {
- return this->HasValue() && this->Value() == value;
- }
-
- bool IsInRange(const T& low, const T& high) const {
- return this->HasValue() && low <= this->Value() && this->Value() <= high;
- }
-
private:
T value_;
bool has_value_;
@@ -77,6 +68,18 @@ struct ValueMatcher : public NodeMatcher {
template <>
+inline ValueMatcher<uint32_t, IrOpcode::kInt32Constant>::ValueMatcher(
+ Node* node)
+ : NodeMatcher(node),
+ value_(),
+ has_value_(opcode() == IrOpcode::kInt32Constant) {
+ if (has_value_) {
+ value_ = static_cast<uint32_t>(OpParameter<int32_t>(node));
+ }
+}
+
+
+template <>
inline ValueMatcher<int64_t, IrOpcode::kInt64Constant>::ValueMatcher(Node* node)
: NodeMatcher(node), value_(), has_value_(false) {
if (opcode() == IrOpcode::kInt32Constant) {
@@ -94,10 +97,10 @@ inline ValueMatcher<uint64_t, IrOpcode::kInt64Constant>::ValueMatcher(
Node* node)
: NodeMatcher(node), value_(), has_value_(false) {
if (opcode() == IrOpcode::kInt32Constant) {
- value_ = OpParameter<uint32_t>(node);
+ value_ = static_cast<uint32_t>(OpParameter<int32_t>(node));
has_value_ = true;
} else if (opcode() == IrOpcode::kInt64Constant) {
- value_ = OpParameter<uint64_t>(node);
+ value_ = static_cast<uint64_t>(OpParameter<int64_t>(node));
has_value_ = true;
}
}
@@ -108,6 +111,12 @@ template <typename T, IrOpcode::Value kOpcode>
struct IntMatcher final : public ValueMatcher<T, kOpcode> {
explicit IntMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
+ bool Is(const T& value) const {
+ return this->HasValue() && this->Value() == value;
+ }
+ bool IsInRange(const T& low, const T& high) const {
+ return this->HasValue() && low <= this->Value() && this->Value() <= high;
+ }
bool IsMultipleOf(T n) const {
return this->HasValue() && (this->Value() % n) == 0;
}
@@ -139,6 +148,12 @@ template <typename T, IrOpcode::Value kOpcode>
struct FloatMatcher final : public ValueMatcher<T, kOpcode> {
explicit FloatMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
+ bool Is(const T& value) const {
+ return this->HasValue() && this->Value() == value;
+ }
+ bool IsInRange(const T& low, const T& high) const {
+ return this->HasValue() && low <= this->Value() && this->Value() <= high;
+ }
bool IsMinusZero() const {
return this->Is(0.0) && std::signbit(this->Value());
}
@@ -153,9 +168,9 @@ typedef FloatMatcher<double, IrOpcode::kNumberConstant> NumberMatcher;
// A pattern matcher for heap object constants.
struct HeapObjectMatcher final
- : public ValueMatcher<Unique<HeapObject>, IrOpcode::kHeapConstant> {
+ : public ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant> {
explicit HeapObjectMatcher(Node* node)
- : ValueMatcher<Unique<HeapObject>, IrOpcode::kHeapConstant>(node) {}
+ : ValueMatcher<Handle<HeapObject>, IrOpcode::kHeapConstant>(node) {}
};
@@ -164,6 +179,9 @@ struct ExternalReferenceMatcher final
: public ValueMatcher<ExternalReference, IrOpcode::kExternalConstant> {
explicit ExternalReferenceMatcher(Node* node)
: ValueMatcher<ExternalReference, IrOpcode::kExternalConstant>(node) {}
+ bool Is(const ExternalReference& value) const {
+ return this->HasValue() && this->Value() == value;
+ }
};
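
A minimal model of why the new ValueMatcher<uint32_t, kInt32Constant> specialization casts through int32_t: an Int32Constant stores its payload as int32_t, so a uint32_t matcher must reinterpret that payload rather than read it as uint32_t directly:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t stored = -1;                               // OpParameter<int32_t>
      uint32_t matched = static_cast<uint32_t>(stored);  // 0xffffffff
      std::printf("%u\n", matched);
      return 0;
    }
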
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 19ca5dd1b6..0d061a36c4 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -6,6 +6,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/verifier.h"
namespace v8 {
namespace internal {
@@ -196,6 +197,13 @@ void NodeProperties::ReplaceUses(Node* node, Node* value, Node* effect,
// static
+void NodeProperties::ChangeOp(Node* node, const Operator* new_op) {
+ node->set_op(new_op);
+ Verifier::VerifyNode(node);
+}
+
+
+// static
Node* NodeProperties::FindProjection(Node* node, size_t projection_index) {
for (auto use : node->uses()) {
if (use->opcode() == IrOpcode::kProjection &&
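
ChangeOp re-verifies the node immediately, which is why the reducer edits above were reordered to replace inputs first and swap the operator last; the recurring pattern, as it appears in the machine-operator-reducer hunks earlier in this patch:

    node->ReplaceInput(0, Int32Constant(0));
    node->ReplaceInput(1, m.left().node());
    NodeProperties::ChangeOp(node, machine()->Int32Sub());  // verifies the node
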
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index 6d11f6cfcc..313d3749bb 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -94,6 +94,10 @@ class NodeProperties final {
static void ReplaceUses(Node* node, Node* value, Node* effect = nullptr,
Node* success = nullptr, Node* exception = nullptr);
+ // Safe wrapper to mutate the operator of a node. Checks that the node is
+ // currently in a state that satisfies constraints of the new operator.
+ static void ChangeOp(Node* node, const Operator* new_op);
+
// ---------------------------------------------------------------------------
// Miscellaneous utilities.
@@ -106,25 +110,19 @@ class NodeProperties final {
// - Switch: [ IfValue, ..., IfDefault ]
static void CollectControlProjections(Node* node, Node** proj, size_t count);
-
// ---------------------------------------------------------------------------
- // Type Bounds.
+ // Type.
- static bool IsTyped(Node* node) {
- Bounds const bounds = node->bounds();
- DCHECK(!bounds.lower == !bounds.upper);
- return bounds.upper;
- }
- static Bounds GetBounds(Node* node) {
+ static bool IsTyped(Node* node) { return node->type() != nullptr; }
+ static Type* GetType(Node* node) {
DCHECK(IsTyped(node));
- return node->bounds();
+ return node->type();
}
- static void SetBounds(Node* node, Bounds bounds) {
- DCHECK_NOT_NULL(bounds.lower);
- DCHECK_NOT_NULL(bounds.upper);
- node->set_bounds(bounds);
+ static void SetType(Node* node, Type* type) {
+ DCHECK_NOT_NULL(type);
+ node->set_type(type);
}
- static void RemoveBounds(Node* node) { node->set_bounds(Bounds()); }
+ static void RemoveType(Node* node) { node->set_type(nullptr); }
static bool AllValueInputsAreTyped(Node* node);
private:
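
Taken together, these node-properties changes replace raw set_op() calls
with a verifying ChangeOp() and collapse the old lower/upper Bounds pair
into a single Type*. A hedged sketch of migrated client code; the helper
name and stub_op are hypothetical:

    // Swap a node's operator and (re)attach a type under the new API.
    void RetypeAndLower(Node* node, const Operator* stub_op, Type* type) {
      // ChangeOp verifies the node against the new operator immediately,
      // so the inputs must already satisfy stub_op's constraints here.
      NodeProperties::ChangeOp(node, stub_op);
      NodeProperties::SetType(node, type);  // one Type*, no Bounds pair
    }
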
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index e92dccc739..022c44db2d 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -112,7 +112,7 @@ Node* Node::Clone(Zone* zone, NodeId id, const Node* node) {
? node->inputs_.inline_
: node->inputs_.outline_->inputs_;
Node* const clone = New(zone, id, node->op(), input_count, inputs, false);
- clone->set_bounds(node->bounds());
+ clone->set_type(node->type());
return clone;
}
@@ -273,6 +273,7 @@ bool Node::OwnedBy(Node const* owner1, Node const* owner2) const {
Node::Node(NodeId id, const Operator* op, int inline_count, int inline_capacity)
: op_(op),
+ type_(nullptr),
mark_(0),
bit_field_(IdField::encode(id) | InlineCountField::encode(inline_count) |
InlineCapacityField::encode(inline_capacity)),
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index e6c9f23fdc..d6a9b39a56 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -49,7 +49,6 @@ class Node final {
void Kill();
const Operator* op() const { return op_; }
- void set_op(const Operator* op) { op_ = op; }
IrOpcode::Value opcode() const {
DCHECK(op_->opcode() <= IrOpcode::kLast);
@@ -284,9 +283,12 @@ class Node final {
void* operator new(size_t, void* location) { return location; }
- // Only NodeProperties should manipulate the bounds.
- Bounds bounds() const { return bounds_; }
- void set_bounds(Bounds b) { bounds_ = b; }
+ // Only NodeProperties should manipulate the op.
+ void set_op(const Operator* op) { op_ = op; }
+
+ // Only NodeProperties should manipulate the type.
+ Type* type() const { return type_; }
+ void set_type(Type* type) { type_ = type; }
// Only NodeMarkers should manipulate the marks on nodes.
Mark mark() { return mark_; }
@@ -306,7 +308,7 @@ class Node final {
static const int kMaxInlineCapacity = InlineCapacityField::kMax - 1;
const Operator* op_;
- Bounds bounds_;
+ Type* type_;
Mark mark_;
uint32_t bit_field_;
Use* first_use_;
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index dcf71eb43d..33e17f6dd0 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -109,6 +109,7 @@
#define JS_OBJECT_OP_LIST(V) \
V(JSCreate) \
+ V(JSCreateArguments) \
V(JSCreateClosure) \
V(JSCreateLiteralArray) \
V(JSCreateLiteralObject) \
@@ -192,8 +193,7 @@
V(StoreField) \
V(StoreBuffer) \
V(StoreElement) \
- V(ObjectIsSmi) \
- V(ObjectIsNonNegativeSmi)
+ V(ObjectIsSmi)
// Opcodes for Machine-level operators.
#define MACHINE_COMPARE_BINOP_LIST(V) \
@@ -261,6 +261,10 @@
V(TruncateFloat64ToFloat32) \
V(TruncateFloat64ToInt32) \
V(TruncateInt64ToInt32) \
+ V(BitcastFloat32ToInt32) \
+ V(BitcastFloat64ToInt64) \
+ V(BitcastInt32ToFloat32) \
+ V(BitcastInt64ToFloat64) \
V(Float32Add) \
V(Float32Sub) \
V(Float32Mul) \
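
The four Bitcast* machine operators reinterpret the bits of a value in
another type without any numeric conversion. Their intended semantics
match memcpy-style type punning, as in this standalone sketch (not patch
code):

    #include <cstdint>
    #include <cstring>

    // Reinterpret the 32 bits of a float as an int32: no rounding or
    // truncation takes place, the bit pattern is preserved exactly.
    static inline int32_t BitcastFloat32ToInt32(float value) {
      int32_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return bits;
    }
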
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 6de6d2487c..60e6ad7636 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -45,6 +45,7 @@ int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
case IrOpcode::kJSInstanceOf:
// Object operations
+ case IrOpcode::kJSCreateArguments:
case IrOpcode::kJSCreateLiteralArray:
case IrOpcode::kJSCreateLiteralObject:
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index 5b15b97289..eba430f927 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -9,6 +9,7 @@
#include "src/base/flags.h"
#include "src/base/functional.h"
+#include "src/handles.h"
#include "src/zone.h"
namespace v8 {
@@ -155,7 +156,8 @@ class Operator1 : public Operator {
bool Equals(const Operator* other) const final {
if (opcode() != other->opcode()) return false;
- const Operator1<T>* that = reinterpret_cast<const Operator1<T>*>(other);
+ const Operator1<T, Pred, Hash>* that =
+ reinterpret_cast<const Operator1<T, Pred, Hash>*>(other);
return this->pred_(this->parameter(), that->parameter());
}
size_t HashCode() const final {
@@ -185,7 +187,8 @@ inline T const& OpParameter(const Operator* op) {
}
// NOTE: We have to be careful to use the right equal/hash functions below, for
-// float/double we always use the ones operating on the bit level.
+// float/double we always use the ones operating on the bit level; for Handle<>
+// we always use the ones operating on the location level.
template <>
inline float const& OpParameter(const Operator* op) {
return reinterpret_cast<const Operator1<float, base::bit_equal_to<float>,
@@ -200,6 +203,27 @@ inline double const& OpParameter(const Operator* op) {
->parameter();
}
+template <>
+inline Handle<HeapObject> const& OpParameter(const Operator* op) {
+ return reinterpret_cast<
+ const Operator1<Handle<HeapObject>, Handle<HeapObject>::equal_to,
+ Handle<HeapObject>::hash>*>(op)->parameter();
+}
+
+template <>
+inline Handle<String> const& OpParameter(const Operator* op) {
+ return reinterpret_cast<const Operator1<
+ Handle<String>, Handle<String>::equal_to, Handle<String>::hash>*>(op)
+ ->parameter();
+}
+
+template <>
+inline Handle<ScopeInfo> const& OpParameter(const Operator* op) {
+ return reinterpret_cast<
+ const Operator1<Handle<ScopeInfo>, Handle<ScopeInfo>::equal_to,
+ Handle<ScopeInfo>::hash>*>(op)->parameter();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
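
Location-level equality, as used by the Handle<> OpParameter
specializations above, compares the handle slots themselves rather than
the heap objects they reference. Assuming Handle<T>::equal_to compares
locations, as the NOTE in this hunk states, the distinction looks like:

    Handle<String> a = isolate->factory()->NewStringFromAsciiChecked("x");
    Handle<String> b(*a, isolate);  // same object, freshly allocated slot
    DCHECK(a.is_identical_to(b));               // object identity: equal
    DCHECK(!Handle<String>::equal_to()(a, b));  // location identity: not
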
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index 663cf57808..77eea3ce2c 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -99,7 +99,7 @@ static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
}
copy = graph->NewNode(orig->op(), orig->InputCount(), &tmp_inputs[0]);
if (NodeProperties::IsTyped(orig)) {
- NodeProperties::SetBounds(copy, NodeProperties::GetBounds(orig));
+ NodeProperties::SetType(copy, NodeProperties::GetType(orig));
}
mapping->at(orig->id()) = copy;
TRACE(" copy #%d:%s -> #%d\n", orig->id(), orig->op()->mnemonic(),
@@ -237,7 +237,7 @@ static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
NodeId const id = end->InputAt(i)->id();
for (NodeVector* const copy : copies) {
end->AppendInput(graph->zone(), copy->at(id));
- end->set_op(common->End(end->InputCount()));
+ NodeProperties::ChangeOp(end, common->End(end->InputCount()));
}
}
@@ -301,12 +301,14 @@ void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
CHECK_NE(0, live_input_count);
for (Node* const use : osr_loop->uses()) {
if (NodeProperties::IsPhi(use)) {
- use->set_op(common->ResizeMergeOrPhi(use->op(), live_input_count));
use->RemoveInput(0);
+ NodeProperties::ChangeOp(
+ use, common->ResizeMergeOrPhi(use->op(), live_input_count));
}
}
- osr_loop->set_op(common->ResizeMergeOrPhi(osr_loop->op(), live_input_count));
osr_loop->RemoveInput(0);
+ NodeProperties::ChangeOp(
+ osr_loop, common->ResizeMergeOrPhi(osr_loop->op(), live_input_count));
// Run control reduction and graph trimming.
// TODO(bmeurer): The OSR deconstruction could be a regular reducer and play
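
One subtlety in this file: the input mutation now happens before the
operator change. Because ChangeOp() re-verifies the node on the spot, the
input count has to match the new operator at the moment of the call.
Sketch of the required order:

    use->RemoveInput(0);  // drop the dead OSR input first
    NodeProperties::ChangeOp(
        use, common->ResizeMergeOrPhi(use->op(), live_input_count));
    // Reversing these two steps would fail verification: the phi would
    // briefly carry one more input than the resized operator allows.
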
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 964d77fe13..209ddfdf0d 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -12,6 +12,7 @@
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/basic-block-instrumentor.h"
+#include "src/compiler/bytecode-graph-builder.h"
#include "src/compiler/change-lowering.h"
#include "src/compiler/code-generator.h"
#include "src/compiler/common-operator-reducer.h"
@@ -35,6 +36,7 @@
#include "src/compiler/js-type-feedback-lowering.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/jump-threading.h"
+#include "src/compiler/live-range-separator.h"
#include "src/compiler/load-elimination.h"
#include "src/compiler/loop-analysis.h"
#include "src/compiler/loop-peeling.h"
@@ -42,7 +44,6 @@
#include "src/compiler/move-optimizer.h"
#include "src/compiler/osr.h"
#include "src/compiler/pipeline-statistics.h"
-#include "src/compiler/preprocess-live-ranges.h"
#include "src/compiler/register-allocator.h"
#include "src/compiler/register-allocator-verifier.h"
#include "src/compiler/schedule.h"
@@ -350,21 +351,6 @@ void TraceSchedule(CompilationInfo* info, Schedule* schedule) {
}
-base::SmartArrayPointer<char> GetDebugName(CompilationInfo* info) {
- if (info->code_stub() != NULL) {
- CodeStub::Major major_key = info->code_stub()->MajorKey();
- const char* major_name = CodeStub::MajorName(major_key, false);
- size_t len = strlen(major_name) + 1;
- base::SmartArrayPointer<char> name(new char[len]);
- memcpy(name.get(), major_name, len);
- return name;
- } else {
- AllowHandleDereference allow_deref;
- return info->literal()->debug_name()->ToCString();
- }
-}
-
-
class AstGraphBuilderWithPositions final : public AstGraphBuilder {
public:
AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
@@ -487,11 +473,21 @@ struct GraphBuilderPhase {
static const char* phase_name() { return "graph builder"; }
void Run(PipelineData* data, Zone* temp_zone) {
- AstGraphBuilderWithPositions graph_builder(
- temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
- data->js_type_feedback(), data->source_positions());
bool stack_check = !data->info()->IsStub();
- if (!graph_builder.CreateGraph(stack_check)) {
+ bool succeeded = false;
+
+ if (data->info()->shared_info()->HasBytecodeArray()) {
+ BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
+ data->jsgraph());
+ succeeded = graph_builder.CreateGraph(stack_check);
+ } else {
+ AstGraphBuilderWithPositions graph_builder(
+ temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
+ data->js_type_feedback(), data->source_positions());
+ succeeded = graph_builder.CreateGraph(stack_check);
+ }
+
+ if (!succeeded) {
data->set_compilation_failed();
}
}
@@ -508,9 +504,10 @@ struct InliningPhase {
CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
data->common(), data->machine());
JSContextSpecialization context_specialization(
- &graph_reducer, data->jsgraph(), data->info()->is_context_specializing()
- ? data->info()->context()
- : MaybeHandle<Context>());
+ &graph_reducer, data->jsgraph(),
+ data->info()->is_function_context_specializing()
+ ? data->info()->context()
+ : MaybeHandle<Context>());
JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
data->jsgraph());
JSInliner inliner(&graph_reducer, data->info()->is_inlining_enabled()
@@ -794,13 +791,13 @@ struct BuildLiveRangesPhase {
};
-struct PreprocessLiveRangesPhase {
- static const char* phase_name() { return "preprocess live ranges"; }
+struct SplinterLiveRangesPhase {
+ static const char* phase_name() { return "splinter live ranges"; }
void Run(PipelineData* data, Zone* temp_zone) {
- PreprocessLiveRanges live_range_preprocessor(
- data->register_allocation_data(), temp_zone);
- live_range_preprocessor.PreprocessRanges();
+ LiveRangeSeparator live_range_splinterer(data->register_allocation_data(),
+ temp_zone);
+ live_range_splinterer.Splinter();
}
};
@@ -829,6 +826,16 @@ struct AllocateDoubleRegistersPhase {
};
+struct MergeSplintersPhase {
+ static const char* phase_name() { return "merge splintered ranges"; }
+ void Run(PipelineData* pipeline_data, Zone* temp_zone) {
+ RegisterAllocationData* data = pipeline_data->register_allocation_data();
+ LiveRangeMerger live_range_merger(data, temp_zone);
+ live_range_merger.Merge();
+ }
+};
+
+
struct LocateSpillSlotsPhase {
static const char* phase_name() { return "locate spill slots"; }
@@ -996,10 +1003,8 @@ Handle<Code> Pipeline::GenerateCode() {
// TODO(mstarzinger): This is just a temporary hack to make TurboFan work,
// the correct solution is to restore the context register after invoking
// builtins from full-codegen.
- for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
- Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
- Object* builtin = isolate()->js_builtins_object()->javascript_builtin(id);
- if (*info()->closure() == builtin) return Handle<Code>::null();
+ if (Context::IsJSBuiltin(isolate()->native_context(), info()->closure())) {
+ return Handle<Code>::null();
}
ZonePool zone_pool;
@@ -1016,8 +1021,7 @@ Handle<Code> Pipeline::GenerateCode() {
OFStream json_of(json_file);
Handle<Script> script = info()->script();
FunctionLiteral* function = info()->literal();
- base::SmartArrayPointer<char> function_name =
- info()->shared_info()->DebugName()->ToCString();
+ base::SmartArrayPointer<char> function_name = info()->GetDebugName();
int pos = info()->shared_info()->start_position();
json_of << "{\"function\":\"" << function_name.get()
<< "\", \"sourcePosition\":" << pos << ", \"source\":\"";
@@ -1049,7 +1053,7 @@ Handle<Code> Pipeline::GenerateCode() {
if (FLAG_trace_turbo) {
OFStream os(stdout);
os << "---------------------------------------------------\n"
- << "Begin compiling method " << GetDebugName(info()).get()
+ << "Begin compiling method " << info()->GetDebugName().get()
<< " using Turbofan" << std::endl;
TurboCfgFile tcf(isolate());
tcf << AsC1VCompilation(info());
@@ -1146,6 +1150,36 @@ Handle<Code> Pipeline::GenerateCode() {
}
+Handle<Code> Pipeline::GenerateCodeForInterpreter(
+ Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
+ Schedule* schedule, const char* bytecode_name) {
+ CompilationInfo info(bytecode_name, isolate, graph->zone());
+
+ // Construct a pipeline for scheduling and code generation.
+ ZonePool zone_pool;
+ PipelineData data(&zone_pool, &info, graph, schedule);
+ base::SmartPointer<PipelineStatistics> pipeline_statistics;
+ if (FLAG_turbo_stats) {
+ pipeline_statistics.Reset(new PipelineStatistics(&info, &zone_pool));
+ pipeline_statistics->BeginPhaseKind("interpreter handler codegen");
+ }
+ if (FLAG_trace_turbo) {
+ FILE* json_file = OpenVisualizerLogFile(&info, NULL, "json", "w+");
+ if (json_file != nullptr) {
+ OFStream json_of(json_file);
+ json_of << "{\"function\":\"" << info.GetDebugName().get()
+ << "\", \"source\":\"\",\n\"phases\":[";
+ fclose(json_file);
+ }
+ }
+
+ Pipeline pipeline(&info);
+ pipeline.data_ = &data;
+ pipeline.RunPrintAndVerify("Machine", true);
+ return pipeline.ScheduleAndGenerateCode(call_descriptor);
+}
+
+
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
Graph* graph,
Schedule* schedule) {
@@ -1155,16 +1189,6 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
}
-Handle<Code> Pipeline::GenerateCodeForTesting(Isolate* isolate,
- CallDescriptor* call_descriptor,
- Graph* graph,
- Schedule* schedule) {
- FakeStubForTesting stub(isolate);
- CompilationInfo info(&stub, isolate, graph->zone());
- return GenerateCodeForTesting(&info, call_descriptor, graph, schedule);
-}
-
-
Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
CallDescriptor* call_descriptor,
Graph* graph,
@@ -1192,8 +1216,7 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
InstructionSequence* sequence,
bool run_verifier) {
- FakeStubForTesting stub(sequence->isolate());
- CompilationInfo info(&stub, sequence->isolate(), sequence->zone());
+ CompilationInfo info("testing", sequence->isolate(), sequence->zone());
ZonePool zone_pool;
PipelineData data(&zone_pool, &info, sequence);
Pipeline pipeline(&info);
@@ -1293,7 +1316,7 @@ Handle<Code> Pipeline::ScheduleAndGenerateCode(
}
OFStream os(stdout);
os << "---------------------------------------------------\n"
- << "Finished compiling method " << GetDebugName(info()).get()
+ << "Finished compiling method " << info()->GetDebugName().get()
<< " using Turbofan" << std::endl;
}
@@ -1317,7 +1340,7 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
base::SmartArrayPointer<char> debug_name;
#ifdef DEBUG
- debug_name = GetDebugName(data->info());
+ debug_name = info()->GetDebugName();
#endif
data->InitializeRegisterAllocationData(config, descriptor, debug_name.get());
@@ -1340,12 +1363,20 @@ void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
}
if (FLAG_turbo_preprocess_ranges) {
- Run<PreprocessLiveRangesPhase>();
+ Run<SplinterLiveRangesPhase>();
}
- // TODO(mtrofin): re-enable greedy once we have bots for range preprocessing.
- Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
- Run<AllocateDoubleRegistersPhase<LinearScanAllocator>>();
+ if (FLAG_turbo_greedy_regalloc) {
+ Run<AllocateGeneralRegistersPhase<GreedyAllocator>>();
+ Run<AllocateDoubleRegistersPhase<GreedyAllocator>>();
+ } else {
+ Run<AllocateGeneralRegistersPhase<LinearScanAllocator>>();
+ Run<AllocateDoubleRegistersPhase<LinearScanAllocator>>();
+ }
+
+ if (FLAG_turbo_preprocess_ranges) {
+ Run<MergeSplintersPhase>();
+ }
if (FLAG_turbo_frame_elision) {
Run<LocateSpillSlotsPhase>();
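
The renamed register-allocation phases implement a splinter-and-merge
strategy: live ranges crossing deferred blocks are split off before
allocation and stitched back together afterwards, so neither allocator
ever sees the deferred segments. Condensed, the flag-controlled flow
above is:

    if (FLAG_turbo_preprocess_ranges) Run<SplinterLiveRangesPhase>();
    // either allocator (GreedyAllocator or LinearScanAllocator) then runs
    // on the splintered ranges, chosen by FLAG_turbo_greedy_regalloc
    if (FLAG_turbo_preprocess_ranges) Run<MergeSplintersPhase>();
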
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index ea8b7e9b4b..90c223f67e 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -28,16 +28,15 @@ class Pipeline {
// Run the entire pipeline and generate a handle to a code object.
Handle<Code> GenerateCode();
- // Run the pipeline on a machine graph and generate code. If {schedule} is
- // {nullptr}, then compute a new schedule for code generation.
- static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
- Graph* graph,
- Schedule* schedule = nullptr);
+ // Run the pipeline on an interpreter bytecode handler machine graph and
+ // generate code.
+ static Handle<Code> GenerateCodeForInterpreter(
+ Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
+ Schedule* schedule, const char* bytecode_name);
// Run the pipeline on a machine graph and generate code. If {schedule} is
// {nullptr}, then compute a new schedule for code generation.
- static Handle<Code> GenerateCodeForTesting(Isolate* isolate,
- CallDescriptor* call_descriptor,
+ static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
Graph* graph,
Schedule* schedule = nullptr);
@@ -46,11 +45,14 @@ class Pipeline {
InstructionSequence* sequence,
bool run_verifier);
- private:
+ // Run the pipeline on a machine graph and generate code. If {schedule} is
+ // {nullptr}, then compute a new schedule for code generation.
static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
CallDescriptor* call_descriptor,
- Graph* graph, Schedule* schedule);
+ Graph* graph,
+ Schedule* schedule = nullptr);
+ private:
CompilationInfo* info_;
PipelineData* data_;
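
GenerateCodeForInterpreter() is the new entry point for compiling
bytecode-handler machine graphs; it mirrors GenerateCodeForTesting() but
builds its own CompilationInfo from the bytecode name. A hypothetical
caller sketch, where every name except the Pipeline call is an
assumption:

    Handle<Code> MakeHandlerCode(Isolate* isolate, HandlerGraph* hg) {
      return Pipeline::GenerateCodeForInterpreter(
          isolate, hg->call_descriptor(), hg->graph(), hg->schedule(),
          "LdaZero");  // the name only labels tracing/statistics output
    }
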
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 2acea2f1d5..df776fac68 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -187,8 +187,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#else
return ge;
#endif
- case kUnorderedEqual:
- case kUnorderedNotEqual:
+ default:
break;
}
UNREACHABLE();
@@ -1076,6 +1075,20 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
#endif
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kPPC_BitcastFloat32ToInt32:
+ __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kPPC_BitcastInt32ToFloat32:
+ __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+#if V8_TARGET_ARCH_PPC64
+ case kPPC_BitcastDoubleToInt64:
+ __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kPPC_BitcastInt64ToDouble:
+ __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+#endif
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
break;
@@ -1142,6 +1155,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax);
break;
+ case kCheckedLoadWord64:
+#if V8_TARGET_ARCH_PPC64
+ ASSEMBLE_CHECKED_LOAD_INTEGER(ld, ldx);
+#else
+ UNREACHABLE();
+#endif
+ break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
break;
@@ -1157,6 +1177,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
break;
+ case kCheckedStoreWord64:
+#if V8_TARGET_ARCH_PPC64
+ ASSEMBLE_CHECKED_STORE_INTEGER(std, stdx);
+#else
+ UNREACHABLE();
+#endif
+ break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT32();
break;
@@ -1596,18 +1623,20 @@ void CodeGenerator::AddNopForSmiCodeInlining() {
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
- while (padding_size > 0) {
- __ nop();
- padding_size -= v8::internal::Assembler::kInstrSize;
- }
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+ while (padding_size > 0) {
+ __ nop();
+ padding_size -= v8::internal::Assembler::kInstrSize;
}
}
}
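
The refactored EnsureSpaceForLazyDeopt() guards with a single predicate
and then pads unconditionally. With illustrative numbers: if patch_size()
is 12 bytes, last_lazy_deopt_pc_ is 100 and pc_offset() is 104, the loop
emits 100 + 12 - 104 = 8 bytes of padding, i.e. two 4-byte PPC nops. As a
standalone sketch:

    // Bytes of nop padding needed so the next deopt patch cannot
    // overwrite the previous lazy-bailout site. Inputs are illustrative.
    static int LazyDeoptPaddingBytes(int last_lazy_deopt_pc, int current_pc,
                                     int patch_size) {
      int padding = last_lazy_deopt_pc + patch_size - current_pc;
      return padding > 0 ? padding : 0;  // e.g. 100 + 12 - 104 == 8
    }
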
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index c817ef2a92..ed9bbcd91c 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -86,6 +86,10 @@ namespace compiler {
V(PPC_DoubleInsertLowWord32) \
V(PPC_DoubleInsertHighWord32) \
V(PPC_DoubleConstruct) \
+ V(PPC_BitcastInt32ToFloat32) \
+ V(PPC_BitcastFloat32ToInt32) \
+ V(PPC_BitcastInt64ToDouble) \
+ V(PPC_BitcastDoubleToInt64) \
V(PPC_LoadWordS8) \
V(PPC_LoadWordU8) \
V(PPC_LoadWordS16) \
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 197dcc13ea..91c65d14c1 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -282,6 +282,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case kRepWord32:
opcode = kCheckedLoadWord32;
break;
+ case kRepWord64:
+ opcode = kCheckedLoadWord64;
+ break;
case kRepFloat32:
opcode = kCheckedLoadFloat32;
break;
@@ -317,6 +320,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case kRepWord32:
opcode = kCheckedStoreWord32;
break;
+ case kRepWord64:
+ opcode = kCheckedStoreWord64;
+ break;
case kRepFloat32:
opcode = kCheckedStoreFloat32;
break;
@@ -904,6 +910,30 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
#endif
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kPPC_BitcastFloat32ToInt32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kPPC_BitcastDoubleToInt64, node);
+}
+#endif
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ VisitRR(this, kPPC_BitcastInt32ToFloat32, node);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kPPC_BitcastInt64ToDouble, node);
+}
+#endif
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitRRR(this, kPPC_AddDouble, node);
}
diff --git a/deps/v8/src/compiler/preprocess-live-ranges.cc b/deps/v8/src/compiler/preprocess-live-ranges.cc
deleted file mode 100644
index fee3a3d98c..0000000000
--- a/deps/v8/src/compiler/preprocess-live-ranges.cc
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/preprocess-live-ranges.h"
-#include "src/compiler/register-allocator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-#define TRACE(...) \
- do { \
- if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
- } while (false)
-
-
-namespace {
-
-LiveRange* Split(LiveRange* range, RegisterAllocationData* data,
- LifetimePosition pos) {
- DCHECK(range->Start() < pos && pos < range->End());
- DCHECK(pos.IsStart() || pos.IsGapPosition() ||
- (data->code()
- ->GetInstructionBlock(pos.ToInstructionIndex())
- ->last_instruction_index() != pos.ToInstructionIndex()));
- LiveRange* result = data->NewChildRangeFor(range);
- range->SplitAt(pos, result, data->allocation_zone());
- TRACE("Split range %d(v%d) @%d => %d.\n", range->id(),
- range->TopLevel()->id(), pos.ToInstructionIndex(), result->id());
- return result;
-}
-
-
-LifetimePosition GetSplitPositionForInstruction(const LiveRange* range,
- int instruction_index) {
- LifetimePosition ret = LifetimePosition::Invalid();
-
- ret = LifetimePosition::GapFromInstructionIndex(instruction_index);
- if (range->Start() >= ret || ret >= range->End()) {
- return LifetimePosition::Invalid();
- }
- return ret;
-}
-
-
-LiveRange* SplitRangeAfterBlock(LiveRange* range, RegisterAllocationData* data,
- const InstructionBlock* block) {
- const InstructionSequence* code = data->code();
- int last_index = block->last_instruction_index();
- int outside_index = static_cast<int>(code->instructions().size());
- bool has_handler = false;
- for (auto successor_id : block->successors()) {
- const InstructionBlock* successor = code->InstructionBlockAt(successor_id);
- if (successor->IsHandler()) {
- has_handler = true;
- }
- outside_index = Min(outside_index, successor->first_instruction_index());
- }
- int split_at = has_handler ? outside_index : last_index;
- LifetimePosition after_block =
- GetSplitPositionForInstruction(range, split_at);
-
- if (after_block.IsValid()) {
- return Split(range, data, after_block);
- }
-
- return range;
-}
-
-
-int GetFirstInstructionIndex(const UseInterval* interval) {
- int ret = interval->start().ToInstructionIndex();
- if (!interval->start().IsGapPosition() && !interval->start().IsStart()) {
- ++ret;
- }
- return ret;
-}
-
-
-bool DoesSubsequenceClobber(const InstructionSequence* code, int start,
- int end) {
- for (int i = start; i <= end; ++i) {
- if (code->InstructionAt(i)->IsCall()) return true;
- }
- return false;
-}
-
-
-void SplitRangeAtDeferredBlocksWithCalls(LiveRange* range,
- RegisterAllocationData* data) {
- DCHECK(!range->IsFixed());
- DCHECK(!range->spilled());
- if (range->TopLevel()->HasSpillOperand()) {
- TRACE(
- "Skipping deferred block analysis for live range %d because it has a "
- "spill operand.\n",
- range->TopLevel()->id());
- return;
- }
-
- const InstructionSequence* code = data->code();
- LiveRange* current_subrange = range;
-
- UseInterval* interval = current_subrange->first_interval();
-
- while (interval != nullptr) {
- int first_index = GetFirstInstructionIndex(interval);
- int last_index = interval->end().ToInstructionIndex();
-
- if (last_index > code->LastInstructionIndex()) {
- last_index = code->LastInstructionIndex();
- }
-
- interval = interval->next();
-
- for (int index = first_index; index <= last_index;) {
- const InstructionBlock* block = code->GetInstructionBlock(index);
- int last_block_index = static_cast<int>(block->last_instruction_index());
- int last_covered_index = Min(last_index, last_block_index);
- int working_index = index;
- index = block->last_instruction_index() + 1;
-
- if (!block->IsDeferred() ||
- !DoesSubsequenceClobber(code, working_index, last_covered_index)) {
- continue;
- }
-
- TRACE("Deferred block B%d clobbers range %d(v%d).\n",
- block->rpo_number().ToInt(), current_subrange->id(),
- current_subrange->TopLevel()->id());
- LifetimePosition block_start =
- GetSplitPositionForInstruction(current_subrange, working_index);
- LiveRange* block_and_after = nullptr;
- if (block_start.IsValid()) {
- block_and_after = Split(current_subrange, data, block_start);
- } else {
- block_and_after = current_subrange;
- }
- LiveRange* next = SplitRangeAfterBlock(block_and_after, data, block);
- if (next != current_subrange) interval = next->first_interval();
- current_subrange = next;
- break;
- }
- }
-}
-}
-
-
-void PreprocessLiveRanges::PreprocessRanges() {
- SplitRangesAroundDeferredBlocks();
-}
-
-
-void PreprocessLiveRanges::SplitRangesAroundDeferredBlocks() {
- size_t live_range_count = data()->live_ranges().size();
- for (size_t i = 0; i < live_range_count; i++) {
- LiveRange* range = data()->live_ranges()[i];
- if (range != nullptr && !range->IsEmpty() && !range->spilled() &&
- !range->IsFixed() && !range->IsChild()) {
- SplitRangeAtDeferredBlocksWithCalls(range, data());
- }
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/compiler/preprocess-live-ranges.h b/deps/v8/src/compiler/preprocess-live-ranges.h
deleted file mode 100644
index aa852fc7ca..0000000000
--- a/deps/v8/src/compiler/preprocess-live-ranges.h
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PREPROCESS_LIVE_RANGES_H_
-#define V8_PREPROCESS_LIVE_RANGES_H_
-
-#include "src/compiler/register-allocator.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-class PreprocessLiveRanges final {
- public:
- PreprocessLiveRanges(RegisterAllocationData* data, Zone* zone)
- : data_(data), zone_(zone) {}
- void PreprocessRanges();
-
- private:
- void SplitRangesAroundDeferredBlocks();
-
- RegisterAllocationData* data() { return data_; }
- Zone* zone() { return zone_; }
-
- RegisterAllocationData* data_;
- Zone* zone_;
-};
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-#endif // V8_PREPROCESS_LIVE_RANGES_H_
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 8013f422f6..f590902df3 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -25,13 +25,14 @@ RawMachineAssembler::RawMachineAssembler(Isolate* isolate, Graph* graph,
parameters_(nullptr),
current_block_(schedule()->start()) {
int param_count = static_cast<int>(parameter_count());
- Node* s = graph->NewNode(common_.Start(param_count));
+ // Add an extra input node for the JSFunction parameter to the start node.
+ Node* s = graph->NewNode(common_.Start(param_count + 1));
graph->SetStart(s);
if (parameter_count() == 0) return;
parameters_ = zone()->NewArray<Node*>(param_count);
for (size_t i = 0; i < parameter_count(); ++i) {
parameters_[i] =
- NewNode(common()->Parameter(static_cast<int>(i)), graph->start());
+ AddNode(common()->Parameter(static_cast<int>(i)), graph->start());
}
}
@@ -63,7 +64,7 @@ void RawMachineAssembler::Goto(Label* label) {
void RawMachineAssembler::Branch(Node* condition, Label* true_val,
Label* false_val) {
DCHECK(current_block_ != schedule()->end());
- Node* branch = NewNode(common()->Branch(), condition);
+ Node* branch = AddNode(common()->Branch(), condition);
schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
current_block_ = nullptr;
}
@@ -74,7 +75,7 @@ void RawMachineAssembler::Switch(Node* index, Label* default_label,
size_t case_count) {
DCHECK_NE(schedule()->end(), current_block_);
size_t succ_count = case_count + 1;
- Node* switch_node = NewNode(common()->Switch(succ_count), index);
+ Node* switch_node = AddNode(common()->Switch(succ_count), index);
BasicBlock** succ_blocks = zone()->NewArray<BasicBlock*>(succ_count);
for (size_t index = 0; index < case_count; ++index) {
int32_t case_value = case_values[index];
@@ -94,7 +95,7 @@ void RawMachineAssembler::Switch(Node* index, Label* default_label,
void RawMachineAssembler::Return(Node* value) {
- Node* ret = graph()->NewNode(common()->Return(), value);
+ Node* ret = MakeNode(common()->Return(), 1, &value);
schedule()->AddReturn(CurrentBlock(), ret);
current_block_ = nullptr;
}
@@ -104,15 +105,56 @@ Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
Node** args) {
int param_count =
static_cast<int>(desc->GetMachineSignature()->parameter_count());
- Node** buffer = zone()->NewArray<Node*>(param_count + 1);
+ int input_count = param_count + 3;
+ Node** buffer = zone()->NewArray<Node*>(input_count);
int index = 0;
buffer[index++] = function;
for (int i = 0; i < param_count; i++) {
buffer[index++] = args[i];
}
- Node* call = graph()->NewNode(common()->Call(desc), param_count + 1, buffer);
- schedule()->AddNode(CurrentBlock(), call);
- return call;
+ buffer[index++] = graph()->start();
+ buffer[index++] = graph()->start();
+ return AddNode(common()->Call(desc), input_count, buffer);
+}
+
+
+Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
+ Node* function, Node** args,
+ Node* frame_state) {
+ DCHECK(desc->NeedsFrameState());
+ int param_count =
+ static_cast<int>(desc->GetMachineSignature()->parameter_count());
+ int input_count = param_count + 4;
+ Node** buffer = zone()->NewArray<Node*>(input_count);
+ int index = 0;
+ buffer[index++] = function;
+ for (int i = 0; i < param_count; i++) {
+ buffer[index++] = args[i];
+ }
+ buffer[index++] = frame_state;
+ buffer[index++] = graph()->start();
+ buffer[index++] = graph()->start();
+ return AddNode(common()->Call(desc), input_count, buffer);
+}
+
+
+Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
+ Node** args) {
+ int param_count =
+ static_cast<int>(desc->GetMachineSignature()->parameter_count());
+ int input_count = param_count + 3;
+ Node** buffer = zone()->NewArray<Node*>(input_count);
+ int index = 0;
+ buffer[index++] = function;
+ for (int i = 0; i < param_count; i++) {
+ buffer[index++] = args[i];
+ }
+ buffer[index++] = graph()->start();
+ buffer[index++] = graph()->start();
+ Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
+ schedule()->AddTailCall(CurrentBlock(), tail_call);
+ current_block_ = nullptr;
+ return tail_call;
}
@@ -124,39 +166,38 @@ Node* RawMachineAssembler::CallFunctionStub0(Node* function, Node* receiver,
isolate(), zone(), callable.descriptor(), 1,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
Node* stub_code = HeapConstant(callable.code());
- Node* call = graph()->NewNode(common()->Call(desc), stub_code, function,
- receiver, context, frame_state);
- schedule()->AddNode(CurrentBlock(), call);
- return call;
+ return AddNode(common()->Call(desc), stub_code, function, receiver, context,
+ frame_state, graph()->start(), graph()->start());
}
-Node* RawMachineAssembler::CallJS0(Node* function, Node* receiver,
- Node* context, Node* frame_state) {
- CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
- zone(), false, 1, CallDescriptor::kNeedsFrameState);
- Node* call = graph()->NewNode(common()->Call(descriptor), function, receiver,
- context, frame_state);
- schedule()->AddNode(CurrentBlock(), call);
- return call;
+Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
+ Node* arg1, Node* context) {
+ CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+ zone(), function, 1, Operator::kNoProperties, false);
+
+ Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
+ Node* ref = AddNode(
+ common()->ExternalConstant(ExternalReference(function, isolate())));
+ Node* arity = Int32Constant(1);
+
+ return AddNode(common()->Call(descriptor), centry, arg1, ref, arity, context,
+ graph()->start(), graph()->start());
}
-Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
- Node* arg0, Node* context,
- Node* frame_state) {
+Node* RawMachineAssembler::CallRuntime2(Runtime::FunctionId function,
+ Node* arg1, Node* arg2, Node* context) {
CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
- zone(), function, 1, Operator::kNoProperties);
+ zone(), function, 2, Operator::kNoProperties, false);
Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
- Node* ref = NewNode(
+ Node* ref = AddNode(
common()->ExternalConstant(ExternalReference(function, isolate())));
- Node* arity = Int32Constant(1);
+ Node* arity = Int32Constant(2);
- Node* call = graph()->NewNode(common()->Call(descriptor), centry, arg0, ref,
- arity, context, frame_state);
- schedule()->AddNode(CurrentBlock(), call);
- return call;
+ return AddNode(common()->Call(descriptor), centry, arg1, arg2, ref, arity,
+ context, graph()->start(), graph()->start());
}
@@ -167,9 +208,8 @@ Node* RawMachineAssembler::CallCFunction0(MachineType return_type,
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- Node* call = graph()->NewNode(common()->Call(descriptor), function);
- schedule()->AddNode(CurrentBlock(), call);
- return call;
+ return AddNode(common()->Call(descriptor), function, graph()->start(),
+ graph()->start());
}
@@ -182,9 +222,8 @@ Node* RawMachineAssembler::CallCFunction1(MachineType return_type,
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- Node* call = graph()->NewNode(common()->Call(descriptor), function, arg0);
- schedule()->AddNode(CurrentBlock(), call);
- return call;
+ return AddNode(common()->Call(descriptor), function, arg0, graph()->start(),
+ graph()->start());
}
@@ -199,10 +238,8 @@ Node* RawMachineAssembler::CallCFunction2(MachineType return_type,
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
- Node* call =
- graph()->NewNode(common()->Call(descriptor), function, arg0, arg1);
- schedule()->AddNode(CurrentBlock(), call);
- return call;
+ return AddNode(common()->Call(descriptor), function, arg0, arg1,
+ graph()->start(), graph()->start());
}
@@ -222,24 +259,20 @@ Node* RawMachineAssembler::CallCFunction8(
builder.AddParam(arg5_type);
builder.AddParam(arg6_type);
builder.AddParam(arg7_type);
+ Node* args[] = {function,
+ arg0,
+ arg1,
+ arg2,
+ arg3,
+ arg4,
+ arg5,
+ arg6,
+ arg7,
+ graph()->start(),
+ graph()->start()};
const CallDescriptor* descriptor =
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
-
- Node* call = graph()->NewNode(common()->Call(descriptor), function, arg0,
- arg1, arg2, arg3, arg4, arg5, arg6, arg7);
- schedule()->AddNode(CurrentBlock(), call);
- return call;
-}
-
-
-Node* RawMachineAssembler::TailCallInterpreterDispatch(
- const CallDescriptor* call_descriptor, Node* target, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5) {
- Node* tail_call =
- graph()->NewNode(common()->TailCall(call_descriptor), target, arg1, arg2,
- arg3, arg4, arg5, graph()->start(), graph()->start());
- schedule()->AddTailCall(CurrentBlock(), tail_call);
- return tail_call;
+ return AddNode(common()->Call(descriptor), arraysize(args), args);
}
@@ -269,15 +302,23 @@ BasicBlock* RawMachineAssembler::CurrentBlock() {
}
-Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
- Node** inputs) {
+Node* RawMachineAssembler::AddNode(const Operator* op, int input_count,
+ Node** inputs) {
DCHECK_NOT_NULL(schedule_);
DCHECK(current_block_ != nullptr);
- Node* node = graph()->NewNode(op, input_count, inputs);
+ Node* node = MakeNode(op, input_count, inputs);
schedule()->AddNode(CurrentBlock(), node);
return node;
}
+
+Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
+ Node** inputs) {
+ // The raw machine assembler nodes do not have effect and control inputs,
+ // so we disable checking input counts here.
+ return graph()->NewNodeUnchecked(op, input_count, inputs);
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
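
The AddNode()/MakeNode() split separates construction from scheduling:
MakeNode() only builds the (input-count-unchecked) node, while AddNode()
also places it in the current basic block. Sketch of the contract:

    Node* n = MakeNode(op, input_count, inputs);  // in graph, unscheduled
    schedule()->AddNode(CurrentBlock(), n);       // == AddNode(op, ...)
    // Return() and TailCallN() call MakeNode() directly because they
    // schedule the node via AddReturn/AddTailCall instead of AddNode.
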
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 05f4ebab02..291f69f3b0 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -12,6 +12,7 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
+#include "src/factory.h"
namespace v8 {
namespace internal {
@@ -27,6 +28,10 @@ class Schedule;
// In order to create a schedule on-the-fly, the assembler keeps track of basic
// blocks by having one current basic block being populated and by referencing
// other basic blocks through the use of labels.
+//
+// Also note that the generated graph is only valid together with the generated
+// schedule; using one without the other is invalid, as the graph is inherently
+// non-schedulable due to missing control and effect dependencies.
class RawMachineAssembler {
public:
class Label {
@@ -60,7 +65,6 @@ class RawMachineAssembler {
const MachineSignature* machine_sig() const {
return call_descriptor_->GetMachineSignature();
}
- BasicBlock* CurrentBlock();
// Finalizes the schedule and exports it to be used for code generation. Note
// that this RawMachineAssembler becomes invalid after export.
@@ -72,9 +76,7 @@ class RawMachineAssembler {
// hence will not switch the current basic block.
Node* UndefinedConstant() {
- Unique<HeapObject> unique = Unique<HeapObject>::CreateImmovable(
- isolate()->factory()->undefined_value());
- return NewNode(common()->HeapConstant(unique));
+ return HeapConstant(isolate()->factory()->undefined_value());
}
// Constants.
@@ -87,33 +89,33 @@ class RawMachineAssembler {
: Int32Constant(static_cast<int>(value));
}
Node* Int32Constant(int32_t value) {
- return NewNode(common()->Int32Constant(value));
+ return AddNode(common()->Int32Constant(value));
}
Node* Int64Constant(int64_t value) {
- return NewNode(common()->Int64Constant(value));
+ return AddNode(common()->Int64Constant(value));
}
Node* NumberConstant(double value) {
- return NewNode(common()->NumberConstant(value));
+ return AddNode(common()->NumberConstant(value));
}
Node* Float32Constant(float value) {
- return NewNode(common()->Float32Constant(value));
+ return AddNode(common()->Float32Constant(value));
}
Node* Float64Constant(double value) {
- return NewNode(common()->Float64Constant(value));
+ return AddNode(common()->Float64Constant(value));
}
Node* HeapConstant(Handle<HeapObject> object) {
- Unique<HeapObject> val = Unique<HeapObject>::CreateUninitialized(object);
- return NewNode(common()->HeapConstant(val));
+ return AddNode(common()->HeapConstant(object));
}
- Node* HeapConstant(Unique<HeapObject> object) {
- return NewNode(common()->HeapConstant(object));
+ Node* BooleanConstant(bool value) {
+ Handle<Object> object = isolate()->factory()->ToBoolean(value);
+ return HeapConstant(Handle<HeapObject>::cast(object));
}
Node* ExternalConstant(ExternalReference address) {
- return NewNode(common()->ExternalConstant(address));
+ return AddNode(common()->ExternalConstant(address));
}
Node* Projection(int index, Node* a) {
- return NewNode(common()->Projection(index), a);
+ return AddNode(common()->Projection(index), a);
}
// Memory Operations.
@@ -121,39 +123,39 @@ class RawMachineAssembler {
return Load(rep, base, IntPtrConstant(0));
}
Node* Load(MachineType rep, Node* base, Node* index) {
- return NewNode(machine()->Load(rep), base, index, graph()->start(),
+ return AddNode(machine()->Load(rep), base, index, graph()->start(),
graph()->start());
}
Node* Store(MachineType rep, Node* base, Node* value) {
return Store(rep, base, IntPtrConstant(0), value);
}
Node* Store(MachineType rep, Node* base, Node* index, Node* value) {
- return NewNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
+ return AddNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)),
base, index, value, graph()->start(), graph()->start());
}
// Arithmetic Operations.
Node* WordAnd(Node* a, Node* b) {
- return NewNode(machine()->WordAnd(), a, b);
+ return AddNode(machine()->WordAnd(), a, b);
}
- Node* WordOr(Node* a, Node* b) { return NewNode(machine()->WordOr(), a, b); }
+ Node* WordOr(Node* a, Node* b) { return AddNode(machine()->WordOr(), a, b); }
Node* WordXor(Node* a, Node* b) {
- return NewNode(machine()->WordXor(), a, b);
+ return AddNode(machine()->WordXor(), a, b);
}
Node* WordShl(Node* a, Node* b) {
- return NewNode(machine()->WordShl(), a, b);
+ return AddNode(machine()->WordShl(), a, b);
}
Node* WordShr(Node* a, Node* b) {
- return NewNode(machine()->WordShr(), a, b);
+ return AddNode(machine()->WordShr(), a, b);
}
Node* WordSar(Node* a, Node* b) {
- return NewNode(machine()->WordSar(), a, b);
+ return AddNode(machine()->WordSar(), a, b);
}
Node* WordRor(Node* a, Node* b) {
- return NewNode(machine()->WordRor(), a, b);
+ return AddNode(machine()->WordRor(), a, b);
}
Node* WordEqual(Node* a, Node* b) {
- return NewNode(machine()->WordEqual(), a, b);
+ return AddNode(machine()->WordEqual(), a, b);
}
Node* WordNotEqual(Node* a, Node* b) {
return WordBinaryNot(WordEqual(a, b));
@@ -174,29 +176,29 @@ class RawMachineAssembler {
}
Node* Word32And(Node* a, Node* b) {
- return NewNode(machine()->Word32And(), a, b);
+ return AddNode(machine()->Word32And(), a, b);
}
Node* Word32Or(Node* a, Node* b) {
- return NewNode(machine()->Word32Or(), a, b);
+ return AddNode(machine()->Word32Or(), a, b);
}
Node* Word32Xor(Node* a, Node* b) {
- return NewNode(machine()->Word32Xor(), a, b);
+ return AddNode(machine()->Word32Xor(), a, b);
}
Node* Word32Shl(Node* a, Node* b) {
- return NewNode(machine()->Word32Shl(), a, b);
+ return AddNode(machine()->Word32Shl(), a, b);
}
Node* Word32Shr(Node* a, Node* b) {
- return NewNode(machine()->Word32Shr(), a, b);
+ return AddNode(machine()->Word32Shr(), a, b);
}
Node* Word32Sar(Node* a, Node* b) {
- return NewNode(machine()->Word32Sar(), a, b);
+ return AddNode(machine()->Word32Sar(), a, b);
}
Node* Word32Ror(Node* a, Node* b) {
- return NewNode(machine()->Word32Ror(), a, b);
+ return AddNode(machine()->Word32Ror(), a, b);
}
- Node* Word32Clz(Node* a) { return NewNode(machine()->Word32Clz(), a); }
+ Node* Word32Clz(Node* a) { return AddNode(machine()->Word32Clz(), a); }
Node* Word32Equal(Node* a, Node* b) {
- return NewNode(machine()->Word32Equal(), a, b);
+ return AddNode(machine()->Word32Equal(), a, b);
}
Node* Word32NotEqual(Node* a, Node* b) {
return Word32BinaryNot(Word32Equal(a, b));
@@ -205,28 +207,28 @@ class RawMachineAssembler {
Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }
Node* Word64And(Node* a, Node* b) {
- return NewNode(machine()->Word64And(), a, b);
+ return AddNode(machine()->Word64And(), a, b);
}
Node* Word64Or(Node* a, Node* b) {
- return NewNode(machine()->Word64Or(), a, b);
+ return AddNode(machine()->Word64Or(), a, b);
}
Node* Word64Xor(Node* a, Node* b) {
- return NewNode(machine()->Word64Xor(), a, b);
+ return AddNode(machine()->Word64Xor(), a, b);
}
Node* Word64Shl(Node* a, Node* b) {
- return NewNode(machine()->Word64Shl(), a, b);
+ return AddNode(machine()->Word64Shl(), a, b);
}
Node* Word64Shr(Node* a, Node* b) {
- return NewNode(machine()->Word64Shr(), a, b);
+ return AddNode(machine()->Word64Shr(), a, b);
}
Node* Word64Sar(Node* a, Node* b) {
- return NewNode(machine()->Word64Sar(), a, b);
+ return AddNode(machine()->Word64Sar(), a, b);
}
Node* Word64Ror(Node* a, Node* b) {
- return NewNode(machine()->Word64Ror(), a, b);
+ return AddNode(machine()->Word64Ror(), a, b);
}
Node* Word64Equal(Node* a, Node* b) {
- return NewNode(machine()->Word64Equal(), a, b);
+ return AddNode(machine()->Word64Equal(), a, b);
}
Node* Word64NotEqual(Node* a, Node* b) {
return Word64BinaryNot(Word64Equal(a, b));
@@ -235,49 +237,49 @@ class RawMachineAssembler {
Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); }
Node* Int32Add(Node* a, Node* b) {
- return NewNode(machine()->Int32Add(), a, b);
+ return AddNode(machine()->Int32Add(), a, b);
}
Node* Int32AddWithOverflow(Node* a, Node* b) {
- return NewNode(machine()->Int32AddWithOverflow(), a, b);
+ return AddNode(machine()->Int32AddWithOverflow(), a, b);
}
Node* Int32Sub(Node* a, Node* b) {
- return NewNode(machine()->Int32Sub(), a, b);
+ return AddNode(machine()->Int32Sub(), a, b);
}
Node* Int32SubWithOverflow(Node* a, Node* b) {
- return NewNode(machine()->Int32SubWithOverflow(), a, b);
+ return AddNode(machine()->Int32SubWithOverflow(), a, b);
}
Node* Int32Mul(Node* a, Node* b) {
- return NewNode(machine()->Int32Mul(), a, b);
+ return AddNode(machine()->Int32Mul(), a, b);
}
Node* Int32MulHigh(Node* a, Node* b) {
- return NewNode(machine()->Int32MulHigh(), a, b);
+ return AddNode(machine()->Int32MulHigh(), a, b);
}
Node* Int32Div(Node* a, Node* b) {
- return NewNode(machine()->Int32Div(), a, b, graph()->start());
+ return AddNode(machine()->Int32Div(), a, b, graph()->start());
}
Node* Int32Mod(Node* a, Node* b) {
- return NewNode(machine()->Int32Mod(), a, b, graph()->start());
+ return AddNode(machine()->Int32Mod(), a, b, graph()->start());
}
Node* Int32LessThan(Node* a, Node* b) {
- return NewNode(machine()->Int32LessThan(), a, b);
+ return AddNode(machine()->Int32LessThan(), a, b);
}
Node* Int32LessThanOrEqual(Node* a, Node* b) {
- return NewNode(machine()->Int32LessThanOrEqual(), a, b);
+ return AddNode(machine()->Int32LessThanOrEqual(), a, b);
}
Node* Uint32Div(Node* a, Node* b) {
- return NewNode(machine()->Uint32Div(), a, b, graph()->start());
+ return AddNode(machine()->Uint32Div(), a, b, graph()->start());
}
Node* Uint32LessThan(Node* a, Node* b) {
- return NewNode(machine()->Uint32LessThan(), a, b);
+ return AddNode(machine()->Uint32LessThan(), a, b);
}
Node* Uint32LessThanOrEqual(Node* a, Node* b) {
- return NewNode(machine()->Uint32LessThanOrEqual(), a, b);
+ return AddNode(machine()->Uint32LessThanOrEqual(), a, b);
}
Node* Uint32Mod(Node* a, Node* b) {
- return NewNode(machine()->Uint32Mod(), a, b, graph()->start());
+ return AddNode(machine()->Uint32Mod(), a, b, graph()->start());
}
Node* Uint32MulHigh(Node* a, Node* b) {
- return NewNode(machine()->Uint32MulHigh(), a, b);
+ return AddNode(machine()->Uint32MulHigh(), a, b);
}
Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
@@ -286,42 +288,42 @@ class RawMachineAssembler {
Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
Node* Int64Add(Node* a, Node* b) {
- return NewNode(machine()->Int64Add(), a, b);
+ return AddNode(machine()->Int64Add(), a, b);
}
Node* Int64Sub(Node* a, Node* b) {
- return NewNode(machine()->Int64Sub(), a, b);
+ return AddNode(machine()->Int64Sub(), a, b);
}
Node* Int64Mul(Node* a, Node* b) {
- return NewNode(machine()->Int64Mul(), a, b);
+ return AddNode(machine()->Int64Mul(), a, b);
}
Node* Int64Div(Node* a, Node* b) {
- return NewNode(machine()->Int64Div(), a, b);
+ return AddNode(machine()->Int64Div(), a, b);
}
Node* Int64Mod(Node* a, Node* b) {
- return NewNode(machine()->Int64Mod(), a, b);
+ return AddNode(machine()->Int64Mod(), a, b);
}
Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
Node* Int64LessThan(Node* a, Node* b) {
- return NewNode(machine()->Int64LessThan(), a, b);
+ return AddNode(machine()->Int64LessThan(), a, b);
}
Node* Int64LessThanOrEqual(Node* a, Node* b) {
- return NewNode(machine()->Int64LessThanOrEqual(), a, b);
+ return AddNode(machine()->Int64LessThanOrEqual(), a, b);
}
Node* Uint64LessThan(Node* a, Node* b) {
- return NewNode(machine()->Uint64LessThan(), a, b);
+ return AddNode(machine()->Uint64LessThan(), a, b);
}
Node* Uint64LessThanOrEqual(Node* a, Node* b) {
- return NewNode(machine()->Uint64LessThanOrEqual(), a, b);
+ return AddNode(machine()->Uint64LessThanOrEqual(), a, b);
}
Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
return Int64LessThanOrEqual(b, a);
}
Node* Uint64Div(Node* a, Node* b) {
- return NewNode(machine()->Uint64Div(), a, b);
+ return AddNode(machine()->Uint64Div(), a, b);
}
Node* Uint64Mod(Node* a, Node* b) {
- return NewNode(machine()->Uint64Mod(), a, b);
+ return AddNode(machine()->Uint64Mod(), a, b);
}
#define INTPTR_BINOP(prefix, name) \
@@ -342,30 +344,30 @@ class RawMachineAssembler {
#undef INTPTR_BINOP
Node* Float32Add(Node* a, Node* b) {
- return NewNode(machine()->Float32Add(), a, b);
+ return AddNode(machine()->Float32Add(), a, b);
}
Node* Float32Sub(Node* a, Node* b) {
- return NewNode(machine()->Float32Sub(), a, b);
+ return AddNode(machine()->Float32Sub(), a, b);
}
Node* Float32Mul(Node* a, Node* b) {
- return NewNode(machine()->Float32Mul(), a, b);
+ return AddNode(machine()->Float32Mul(), a, b);
}
Node* Float32Div(Node* a, Node* b) {
- return NewNode(machine()->Float32Div(), a, b);
+ return AddNode(machine()->Float32Div(), a, b);
}
- Node* Float32Abs(Node* a) { return NewNode(machine()->Float32Abs(), a); }
- Node* Float32Sqrt(Node* a) { return NewNode(machine()->Float32Sqrt(), a); }
+ Node* Float32Abs(Node* a) { return AddNode(machine()->Float32Abs(), a); }
+ Node* Float32Sqrt(Node* a) { return AddNode(machine()->Float32Sqrt(), a); }
Node* Float32Equal(Node* a, Node* b) {
- return NewNode(machine()->Float32Equal(), a, b);
+ return AddNode(machine()->Float32Equal(), a, b);
}
Node* Float32NotEqual(Node* a, Node* b) {
return WordBinaryNot(Float32Equal(a, b));
}
Node* Float32LessThan(Node* a, Node* b) {
- return NewNode(machine()->Float32LessThan(), a, b);
+ return AddNode(machine()->Float32LessThan(), a, b);
}
Node* Float32LessThanOrEqual(Node* a, Node* b) {
- return NewNode(machine()->Float32LessThanOrEqual(), a, b);
+ return AddNode(machine()->Float32LessThanOrEqual(), a, b);
}
Node* Float32GreaterThan(Node* a, Node* b) { return Float32LessThan(b, a); }
Node* Float32GreaterThanOrEqual(Node* a, Node* b) {
@@ -373,33 +375,33 @@ class RawMachineAssembler {
}
Node* Float64Add(Node* a, Node* b) {
- return NewNode(machine()->Float64Add(), a, b);
+ return AddNode(machine()->Float64Add(), a, b);
}
Node* Float64Sub(Node* a, Node* b) {
- return NewNode(machine()->Float64Sub(), a, b);
+ return AddNode(machine()->Float64Sub(), a, b);
}
Node* Float64Mul(Node* a, Node* b) {
- return NewNode(machine()->Float64Mul(), a, b);
+ return AddNode(machine()->Float64Mul(), a, b);
}
Node* Float64Div(Node* a, Node* b) {
- return NewNode(machine()->Float64Div(), a, b);
+ return AddNode(machine()->Float64Div(), a, b);
}
Node* Float64Mod(Node* a, Node* b) {
- return NewNode(machine()->Float64Mod(), a, b);
+ return AddNode(machine()->Float64Mod(), a, b);
}
- Node* Float64Abs(Node* a) { return NewNode(machine()->Float64Abs(), a); }
- Node* Float64Sqrt(Node* a) { return NewNode(machine()->Float64Sqrt(), a); }
+ Node* Float64Abs(Node* a) { return AddNode(machine()->Float64Abs(), a); }
+ Node* Float64Sqrt(Node* a) { return AddNode(machine()->Float64Sqrt(), a); }
Node* Float64Equal(Node* a, Node* b) {
- return NewNode(machine()->Float64Equal(), a, b);
+ return AddNode(machine()->Float64Equal(), a, b);
}
Node* Float64NotEqual(Node* a, Node* b) {
return WordBinaryNot(Float64Equal(a, b));
}
Node* Float64LessThan(Node* a, Node* b) {
- return NewNode(machine()->Float64LessThan(), a, b);
+ return AddNode(machine()->Float64LessThan(), a, b);
}
Node* Float64LessThanOrEqual(Node* a, Node* b) {
- return NewNode(machine()->Float64LessThanOrEqual(), a, b);
+ return AddNode(machine()->Float64LessThanOrEqual(), a, b);
}
Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); }
Node* Float64GreaterThanOrEqual(Node* a, Node* b) {
@@ -408,62 +410,74 @@ class RawMachineAssembler {
// Conversions.
Node* ChangeFloat32ToFloat64(Node* a) {
- return NewNode(machine()->ChangeFloat32ToFloat64(), a);
+ return AddNode(machine()->ChangeFloat32ToFloat64(), a);
}
Node* ChangeInt32ToFloat64(Node* a) {
- return NewNode(machine()->ChangeInt32ToFloat64(), a);
+ return AddNode(machine()->ChangeInt32ToFloat64(), a);
}
Node* ChangeUint32ToFloat64(Node* a) {
- return NewNode(machine()->ChangeUint32ToFloat64(), a);
+ return AddNode(machine()->ChangeUint32ToFloat64(), a);
}
Node* ChangeFloat64ToInt32(Node* a) {
- return NewNode(machine()->ChangeFloat64ToInt32(), a);
+ return AddNode(machine()->ChangeFloat64ToInt32(), a);
}
Node* ChangeFloat64ToUint32(Node* a) {
- return NewNode(machine()->ChangeFloat64ToUint32(), a);
+ return AddNode(machine()->ChangeFloat64ToUint32(), a);
}
Node* ChangeInt32ToInt64(Node* a) {
- return NewNode(machine()->ChangeInt32ToInt64(), a);
+ return AddNode(machine()->ChangeInt32ToInt64(), a);
}
Node* ChangeUint32ToUint64(Node* a) {
- return NewNode(machine()->ChangeUint32ToUint64(), a);
+ return AddNode(machine()->ChangeUint32ToUint64(), a);
}
Node* TruncateFloat64ToFloat32(Node* a) {
- return NewNode(machine()->TruncateFloat64ToFloat32(), a);
+ return AddNode(machine()->TruncateFloat64ToFloat32(), a);
}
Node* TruncateFloat64ToInt32(TruncationMode mode, Node* a) {
- return NewNode(machine()->TruncateFloat64ToInt32(mode), a);
+ return AddNode(machine()->TruncateFloat64ToInt32(mode), a);
}
Node* TruncateInt64ToInt32(Node* a) {
- return NewNode(machine()->TruncateInt64ToInt32(), a);
+ return AddNode(machine()->TruncateInt64ToInt32(), a);
+ }
+ Node* BitcastFloat32ToInt32(Node* a) {
+ return AddNode(machine()->BitcastFloat32ToInt32(), a);
+ }
+ Node* BitcastFloat64ToInt64(Node* a) {
+ return AddNode(machine()->BitcastFloat64ToInt64(), a);
+ }
+ Node* BitcastInt32ToFloat32(Node* a) {
+ return AddNode(machine()->BitcastInt32ToFloat32(), a);
+ }
+ Node* BitcastInt64ToFloat64(Node* a) {
+ return AddNode(machine()->BitcastInt64ToFloat64(), a);
}
Node* Float64RoundDown(Node* a) {
- return NewNode(machine()->Float64RoundDown().op(), a);
+ return AddNode(machine()->Float64RoundDown().op(), a);
}
Node* Float64RoundTruncate(Node* a) {
- return NewNode(machine()->Float64RoundTruncate().op(), a);
+ return AddNode(machine()->Float64RoundTruncate().op(), a);
}
Node* Float64RoundTiesAway(Node* a) {
- return NewNode(machine()->Float64RoundTiesAway().op(), a);
+ return AddNode(machine()->Float64RoundTiesAway().op(), a);
}
// Float64 bit operations.
Node* Float64ExtractLowWord32(Node* a) {
- return NewNode(machine()->Float64ExtractLowWord32(), a);
+ return AddNode(machine()->Float64ExtractLowWord32(), a);
}
Node* Float64ExtractHighWord32(Node* a) {
- return NewNode(machine()->Float64ExtractHighWord32(), a);
+ return AddNode(machine()->Float64ExtractHighWord32(), a);
}
Node* Float64InsertLowWord32(Node* a, Node* b) {
- return NewNode(machine()->Float64InsertLowWord32(), a, b);
+ return AddNode(machine()->Float64InsertLowWord32(), a, b);
}
Node* Float64InsertHighWord32(Node* a, Node* b) {
- return NewNode(machine()->Float64InsertHighWord32(), a, b);
+ return AddNode(machine()->Float64InsertHighWord32(), a, b);
}
// Stack operations.
- Node* LoadStackPointer() { return NewNode(machine()->LoadStackPointer()); }
- Node* LoadFramePointer() { return NewNode(machine()->LoadFramePointer()); }
+ Node* LoadStackPointer() { return AddNode(machine()->LoadStackPointer()); }
+ Node* LoadFramePointer() { return AddNode(machine()->LoadFramePointer()); }
// Parameters.
Node* Parameter(size_t index);
@@ -481,16 +495,19 @@ class RawMachineAssembler {
// Call through a given call descriptor with the given arguments.
Node* CallN(CallDescriptor* desc, Node* function, Node** args);
-
+ // Call through a given call descriptor with the given arguments and
+ // frame-state.
+ Node* CallNWithFrameState(CallDescriptor* desc, Node* function, Node** args,
+ Node* frame_state);
+ // Tail call through the given call descriptor with the given arguments.
+ Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
// Call through CallFunctionStub with lazy deopt and frame-state.
Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
Node* frame_state, CallFunctionFlags flags);
- // Call to a JS function with zero arguments.
- Node* CallJS0(Node* function, Node* receiver, Node* context,
- Node* frame_state);
- // Call to a runtime function with zero arguments.
- Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context,
- Node* frame_state);
+ // Call to a runtime function with one argument.
+ Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context);
+ // Call to a runtime function with two arguments.
+ Node* CallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
+ Node* context);
// Call to a C function with zero arguments.
Node* CallCFunction0(MachineType return_type, Node* function);
// Call to a C function with one parameter.
@@ -508,9 +525,6 @@ class RawMachineAssembler {
MachineType arg7_type, Node* function, Node* arg0,
Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, Node* arg6, Node* arg7);
- Node* TailCallInterpreterDispatch(const CallDescriptor* call_descriptor,
- Node* target, Node* arg1, Node* arg2,
- Node* arg3, Node* arg4, Node* arg5);
// ===========================================================================
// The following utility methods deal with control flow, hence might switch
@@ -527,13 +541,13 @@ class RawMachineAssembler {
// Variables.
Node* Phi(MachineType type, Node* n1, Node* n2) {
- return NewNode(common()->Phi(type, 2), n1, n2);
+ return AddNode(common()->Phi(type, 2), n1, n2);
}
Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3) {
- return NewNode(common()->Phi(type, 3), n1, n2, n3);
+ return AddNode(common()->Phi(type, 3), n1, n2, n3);
}
Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3, Node* n4) {
- return NewNode(common()->Phi(type, 4), n1, n2, n3, n4);
+ return AddNode(common()->Phi(type, 4), n1, n2, n3, n4);
}
// ===========================================================================
@@ -541,48 +555,23 @@ class RawMachineAssembler {
// are not covered by the above utility methods. There should rarely be a need
// to do that outside of testing though.
- Node* NewNode(const Operator* op) {
- return MakeNode(op, 0, static_cast<Node**>(NULL));
- }
-
- Node* NewNode(const Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
+ Node* AddNode(const Operator* op, int input_count, Node** inputs);
- Node* NewNode(const Operator* op, Node* n1, Node* n2) {
- Node* buffer[] = {n1, n2};
- return MakeNode(op, arraysize(buffer), buffer);
+ Node* AddNode(const Operator* op) {
+ return AddNode(op, 0, static_cast<Node**>(nullptr));
}
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
- Node* buffer[] = {n1, n2, n3};
- return MakeNode(op, arraysize(buffer), buffer);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
- Node* buffer[] = {n1, n2, n3, n4};
- return MakeNode(op, arraysize(buffer), buffer);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5) {
- Node* buffer[] = {n1, n2, n3, n4, n5};
- return MakeNode(op, arraysize(buffer), buffer);
- }
-
- Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
- Node* n5, Node* n6) {
- Node* nodes[] = {n1, n2, n3, n4, n5, n6};
- return MakeNode(op, arraysize(nodes), nodes);
- }
-
- Node* NewNode(const Operator* op, int value_input_count,
- Node** value_inputs) {
- return MakeNode(op, value_input_count, value_inputs);
+ template <class... TArgs>
+ Node* AddNode(const Operator* op, Node* n1, TArgs... args) {
+ Node* buffer[] = {n1, args...};
+ return AddNode(op, sizeof...(args) + 1, buffer);
}
private:
Node* MakeNode(const Operator* op, int input_count, Node** inputs);
BasicBlock* Use(Label* label);
BasicBlock* EnsureBlock(Label* label);
+ BasicBlock* CurrentBlock();
Isolate* isolate_;
Graph* graph_;
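The hunk above collapses the fixed-arity NewNode overloads into one variadic
AddNode that expands its argument pack into a stack buffer. A minimal
standalone sketch of that pattern, with illustrative stand-in types rather
than V8's real Operator/Node classes:

    #include <iostream>

    struct Operator { const char* name; };
    struct Node {};

    // Out-of-line fallback; a real assembler would append a graph node here.
    Node* AddNode(const Operator* op, int input_count, Node** inputs) {
      (void)inputs;
      std::cout << op->name << ": " << input_count << " inputs\n";
      return nullptr;
    }

    // Variadic front end, same shape as the new RawMachineAssembler template:
    // the pack is materialized into a local array and forwarded by pointer.
    template <class... TArgs>
    Node* AddNode(const Operator* op, Node* n1, TArgs... args) {
      Node* buffer[] = {n1, args...};
      return AddNode(op, static_cast<int>(sizeof...(args)) + 1, buffer);
    }

    int main() {
      Operator add{"Int32Add"};
      Node a, b;
      AddNode(&add, &a, &b);  // prints "Int32Add: 2 inputs"
    }

One template now covers every arity that previously needed its own overload,
so operators with more inputs need no new boilerplate.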
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 101a10ae5b..840c13b1a7 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -230,40 +230,27 @@ std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
}
-struct LiveRange::SpillAtDefinitionList : ZoneObject {
- SpillAtDefinitionList(int gap_index, InstructionOperand* operand,
- SpillAtDefinitionList* next)
- : gap_index(gap_index), operand(operand), next(next) {}
- const int gap_index;
- InstructionOperand* const operand;
- SpillAtDefinitionList* const next;
-};
-
-
const float LiveRange::kInvalidWeight = -1;
const float LiveRange::kMaxWeight = std::numeric_limits<float>::max();
-LiveRange::LiveRange(int id, MachineType machine_type)
- : id_(id),
- spill_start_index_(kMaxInt),
+LiveRange::LiveRange(int relative_id, MachineType machine_type,
+ TopLevelLiveRange* top_level)
+ : relative_id_(relative_id),
bits_(0),
last_interval_(nullptr),
first_interval_(nullptr),
first_pos_(nullptr),
- parent_(nullptr),
+ top_level_(top_level),
next_(nullptr),
- spill_operand_(nullptr),
- spills_at_definition_(nullptr),
current_interval_(nullptr),
last_processed_use_(nullptr),
current_hint_position_(nullptr),
size_(kInvalidSize),
weight_(kInvalidWeight),
- spilled_in_deferred_block_(false) {
+ group_(nullptr) {
DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
- bits_ = SpillTypeField::encode(SpillType::kNoSpillType) |
- AssignedRegisterField::encode(kUnassignedRegister) |
+ bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
MachineTypeField::encode(machine_type);
}
@@ -315,121 +302,6 @@ RegisterKind LiveRange::kind() const {
}
-void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
- InstructionOperand* operand) {
- DCHECK(HasNoSpillType());
- spills_at_definition_ = new (zone)
- SpillAtDefinitionList(gap_index, operand, spills_at_definition_);
-}
-
-
-bool LiveRange::TryCommitSpillInDeferredBlock(
- InstructionSequence* code, const InstructionOperand& spill_operand) {
- DCHECK(!IsChild());
-
- if (!FLAG_turbo_preprocess_ranges || IsEmpty() || HasNoSpillType() ||
- spill_operand.IsConstant() || spill_operand.IsImmediate()) {
- return false;
- }
-
- int count = 0;
- for (const LiveRange* child = this; child != nullptr; child = child->next()) {
- int first_instr = child->Start().ToInstructionIndex();
-
- // If the range starts at instruction end, the first instruction index is
- // the next one.
- if (!child->Start().IsGapPosition() && !child->Start().IsStart()) {
- ++first_instr;
- }
-
- // We only look at where the range starts. It doesn't matter where it ends:
- // if it ends past this block, then either there is a phi there already,
- // or ResolveControlFlow will adapt the last instruction gap of this block
- // as if there were a phi. In either case, data flow will be correct.
- const InstructionBlock* block = code->GetInstructionBlock(first_instr);
-
- // If we have slot uses in a subrange, bail out, because we need the value
- // on the stack before that use.
- bool has_slot_use = child->NextSlotPosition(child->Start()) != nullptr;
- if (!block->IsDeferred()) {
- if (child->spilled() || has_slot_use) {
- TRACE(
- "Live Range %d must be spilled at definition: found a "
- "slot-requiring non-deferred child range %d.\n",
- TopLevel()->id(), child->id());
- return false;
- }
- } else {
- if (child->spilled() || has_slot_use) ++count;
- }
- }
- if (count == 0) return false;
-
- spill_start_index_ = -1;
- spilled_in_deferred_block_ = true;
-
- TRACE("Live Range %d will be spilled only in deferred blocks.\n", id());
- // If we have ranges that aren't spilled but require the operand on the stack,
- // make sure we insert the spill.
- for (const LiveRange* child = this; child != nullptr; child = child->next()) {
- if (!child->spilled() &&
- child->NextSlotPosition(child->Start()) != nullptr) {
- auto instr = code->InstructionAt(child->Start().ToInstructionIndex());
- // Insert spill at the end to let live range connections happen at START.
- auto move =
- instr->GetOrCreateParallelMove(Instruction::END, code->zone());
- InstructionOperand assigned = child->GetAssignedOperand();
- if (TopLevel()->has_slot_use()) {
- bool found = false;
- for (auto move_op : *move) {
- if (move_op->IsEliminated()) continue;
- if (move_op->source().Equals(assigned) &&
- move_op->destination().Equals(spill_operand)) {
- found = true;
- break;
- }
- }
- if (found) continue;
- }
-
- move->AddMove(assigned, spill_operand);
- }
- }
-
- return true;
-}
-
-
-void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
- const InstructionOperand& op,
- bool might_be_duplicated) {
- DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr);
- DCHECK(!IsChild());
- auto zone = sequence->zone();
-
- for (auto to_spill = spills_at_definition_; to_spill != nullptr;
- to_spill = to_spill->next) {
- auto instr = sequence->InstructionAt(to_spill->gap_index);
- auto move = instr->GetOrCreateParallelMove(Instruction::START, zone);
- // Skip insertion if it's possible that the move exists already as a
- // constraint move from a fixed output register to a slot.
- if (might_be_duplicated) {
- bool found = false;
- for (auto move_op : *move) {
- if (move_op->IsEliminated()) continue;
- if (move_op->source().Equals(*to_spill->operand) &&
- move_op->destination().Equals(op)) {
- found = true;
- break;
- }
- }
- if (found) continue;
- }
- move->AddMove(*to_spill->operand, op);
- }
-}
-
-
UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
for (auto pos = first_pos_; pos != nullptr; pos = pos->next()) {
if (pos->HintRegister(register_index)) return pos;
@@ -438,22 +310,6 @@ UsePosition* LiveRange::FirstHintPosition(int* register_index) const {
}
-void LiveRange::SetSpillOperand(InstructionOperand* operand) {
- DCHECK(HasNoSpillType());
- DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
- set_spill_type(SpillType::kSpillOperand);
- spill_operand_ = operand;
-}
-
-
-void LiveRange::SetSpillRange(SpillRange* spill_range) {
- DCHECK(HasNoSpillType() || HasSpillRange());
- DCHECK(spill_range);
- set_spill_type(SpillType::kSpillRange);
- spill_range_ = spill_range;
-}
-
-
UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
UsePosition* use_pos = last_processed_use_;
if (use_pos == nullptr || use_pos->pos() > start) {
@@ -517,6 +373,9 @@ bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
}
+bool LiveRange::IsTopLevel() const { return top_level_ == this; }
+
+
InstructionOperand LiveRange::GetAssignedOperand() const {
if (HasRegisterAssigned()) {
DCHECK(!spilled());
@@ -538,20 +397,6 @@ InstructionOperand LiveRange::GetAssignedOperand() const {
}
-AllocatedOperand LiveRange::GetSpillRangeOperand() const {
- auto spill_range = GetSpillRange();
- int index = spill_range->assigned_slot();
- switch (kind()) {
- case GENERAL_REGISTERS:
- return StackSlotOperand(machine_type(), index);
- case DOUBLE_REGISTERS:
- return DoubleStackSlotOperand(machine_type(), index);
- }
- UNREACHABLE();
- return StackSlotOperand(kMachNone, 0);
-}
-
-
UseInterval* LiveRange::FirstSearchIntervalForPosition(
LifetimePosition position) const {
if (current_interval_ == nullptr) return first_interval_;
@@ -575,9 +420,25 @@ void LiveRange::AdvanceLastProcessedMarker(
}
-void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
- Zone* zone) {
+LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
+ int new_id = TopLevel()->GetNextChildId();
+ LiveRange* child = new (zone) LiveRange(new_id, machine_type(), TopLevel());
+ DetachAt(position, child, zone);
+
+ child->top_level_ = TopLevel();
+ child->next_ = next_;
+ next_ = child;
+ if (child->next() == nullptr) {
+ TopLevel()->set_last_child(child);
+ }
+ return child;
+}
+
+
+void LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
+ Zone* zone) {
DCHECK(Start() < position);
+ DCHECK(End() > position);
DCHECK(result->IsEmpty());
// Find the last interval that ends before the position. If the
// position is contained in one of the intervals in the chain, we
@@ -651,12 +512,6 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
last_processed_use_ = nullptr;
current_interval_ = nullptr;
- // Link the new live range in the chain before any of the other
- // ranges linked from the range before the split.
- result->parent_ = (parent_ == nullptr) ? this : parent_;
- result->next_ = next_;
- next_ = result;
-
// Invalidate size and weight of this range. The child range has them
// invalid at construction.
size_ = kInvalidSize;
@@ -668,104 +523,19 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
}
-// This implements an ordering on live ranges so that they are ordered by their
-// start positions. This is needed for the correctness of the register
-// allocation algorithm. If two live ranges start at the same offset then there
-// is a tie breaker based on where the value is first used. This part of the
-// ordering is merely a heuristic.
-bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
- LifetimePosition start = Start();
- LifetimePosition other_start = other->Start();
- if (start == other_start) {
- UsePosition* pos = first_pos();
- if (pos == nullptr) return false;
- UsePosition* other_pos = other->first_pos();
- if (other_pos == nullptr) return true;
- return pos->pos() < other_pos->pos();
- }
- return start < other_start;
-}
-
-
-void LiveRange::ShortenTo(LifetimePosition start) {
- TRACE("Shorten live range %d to [%d\n", id_, start.value());
- DCHECK(first_interval_ != nullptr);
- DCHECK(first_interval_->start() <= start);
- DCHECK(start < first_interval_->end());
- first_interval_->set_start(start);
-}
-
-
-void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
- Zone* zone) {
- TRACE("Ensure live range %d in interval [%d %d[\n", id_, start.value(),
- end.value());
- auto new_end = end;
- while (first_interval_ != nullptr && first_interval_->start() <= end) {
- if (first_interval_->end() > end) {
- new_end = first_interval_->end();
- }
- first_interval_ = first_interval_->next();
- }
-
- auto new_interval = new (zone) UseInterval(start, new_end);
- new_interval->set_next(first_interval_);
- first_interval_ = new_interval;
- if (new_interval->next() == nullptr) {
- last_interval_ = new_interval;
- }
-}
-
+void LiveRange::AppendAsChild(TopLevelLiveRange* other) {
+ next_ = other;
-void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
- Zone* zone) {
- TRACE("Add to live range %d interval [%d %d[\n", id_, start.value(),
- end.value());
- if (first_interval_ == nullptr) {
- auto interval = new (zone) UseInterval(start, end);
- first_interval_ = interval;
- last_interval_ = interval;
- } else {
- if (end == first_interval_->start()) {
- first_interval_->set_start(start);
- } else if (end < first_interval_->start()) {
- auto interval = new (zone) UseInterval(start, end);
- interval->set_next(first_interval_);
- first_interval_ = interval;
- } else {
- // Order of instruction's processing (see ProcessInstructions) guarantees
- // that each new use interval either precedes or intersects with
- // last added interval.
- DCHECK(start < first_interval_->end());
- first_interval_->set_start(Min(start, first_interval_->start()));
- first_interval_->set_end(Max(end, first_interval_->end()));
- }
- }
+ other->UpdateParentForAllChildren(TopLevel());
+ TopLevel()->UpdateSpillRangePostMerge(other);
+ TopLevel()->set_last_child(other->last_child());
}
-void LiveRange::AddUsePosition(UsePosition* use_pos) {
- auto pos = use_pos->pos();
- TRACE("Add to live range %d use position %d\n", id_, pos.value());
- UsePosition* prev_hint = nullptr;
- UsePosition* prev = nullptr;
- auto current = first_pos_;
- while (current != nullptr && current->pos() < pos) {
- prev_hint = current->HasHint() ? current : prev_hint;
- prev = current;
- current = current->next();
- }
-
- if (prev == nullptr) {
- use_pos->set_next(first_pos_);
- first_pos_ = use_pos;
- } else {
- use_pos->set_next(prev->next());
- prev->set_next(use_pos);
- }
-
- if (prev_hint == nullptr && use_pos->HasHint()) {
- current_hint_position_ = use_pos;
+void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
+ LiveRange* child = this;
+ for (; child != nullptr; child = child->next()) {
+ child->top_level_ = new_top_level;
}
}
@@ -791,6 +561,25 @@ void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
}
+// This implements an ordering on live ranges so that they are ordered by their
+// start positions. This is needed for the correctness of the register
+// allocation algorithm. If two live ranges start at the same offset then there
+// is a tie breaker based on where the value is first used. This part of the
+// ordering is merely a heuristic.
+bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
+ LifetimePosition start = Start();
+ LifetimePosition other_start = other->Start();
+ if (start == other_start) {
+ UsePosition* pos = first_pos();
+ if (pos == nullptr) return false;
+ UsePosition* other_pos = other->first_pos();
+ if (other_pos == nullptr) return true;
+ return pos->pos() < other_pos->pos();
+ }
+ return start < other_start;
+}
+
+
void LiveRange::SetUseHints(int register_index) {
for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) {
if (!pos->HasOperand()) continue;
@@ -864,6 +653,382 @@ unsigned LiveRange::GetSize() {
}
+struct TopLevelLiveRange::SpillAtDefinitionList : ZoneObject {
+ SpillAtDefinitionList(int gap_index, InstructionOperand* operand,
+ SpillAtDefinitionList* next)
+ : gap_index(gap_index), operand(operand), next(next) {}
+ const int gap_index;
+ InstructionOperand* const operand;
+ SpillAtDefinitionList* const next;
+};
+
+
+TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineType machine_type)
+ : LiveRange(0, machine_type, this),
+ vreg_(vreg),
+ last_child_id_(0),
+ splintered_from_(nullptr),
+ spill_operand_(nullptr),
+ spills_at_definition_(nullptr),
+ spilled_in_deferred_blocks_(false),
+ spill_start_index_(kMaxInt),
+ last_child_(this),
+ last_insertion_point_(this) {
+ bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
+}
+
+
+void TopLevelLiveRange::SpillAtDefinition(Zone* zone, int gap_index,
+ InstructionOperand* operand) {
+ DCHECK(HasNoSpillType());
+ spills_at_definition_ = new (zone)
+ SpillAtDefinitionList(gap_index, operand, spills_at_definition_);
+}
+
+
+bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
+ InstructionSequence* code, const InstructionOperand& spill_operand) {
+ if (!FLAG_turbo_preprocess_ranges || IsEmpty() || HasNoSpillType() ||
+ spill_operand.IsConstant() || spill_operand.IsImmediate()) {
+ return false;
+ }
+
+ int count = 0;
+ for (const LiveRange* child = this; child != nullptr; child = child->next()) {
+ int first_instr = child->Start().ToInstructionIndex();
+
+ // If the range starts at instruction end, the first instruction index is
+ // the next one.
+ if (!child->Start().IsGapPosition() && !child->Start().IsStart()) {
+ ++first_instr;
+ }
+
+ // We only look at where the range starts. It doesn't matter where it ends:
+ // if it ends past this block, then either there is a phi there already,
+ // or ResolveControlFlow will adapt the last instruction gap of this block
+ // as if there were a phi. In either case, data flow will be correct.
+ const InstructionBlock* block = code->GetInstructionBlock(first_instr);
+
+ // If we have slot uses in a subrange, bail out, because we need the value
+ // on the stack before that use.
+ bool has_slot_use = child->NextSlotPosition(child->Start()) != nullptr;
+ if (!block->IsDeferred()) {
+ if (child->spilled() || has_slot_use) {
+ TRACE(
+ "Live Range %d must be spilled at definition: found a "
+ "slot-requiring non-deferred child range %d.\n",
+ TopLevel()->vreg(), child->relative_id());
+ return false;
+ }
+ } else {
+ if (child->spilled() || has_slot_use) ++count;
+ }
+ }
+ if (count == 0) return false;
+
+ spill_start_index_ = -1;
+ spilled_in_deferred_blocks_ = true;
+
+ TRACE("Live Range %d will be spilled only in deferred blocks.\n", vreg());
+ // If we have ranges that aren't spilled but require the operand on the stack,
+ // make sure we insert the spill.
+ for (const LiveRange* child = this; child != nullptr; child = child->next()) {
+ if (!child->spilled() &&
+ child->NextSlotPosition(child->Start()) != nullptr) {
+ auto instr = code->InstructionAt(child->Start().ToInstructionIndex());
+ // Insert spill at the end to let live range connections happen at START.
+ auto move =
+ instr->GetOrCreateParallelMove(Instruction::END, code->zone());
+ InstructionOperand assigned = child->GetAssignedOperand();
+ if (TopLevel()->has_slot_use()) {
+ bool found = false;
+ for (auto move_op : *move) {
+ if (move_op->IsEliminated()) continue;
+ if (move_op->source().Equals(assigned) &&
+ move_op->destination().Equals(spill_operand)) {
+ found = true;
+ break;
+ }
+ }
+ if (found) continue;
+ }
+
+ move->AddMove(assigned, spill_operand);
+ }
+ }
+
+ return true;
+}
+
+
+void TopLevelLiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
+ const InstructionOperand& op,
+ bool might_be_duplicated) {
+ DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr);
+ auto zone = sequence->zone();
+
+ for (auto to_spill = spills_at_definition_; to_spill != nullptr;
+ to_spill = to_spill->next) {
+ auto instr = sequence->InstructionAt(to_spill->gap_index);
+ auto move = instr->GetOrCreateParallelMove(Instruction::START, zone);
+ // Skip insertion if it's possible that the move exists already as a
+ // constraint move from a fixed output register to a slot.
+ if (might_be_duplicated) {
+ bool found = false;
+ for (auto move_op : *move) {
+ if (move_op->IsEliminated()) continue;
+ if (move_op->source().Equals(*to_spill->operand) &&
+ move_op->destination().Equals(op)) {
+ found = true;
+ break;
+ }
+ }
+ if (found) continue;
+ }
+ move->AddMove(*to_spill->operand, op);
+ }
+}
+
+
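Both spill-commit paths above perform the same duplicate check before adding
a move to a parallel move. A reduced sketch of that guard, using simplified
stand-in types (the real InstructionOperand and MoveOperands carry much more
state):

    #include <vector>

    struct Operand {
      int id;
      bool Equals(const Operand& other) const { return id == other.id; }
    };

    struct MoveOp {
      Operand source, destination;
      bool eliminated = false;
    };

    // True if an equivalent, still-live move already exists; the caller
    // then skips the insertion instead of emitting a redundant move.
    bool HasEquivalentMove(const std::vector<MoveOp>& parallel_move,
                           const Operand& src, const Operand& dst) {
      for (const MoveOp& m : parallel_move) {
        if (m.eliminated) continue;
        if (m.source.Equals(src) && m.destination.Equals(dst)) return true;
      }
      return false;
    }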
+void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
+ DCHECK(HasNoSpillType());
+ DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
+ set_spill_type(SpillType::kSpillOperand);
+ spill_operand_ = operand;
+}
+
+
+void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
+ DCHECK(!HasSpillOperand());
+ DCHECK(spill_range);
+ spill_range_ = spill_range;
+}
+
+
+AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
+ auto spill_range = GetSpillRange();
+ int index = spill_range->assigned_slot();
+ switch (kind()) {
+ case GENERAL_REGISTERS:
+ return StackSlotOperand(machine_type(), index);
+ case DOUBLE_REGISTERS:
+ return DoubleStackSlotOperand(machine_type(), index);
+ }
+ UNREACHABLE();
+ return StackSlotOperand(kMachNone, 0);
+}
+
+
+void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end,
+ TopLevelLiveRange* result, Zone* zone) {
+ DCHECK(start != Start() || end != End());
+ DCHECK(start < end);
+
+ result->set_spill_type(spill_type());
+
+ if (start <= Start()) {
+ // TODO(mtrofin): here, the TopLevel part is in the deferred range, so we
+ // may want to continue processing the splinter. However, if the value is
+ // defined in a cold block, and then used in a hot block, it follows that
+ // it should terminate on the RHS of a phi, defined on the hot path. We
+ // should check this; however, this may not be the place, because we don't
+ // have access to the instruction sequence.
+ DCHECK(end < End());
+ DetachAt(end, result, zone);
+ next_ = nullptr;
+ } else if (end >= End()) {
+ DCHECK(start > Start());
+ DetachAt(start, result, zone);
+ next_ = nullptr;
+ } else {
+ DCHECK(start < End() && Start() < end);
+
+ const int kInvalidId = std::numeric_limits<int>::max();
+
+ DetachAt(start, result, zone);
+
+ LiveRange end_part(kInvalidId, this->machine_type(), nullptr);
+ result->DetachAt(end, &end_part, zone);
+
+ next_ = end_part.next_;
+ last_interval_->set_next(end_part.first_interval_);
+ // The next splinter will happen either at or after the current interval.
+ // We can optimize DetachAt by setting current_interval_ accordingly,
+ // which will then be picked up by FirstSearchIntervalForPosition.
+ current_interval_ = last_interval_;
+ last_interval_ = end_part.last_interval_;
+
+ if (first_pos_ == nullptr) {
+ first_pos_ = end_part.first_pos_;
+ } else {
+ UsePosition* pos = first_pos_;
+ for (; pos->next() != nullptr; pos = pos->next()) {
+ }
+ pos->set_next(end_part.first_pos_);
+ }
+ }
+ result->next_ = nullptr;
+ result->top_level_ = result;
+
+ result->SetSplinteredFrom(this);
+ // Ensure the result's relative ID is unique within the IDs used for this
+ // virtual register's children and splinters.
+ result->relative_id_ = GetNextChildId();
+}
+
+
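Splinter relies on DetachAt to cut the use-interval chain at a position,
splitting an interval in two when the position falls inside it. A
self-contained sketch of that list surgery, with plain structs instead of
zone-allocated UseIntervals and no use positions:

    struct Interval {
      int start, end;          // half-open [start, end)
      Interval* next = nullptr;
    };

    // Detach the suffix of the chain living at or after 'pos'. An
    // interval straddling 'pos' is cut at the split point.
    Interval* DetachAt(Interval** head, int pos) {
      Interval** link = head;
      while (*link != nullptr && (*link)->end <= pos) link = &(*link)->next;
      Interval* hit = *link;
      if (hit != nullptr && hit->start < pos) {
        // Keep [start, pos) in the prefix; the detached suffix starts
        // with a fresh [pos, end) interval followed by the old tail.
        Interval* suffix = new Interval{pos, hit->end, hit->next};
        hit->end = pos;
        hit->next = nullptr;
        return suffix;
      }
      *link = nullptr;  // sever the chain just before 'hit'
      return hit;
    }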
+void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) {
+ // The splinter parent is always the original "Top".
+ DCHECK(splinter_parent->Start() < Start());
+
+ splintered_from_ = splinter_parent;
+ if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) {
+ SetSpillRange(splinter_parent->spill_range_);
+ }
+}
+
+
+void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) {
+ DCHECK(merged->TopLevel() == this);
+
+ if (HasNoSpillType() && merged->HasSpillRange()) {
+ set_spill_type(merged->spill_type());
+ DCHECK(GetSpillRange()->live_ranges().size() > 0);
+ merged->spill_range_ = nullptr;
+ merged->bits_ =
+ SpillTypeField::update(merged->bits_, SpillType::kNoSpillType);
+ }
+}
+
+
+void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) {
+ DCHECK(Start() < other->Start());
+ DCHECK(other->splintered_from() == this);
+
+ LiveRange* last_other = other->last_child();
+ LiveRange* last_me = last_child();
+
+ // Simple case: we just append at the end.
+ if (last_me->End() <= other->Start()) return last_me->AppendAsChild(other);
+
+ DCHECK(last_me->End() > last_other->End());
+
+ // In the more general case, we need to find the ranges between which to
+ // insert.
+ if (other->Start() < last_insertion_point_->Start()) {
+ last_insertion_point_ = this;
+ }
+
+ for (; last_insertion_point_->next() != nullptr &&
+ last_insertion_point_->next()->Start() <= other->Start();
+ last_insertion_point_ = last_insertion_point_->next()) {
+ }
+
+ // When we splintered the original range, we reconstituted it as one range
+ // without children, but with discontinuities. To merge the splinter back
+ // in, we need to split the range - or a child obtained after register
+ // allocation splitting.
+ LiveRange* after = last_insertion_point_->next();
+ if (last_insertion_point_->End() > other->Start()) {
+ LiveRange* new_after = last_insertion_point_->SplitAt(other->Start(), zone);
+ new_after->set_spilled(last_insertion_point_->spilled());
+ if (!new_after->spilled())
+ new_after->set_assigned_register(
+ last_insertion_point_->assigned_register());
+ after = new_after;
+ }
+
+ last_other->next_ = after;
+ last_insertion_point_->next_ = other;
+ other->UpdateParentForAllChildren(TopLevel());
+ TopLevel()->UpdateSpillRangePostMerge(other);
+}
+
+
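Merge keeps the last_insertion_point_ cursor so that a sequence of splinter
merges does not rescan the child chain from the head each time; the code
above resets the cursor whenever a merge arrives out of order. A sketch of
just the forward advance, with an illustrative Range type:

    struct Range {
      int start;
      Range* next;
    };

    // Advance the cached cursor to the last range whose start is <= key.
    // For merges arriving with non-decreasing starts this keeps the total
    // scanning cost linear in the chain length rather than quadratic.
    Range* AdvanceInsertionPoint(Range* cursor, int key) {
      while (cursor->next != nullptr && cursor->next->start <= key) {
        cursor = cursor->next;
      }
      return cursor;
    }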
+void TopLevelLiveRange::ShortenTo(LifetimePosition start) {
+ TRACE("Shorten live range %d to [%d\n", vreg(), start.value());
+ DCHECK(first_interval_ != nullptr);
+ DCHECK(first_interval_->start() <= start);
+ DCHECK(start < first_interval_->end());
+ first_interval_->set_start(start);
+}
+
+
+void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
+ LifetimePosition end, Zone* zone) {
+ TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(),
+ end.value());
+ auto new_end = end;
+ while (first_interval_ != nullptr && first_interval_->start() <= end) {
+ if (first_interval_->end() > end) {
+ new_end = first_interval_->end();
+ }
+ first_interval_ = first_interval_->next();
+ }
+
+ auto new_interval = new (zone) UseInterval(start, new_end);
+ new_interval->set_next(first_interval_);
+ first_interval_ = new_interval;
+ if (new_interval->next() == nullptr) {
+ last_interval_ = new_interval;
+ }
+}
+
+
+void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
+ LifetimePosition end, Zone* zone) {
+ TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(),
+ end.value());
+ if (first_interval_ == nullptr) {
+ auto interval = new (zone) UseInterval(start, end);
+ first_interval_ = interval;
+ last_interval_ = interval;
+ } else {
+ if (end == first_interval_->start()) {
+ first_interval_->set_start(start);
+ } else if (end < first_interval_->start()) {
+ auto interval = new (zone) UseInterval(start, end);
+ interval->set_next(first_interval_);
+ first_interval_ = interval;
+ } else {
+ // The order in which instructions are processed (see ProcessInstructions)
+ // guarantees that each new use interval either precedes or intersects with
+ // the last added interval.
+ DCHECK(start < first_interval_->end());
+ first_interval_->set_start(Min(start, first_interval_->start()));
+ first_interval_->set_end(Max(end, first_interval_->end()));
+ }
+ }
+}
+
+
+void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) {
+ auto pos = use_pos->pos();
+ TRACE("Add to live range %d use position %d\n", vreg(), pos.value());
+ UsePosition* prev_hint = nullptr;
+ UsePosition* prev = nullptr;
+ auto current = first_pos_;
+ while (current != nullptr && current->pos() < pos) {
+ prev_hint = current->HasHint() ? current : prev_hint;
+ prev = current;
+ current = current->next();
+ }
+
+ if (prev == nullptr) {
+ use_pos->set_next(first_pos_);
+ first_pos_ = use_pos;
+ } else {
+ use_pos->set_next(prev->next());
+ prev->set_next(use_pos);
+ }
+
+ if (prev_hint == nullptr && use_pos->HasHint()) {
+ current_hint_position_ = use_pos;
+ }
+}
+
+
static bool AreUseIntervalsIntersecting(UseInterval* interval1,
UseInterval* interval2) {
while (interval1 != nullptr && interval2 != nullptr) {
@@ -886,9 +1051,10 @@ static bool AreUseIntervalsIntersecting(UseInterval* interval1,
std::ostream& operator<<(std::ostream& os,
const PrintableLiveRange& printable_range) {
const LiveRange* range = printable_range.range_;
- os << "Range: " << range->id() << " ";
- if (range->is_phi()) os << "phi ";
- if (range->is_non_loop_phi()) os << "nlphi ";
+ os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
+ << " ";
+ if (range->TopLevel()->is_phi()) os << "phi ";
+ if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
os << "{" << std::endl;
auto interval = range->first_interval();
@@ -896,8 +1062,10 @@ std::ostream& operator<<(std::ostream& os,
PrintableInstructionOperand pio;
pio.register_configuration_ = printable_range.register_configuration_;
while (use_pos != nullptr) {
- pio.op_ = *use_pos->operand();
- os << pio << use_pos->pos() << " ";
+ if (use_pos->HasOperand()) {
+ pio.op_ = *use_pos->operand();
+ os << pio << use_pos->pos() << " ";
+ }
use_pos = use_pos->next();
}
os << std::endl;
@@ -912,13 +1080,19 @@ std::ostream& operator<<(std::ostream& os,
}
-SpillRange::SpillRange(LiveRange* parent, Zone* zone)
- : live_ranges_(zone), assigned_slot_(kUnassignedSlot) {
- DCHECK(!parent->IsChild());
+SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
+ : live_ranges_(zone),
+ assigned_slot_(kUnassignedSlot),
+ byte_width_(GetByteWidth(parent->machine_type())),
+ kind_(parent->kind()) {
+ // Spill ranges are created for top level, non-splintered ranges. This is so
+ // that, when merging decisions are made, we consider the full extent of the
+ // virtual register, and avoid clobbering it.
+ DCHECK(!parent->IsSplinter());
UseInterval* result = nullptr;
UseInterval* node = nullptr;
// Copy the intervals for all ranges.
- for (auto range = parent; range != nullptr; range = range->next()) {
+ for (LiveRange* range = parent; range != nullptr; range = range->next()) {
auto src = range->first_interval();
while (src != nullptr) {
auto new_node = new (zone) UseInterval(src->start(), src->end());
@@ -934,7 +1108,6 @@ SpillRange::SpillRange(LiveRange* parent, Zone* zone)
use_interval_ = result;
live_ranges().push_back(parent);
end_position_ = node->end();
- DCHECK(!parent->HasSpillRange());
parent->SetSpillRange(this);
}
@@ -1041,13 +1214,14 @@ RegisterAllocationData::RegisterAllocationData(
config_(config),
phi_map_(allocation_zone()),
live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
+ live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
allocation_zone()),
fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
allocation_zone()),
fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
allocation_zone()),
- spill_ranges_(allocation_zone()),
+ spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
delayed_references_(allocation_zone()),
assigned_registers_(nullptr),
assigned_double_registers_(nullptr),
@@ -1056,7 +1230,6 @@ RegisterAllocationData::RegisterAllocationData(
RegisterConfiguration::kMaxGeneralRegisters);
DCHECK(this->config()->num_double_registers() <=
RegisterConfiguration::kMaxDoubleRegisters);
- spill_ranges().reserve(8);
assigned_registers_ = new (code_zone())
BitVector(this->config()->num_general_registers(), code_zone());
assigned_double_registers_ = new (code_zone())
@@ -1081,7 +1254,7 @@ MachineType RegisterAllocationData::MachineTypeFor(int virtual_register) {
}
-LiveRange* RegisterAllocationData::LiveRangeFor(int index) {
+TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) {
if (index >= static_cast<int>(live_ranges().size())) {
live_ranges().resize(index + 1, nullptr);
}
@@ -1094,21 +1267,26 @@ LiveRange* RegisterAllocationData::LiveRangeFor(int index) {
}
-LiveRange* RegisterAllocationData::NewLiveRange(int index,
- MachineType machine_type) {
- return new (allocation_zone()) LiveRange(index, machine_type);
+TopLevelLiveRange* RegisterAllocationData::NewLiveRange(
+ int index, MachineType machine_type) {
+ return new (allocation_zone()) TopLevelLiveRange(index, machine_type);
}
-LiveRange* RegisterAllocationData::NewChildRangeFor(LiveRange* range) {
+int RegisterAllocationData::GetNextLiveRangeId() {
int vreg = virtual_register_count_++;
if (vreg >= static_cast<int>(live_ranges().size())) {
live_ranges().resize(vreg + 1, nullptr);
}
- auto child = new (allocation_zone()) LiveRange(vreg, range->machine_type());
- DCHECK_NULL(live_ranges()[vreg]);
- live_ranges()[vreg] = child;
- return child;
+ return vreg;
+}
+
+
+TopLevelLiveRange* RegisterAllocationData::NextLiveRange(
+ MachineType machine_type) {
+ int vreg = GetNextLiveRangeId();
+ TopLevelLiveRange* ret = NewLiveRange(vreg, machine_type);
+ return ret;
}
@@ -1132,6 +1310,12 @@ RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
}
+RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor(
+ TopLevelLiveRange* top_range) {
+ return GetPhiMapValueFor(top_range->vreg());
+}
+
+
bool RegisterAllocationData::ExistsUseWithoutDefinition() {
bool found = false;
BitVector::Iterator iterator(live_in_sets()[0]);
@@ -1140,7 +1324,7 @@ bool RegisterAllocationData::ExistsUseWithoutDefinition() {
int operand_index = iterator.Current();
PrintF("Register allocator error: live v%d reached first block.\n",
operand_index);
- LiveRange* range = LiveRangeFor(operand_index);
+ LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
PrintF(" (first use is at %d)\n", range->first_pos()->pos().value());
if (debug_name() == nullptr) {
PrintF("\n");
@@ -1154,10 +1338,31 @@ bool RegisterAllocationData::ExistsUseWithoutDefinition() {
SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange(
- LiveRange* range) {
+ TopLevelLiveRange* range) {
+ DCHECK(!range->HasSpillOperand());
+
+ SpillRange* spill_range = range->GetAllocatedSpillRange();
+ if (spill_range == nullptr) {
+ DCHECK(!range->IsSplinter());
+ spill_range = new (allocation_zone()) SpillRange(range, allocation_zone());
+ }
+ range->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange);
+
+ int spill_range_index =
+ range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg();
+
+ spill_ranges()[spill_range_index] = spill_range;
+
+ return spill_range;
+}
+
+
+SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange(
+ TopLevelLiveRange* range) {
+ DCHECK(!range->HasSpillOperand());
+ DCHECK(!range->IsSplinter());
auto spill_range =
new (allocation_zone()) SpillRange(range, allocation_zone());
- spill_ranges().push_back(spill_range);
return spill_range;
}
@@ -1230,6 +1435,23 @@ void RegisterAllocationData::Print(const MoveOperands* move) {
}
+void RegisterAllocationData::Print(const SpillRange* spill_range) {
+ OFStream os(stdout);
+ os << "{" << std::endl;
+ for (TopLevelLiveRange* range : spill_range->live_ranges()) {
+ os << range->vreg() << " ";
+ }
+ os << std::endl;
+
+ for (UseInterval* interval = spill_range->interval(); interval != nullptr;
+ interval = interval->next()) {
+ os << '[' << interval->start() << ", " << interval->end() << ')'
+ << std::endl;
+ }
+ os << "}" << std::endl;
+}
+
+
ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data)
: data_(data) {}
@@ -1263,7 +1485,7 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
InstructionOperand::ReplaceWith(operand, &allocated);
if (is_tagged) {
TRACE("Fixed reg is tagged at %d\n", pos);
- auto instr = InstructionAt(pos);
+ auto instr = code()->InstructionAt(pos);
if (instr->HasReferenceMap()) {
instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
}
@@ -1295,13 +1517,13 @@ void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
const InstructionBlock* block) {
int end = block->last_instruction_index();
- auto last_instruction = InstructionAt(end);
+ auto last_instruction = code()->InstructionAt(end);
for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
auto output_operand = last_instruction->OutputAt(i);
DCHECK(!output_operand->IsConstant());
auto output = UnallocatedOperand::cast(output_operand);
int output_vreg = output->virtual_register();
- auto range = LiveRangeFor(output_vreg);
+ auto range = data()->GetOrCreateLiveRangeFor(output_vreg);
bool assigned = false;
if (output->HasFixedPolicy()) {
AllocateFixed(output, -1, false);
@@ -1339,7 +1561,7 @@ void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
- auto first = InstructionAt(instr_index);
+ auto first = code()->InstructionAt(instr_index);
// Handle fixed temporaries.
for (size_t i = 0; i < first->TempCount(); i++) {
auto temp = UnallocatedOperand::cast(first->TempAt(i));
@@ -1350,18 +1572,19 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
InstructionOperand* output = first->OutputAt(i);
if (output->IsConstant()) {
int output_vreg = ConstantOperand::cast(output)->virtual_register();
- auto range = LiveRangeFor(output_vreg);
+ auto range = data()->GetOrCreateLiveRangeFor(output_vreg);
range->SetSpillStartIndex(instr_index + 1);
range->SetSpillOperand(output);
continue;
}
auto first_output = UnallocatedOperand::cast(output);
- auto range = LiveRangeFor(first_output->virtual_register());
+ auto range =
+ data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
bool assigned = false;
if (first_output->HasFixedPolicy()) {
int output_vreg = first_output->virtual_register();
UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
- bool is_tagged = IsReference(output_vreg);
+ bool is_tagged = code()->IsReference(output_vreg);
AllocateFixed(first_output, instr_index, is_tagged);
// This value is produced on the stack, we never need to spill it.
@@ -1387,7 +1610,7 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
- auto second = InstructionAt(instr_index);
+ auto second = code()->InstructionAt(instr_index);
// Handle fixed input operands of second instruction.
for (size_t i = 0; i < second->InputCount(); i++) {
auto input = second->InputAt(i);
@@ -1396,7 +1619,7 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
if (cur_input->HasFixedPolicy()) {
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
- bool is_tagged = IsReference(input_vreg);
+ bool is_tagged = code()->IsReference(input_vreg);
AllocateFixed(cur_input, instr_index, is_tagged);
data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
}
@@ -1416,13 +1639,14 @@ void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
cur_input->set_virtual_register(second_output->virtual_register());
auto gap_move = data()->AddGapMove(instr_index, Instruction::END,
input_copy, *cur_input);
- if (IsReference(input_vreg) && !IsReference(output_vreg)) {
+ if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
if (second->HasReferenceMap()) {
RegisterAllocationData::DelayedReference delayed_reference = {
second->reference_map(), &gap_move->source()};
data()->delayed_references().push_back(delayed_reference);
}
- } else if (!IsReference(input_vreg) && IsReference(output_vreg)) {
+ } else if (!code()->IsReference(input_vreg) &&
+ code()->IsReference(output_vreg)) {
// The input is assumed to immediately have a tagged representation,
// before the pointer map can be used. I.e. the pointer map at the
// instruction will include the output operand (whose value at the
@@ -1455,10 +1679,11 @@ void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
auto move = data()->AddGapMove(cur_block->last_instruction_index(),
Instruction::END, input, output);
map_value->AddOperand(&move->destination());
- DCHECK(!InstructionAt(cur_block->last_instruction_index())
+ DCHECK(!code()
+ ->InstructionAt(cur_block->last_instruction_index())
->HasReferenceMap());
}
- auto live_range = LiveRangeFor(phi_vreg);
+ auto live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
int gap_index = block->first_instruction_index();
live_range->SpillAtDefinition(allocation_zone(), gap_index, &output);
live_range->SetSpillStartIndex(gap_index);
@@ -1474,27 +1699,35 @@ LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data,
: data_(data), phi_hints_(local_zone) {}
-BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block) {
- // Compute live out for the given block, except not including backward
- // successor edges.
- auto live_out = new (allocation_zone())
- BitVector(code()->VirtualRegisterCount(), allocation_zone());
-
- // Process all successor blocks.
- for (auto succ : block->successors()) {
- // Add values live on entry to the successor. Note the successor's
- // live_in will not be computed yet for backwards edges.
- auto live_in = live_in_sets()[succ.ToSize()];
- if (live_in != nullptr) live_out->Union(*live_in);
-
- // All phi input operands corresponding to this successor edge are live
- // out from this block.
- auto successor = code()->InstructionBlockAt(succ);
- size_t index = successor->PredecessorIndexOf(block->rpo_number());
- DCHECK(index < successor->PredecessorCount());
- for (auto phi : successor->phis()) {
- live_out->Add(phi->operands()[index]);
+BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block,
+ RegisterAllocationData* data) {
+ size_t block_index = block->rpo_number().ToSize();
+ BitVector* live_out = data->live_out_sets()[block_index];
+ if (live_out == nullptr) {
+ // Compute live out for the given block, excluding backward successor
+ // edges.
+ Zone* zone = data->allocation_zone();
+ const InstructionSequence* code = data->code();
+
+ live_out = new (zone) BitVector(code->VirtualRegisterCount(), zone);
+
+ // Process all successor blocks.
+ for (const RpoNumber& succ : block->successors()) {
+ // Add values live on entry to the successor.
+ if (succ <= block->rpo_number()) continue;
+ BitVector* live_in = data->live_in_sets()[succ.ToSize()];
+ if (live_in != nullptr) live_out->Union(*live_in);
+
+ // All phi input operands corresponding to this successor edge are live
+ // out from this block.
+ auto successor = code->InstructionBlockAt(succ);
+ size_t index = successor->PredecessorIndexOf(block->rpo_number());
+ DCHECK(index < successor->PredecessorCount());
+ for (PhiInstruction* phi : successor->phis()) {
+ live_out->Add(phi->operands()[index]);
+ }
}
+ data->live_out_sets()[block_index] = live_out;
}
return live_out;
}
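The rewritten ComputeLiveOut memoizes its result in the new live_out_sets_
table and skips back edges, whose live-in sets are not yet available in the
reverse block walk. A compact sketch of the same dataflow step, using
std::unordered_set as a stand-in for BitVector and leaving out the phi-input
handling shown above:

    #include <optional>
    #include <unordered_set>
    #include <vector>

    using BitSet = std::unordered_set<int>;

    // Live-out = union of live-in over forward successor edges; an edge
    // to a block with an equal or smaller RPO number is a back edge and
    // is skipped. The result is cached per block.
    const BitSet& LiveOut(int block,
                          const std::vector<std::vector<int>>& successors,
                          const std::vector<BitSet>& live_in,
                          std::vector<std::optional<BitSet>>& cache) {
      if (!cache[block]) {
        BitSet out;
        for (int succ : successors[block]) {
          if (succ <= block) continue;  // back edge
          out.insert(live_in[succ].begin(), live_in[succ].end());
        }
        cache[block] = std::move(out);
      }
      return *cache[block];
    }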
@@ -1511,7 +1744,7 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
BitVector::Iterator iterator(live_out);
while (!iterator.Done()) {
int operand_index = iterator.Current();
- auto range = LiveRangeFor(operand_index);
+ auto range = data()->GetOrCreateLiveRangeFor(operand_index);
range->AddUseInterval(start, end, allocation_zone());
iterator.Advance();
}
@@ -1523,7 +1756,7 @@ int LiveRangeBuilder::FixedDoubleLiveRangeID(int index) {
}
-LiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
+TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
DCHECK(index < config()->num_general_registers());
auto result = data()->fixed_live_ranges()[index];
if (result == nullptr) {
@@ -1538,7 +1771,7 @@ LiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
}
-LiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
+TopLevelLiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
DCHECK(index < config()->num_aliased_double_registers());
auto result = data()->fixed_double_live_ranges()[index];
if (result == nullptr) {
@@ -1552,11 +1785,13 @@ LiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
}
-LiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
+TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) {
if (operand->IsUnallocated()) {
- return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register());
+ return data()->GetOrCreateLiveRangeFor(
+ UnallocatedOperand::cast(operand)->virtual_register());
} else if (operand->IsConstant()) {
- return LiveRangeFor(ConstantOperand::cast(operand)->virtual_register());
+ return data()->GetOrCreateLiveRangeFor(
+ ConstantOperand::cast(operand)->virtual_register());
} else if (operand->IsRegister()) {
return FixedLiveRangeFor(RegisterOperand::cast(operand)->index());
} else if (operand->IsDoubleRegister()) {
@@ -1686,7 +1921,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
int vreg = unalloc->virtual_register();
live->Add(vreg);
if (unalloc->HasSlotPolicy()) {
- LiveRangeFor(vreg)->set_has_slot_use(true);
+ data()->GetOrCreateLiveRangeFor(vreg)->set_has_slot_use(true);
}
}
Use(block_start_position, use_pos, input);
@@ -1732,7 +1967,7 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
int phi_vreg = -1;
if (to.IsUnallocated()) {
int to_vreg = UnallocatedOperand::cast(to).virtual_register();
- auto to_range = LiveRangeFor(to_vreg);
+ auto to_range = data()->GetOrCreateLiveRangeFor(to_vreg);
if (to_range->is_phi()) {
phi_vreg = to_vreg;
if (to_range->is_non_loop_phi()) {
@@ -1817,7 +2052,7 @@ void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
code()->LastLoopInstructionIndex(block)).NextFullStart();
while (!iterator.Done()) {
int operand_index = iterator.Current();
- auto range = LiveRangeFor(operand_index);
+ TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
range->EnsureInterval(start, end, allocation_zone());
iterator.Advance();
}
@@ -1834,7 +2069,7 @@ void LiveRangeBuilder::BuildLiveRanges() {
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
--block_id) {
auto block = code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
- auto live = ComputeLiveOut(block);
+ auto live = ComputeLiveOut(block, data());
// Initially consider all live_out values live for the entire block. We
// will shorten these intervals if necessary.
AddInitialIntervals(block, live);
@@ -1899,7 +2134,7 @@ void LiveRangeBuilder::Verify() const {
for (auto& hint : phi_hints_) {
CHECK(hint.second->IsResolved());
}
- for (auto current : data()->live_ranges()) {
+ for (LiveRange* current : data()->live_ranges()) {
if (current != nullptr) current->Verify();
}
}
@@ -1914,8 +2149,9 @@ RegisterAllocator::RegisterAllocator(RegisterAllocationData* data,
LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
LifetimePosition pos) {
- DCHECK(!range->IsFixed());
- TRACE("Splitting live range %d at %d\n", range->id(), pos.value());
+ DCHECK(!range->TopLevel()->IsFixed());
+ TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(),
+ range->relative_id(), pos.value());
if (pos <= range->Start()) return range;
@@ -1925,8 +2161,7 @@ LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
(GetInstructionBlock(code(), pos)->last_instruction_index() !=
pos.ToInstructionIndex()));
- auto result = data()->NewChildRangeFor(range);
- range->SplitAt(pos, result, allocation_zone());
+ LiveRange* result = range->SplitAt(pos, allocation_zone());
return result;
}
@@ -1934,9 +2169,10 @@ LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end) {
- DCHECK(!range->IsFixed());
- TRACE("Splitting live range %d in position between [%d, %d]\n", range->id(),
- start.value(), end.value());
+ DCHECK(!range->TopLevel()->IsFixed());
+ TRACE("Splitting live range %d:%d in position between [%d, %d]\n",
+ range->TopLevel()->vreg(), range->relative_id(), start.value(),
+ end.value());
auto split_pos = FindOptimalSplitPos(start, end);
DCHECK(split_pos >= start);
@@ -2014,8 +2250,9 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
void RegisterAllocator::Spill(LiveRange* range) {
DCHECK(!range->spilled());
- TRACE("Spilling live range %d\n", range->id());
- auto first = range->TopLevel();
+ TopLevelLiveRange* first = range->TopLevel();
+ TRACE("Spilling live range %d:%d\n", first->vreg(), range->relative_id());
+
if (first->HasNoSpillType()) {
data()->AssignSpillRangeToLiveRange(first);
}
@@ -2023,7 +2260,8 @@ void RegisterAllocator::Spill(LiveRange* range) {
}
-const ZoneVector<LiveRange*>& RegisterAllocator::GetFixedRegisters() const {
+const ZoneVector<TopLevelLiveRange*>& RegisterAllocator::GetFixedRegisters()
+ const {
return mode() == DOUBLE_REGISTERS ? data()->fixed_double_live_ranges()
: data()->fixed_live_ranges();
}
@@ -2060,7 +2298,7 @@ void LinearScanAllocator::AllocateRegisters() {
DCHECK(active_live_ranges().empty());
DCHECK(inactive_live_ranges().empty());
- for (auto range : data()->live_ranges()) {
+ for (LiveRange* range : data()->live_ranges()) {
if (range == nullptr) continue;
if (range->kind() == mode()) {
AddToUnhandledUnsorted(range);
@@ -2086,10 +2324,12 @@ void LinearScanAllocator::AllocateRegisters() {
#ifdef DEBUG
allocation_finger_ = position;
#endif
- TRACE("Processing interval %d start=%d\n", current->id(), position.value());
+ TRACE("Processing interval %d:%d start=%d\n", current->TopLevel()->vreg(),
+ current->relative_id(), position.value());
- if (!current->HasNoSpillType()) {
- TRACE("Live range %d already has a spill operand\n", current->id());
+ if (current->IsTopLevel() && !current->TopLevel()->HasNoSpillType()) {
+ TRACE("Live range %d:%d already has a spill operand\n",
+ current->TopLevel()->vreg(), current->relative_id());
auto next_pos = position;
if (next_pos.IsGapPosition()) {
next_pos = next_pos.NextStart();
@@ -2109,7 +2349,8 @@ void LinearScanAllocator::AllocateRegisters() {
}
}
- if (TryReuseSpillForPhi(current)) continue;
+ if (current->IsTopLevel() && TryReuseSpillForPhi(current->TopLevel()))
+ continue;
for (size_t i = 0; i < active_live_ranges().size(); ++i) {
auto cur_active = active_live_ranges()[i];
@@ -2149,20 +2390,22 @@ void LinearScanAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
data()->MarkAllocated(range->kind(), reg);
range->set_assigned_register(reg);
range->SetUseHints(reg);
- if (range->is_phi()) {
- data()->GetPhiMapValueFor(range->id())->set_assigned_register(reg);
+ if (range->IsTopLevel() && range->TopLevel()->is_phi()) {
+ data()->GetPhiMapValueFor(range->TopLevel())->set_assigned_register(reg);
}
}
void LinearScanAllocator::AddToActive(LiveRange* range) {
- TRACE("Add live range %d to active\n", range->id());
+ TRACE("Add live range %d:%d to active\n", range->TopLevel()->vreg(),
+ range->relative_id());
active_live_ranges().push_back(range);
}
void LinearScanAllocator::AddToInactive(LiveRange* range) {
- TRACE("Add live range %d to inactive\n", range->id());
+ TRACE("Add live range %d:%d to inactive\n", range->TopLevel()->vreg(),
+ range->relative_id());
inactive_live_ranges().push_back(range);
}
@@ -2175,13 +2418,15 @@ void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) {
--i) {
auto cur_range = unhandled_live_ranges().at(i);
if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
- TRACE("Add live range %d to unhandled at %d\n", range->id(), i + 1);
+ TRACE("Add live range %d:%d to unhandled at %d\n",
+ range->TopLevel()->vreg(), range->relative_id(), i + 1);
auto it = unhandled_live_ranges().begin() + (i + 1);
unhandled_live_ranges().insert(it, range);
DCHECK(UnhandledIsSorted());
return;
}
- TRACE("Add live range %d to unhandled at start\n", range->id());
+ TRACE("Add live range %d:%d to unhandled at start\n",
+ range->TopLevel()->vreg(), range->relative_id());
unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
DCHECK(UnhandledIsSorted());
}
@@ -2190,7 +2435,8 @@ void LinearScanAllocator::AddToUnhandledSorted(LiveRange* range) {
void LinearScanAllocator::AddToUnhandledUnsorted(LiveRange* range) {
if (range == nullptr || range->IsEmpty()) return;
DCHECK(!range->HasRegisterAssigned() && !range->spilled());
- TRACE("Add live range %d to unhandled unsorted at end\n", range->id());
+ TRACE("Add live range %d:%d to unhandled unsorted at end\n",
+ range->TopLevel()->vreg(), range->relative_id());
unhandled_live_ranges().push_back(range);
}
@@ -2199,7 +2445,7 @@ static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a));
if (a->ShouldBeAllocatedBefore(b)) return false;
if (b->ShouldBeAllocatedBefore(a)) return true;
- return a->id() < b->id();
+ return a->TopLevel()->vreg() < b->TopLevel()->vreg();
}
@@ -2226,27 +2472,31 @@ bool LinearScanAllocator::UnhandledIsSorted() {
void LinearScanAllocator::ActiveToHandled(LiveRange* range) {
RemoveElement(&active_live_ranges(), range);
- TRACE("Moving live range %d from active to handled\n", range->id());
+ TRACE("Moving live range %d:%d from active to handled\n",
+ range->TopLevel()->vreg(), range->relative_id());
}
void LinearScanAllocator::ActiveToInactive(LiveRange* range) {
RemoveElement(&active_live_ranges(), range);
inactive_live_ranges().push_back(range);
- TRACE("Moving live range %d from active to inactive\n", range->id());
+ TRACE("Moving live range %d:%d from active to inactive\n",
+ range->TopLevel()->vreg(), range->relative_id());
}
void LinearScanAllocator::InactiveToHandled(LiveRange* range) {
RemoveElement(&inactive_live_ranges(), range);
- TRACE("Moving live range %d from inactive to handled\n", range->id());
+ TRACE("Moving live range %d:%d from inactive to handled\n",
+ range->TopLevel()->vreg(), range->relative_id());
}
void LinearScanAllocator::InactiveToActive(LiveRange* range) {
RemoveElement(&inactive_live_ranges(), range);
active_live_ranges().push_back(range);
- TRACE("Moving live range %d from inactive to active\n", range->id());
+ TRACE("Moving live range %d:%d from inactive to active\n",
+ range->TopLevel()->vreg(), range->relative_id());
}
@@ -2272,14 +2522,17 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
int hint_register;
if (current->FirstHintPosition(&hint_register) != nullptr) {
- TRACE("Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
- RegisterName(hint_register), free_until_pos[hint_register].value(),
- current->id(), current->End().value());
+ TRACE(
+ "Found reg hint %s (free until [%d) for live range %d:%d (end %d[).\n",
+ RegisterName(hint_register), free_until_pos[hint_register].value(),
+ current->TopLevel()->vreg(), current->relative_id(),
+ current->End().value());
// The desired register is free until the end of the current live range.
if (free_until_pos[hint_register] >= current->End()) {
- TRACE("Assigning preferred reg %s to live range %d\n",
- RegisterName(hint_register), current->id());
+ TRACE("Assigning preferred reg %s to live range %d:%d\n",
+ RegisterName(hint_register), current->TopLevel()->vreg(),
+ current->relative_id());
SetLiveRangeAssignedRegister(current, hint_register);
return true;
}
@@ -2310,8 +2563,8 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
// Register reg is available at the range start and is free until
// the range end.
DCHECK(pos >= current->End());
- TRACE("Assigning free reg %s to live range %d\n", RegisterName(reg),
- current->id());
+ TRACE("Assigning free reg %s to live range %d:%d\n", RegisterName(reg),
+ current->TopLevel()->vreg(), current->relative_id());
SetLiveRangeAssignedRegister(current, reg);
return true;
@@ -2336,7 +2589,8 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
for (auto range : active_live_ranges()) {
int cur_reg = range->assigned_register();
- if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
+ if (range->TopLevel()->IsFixed() ||
+ !range->CanBeSpilled(current->Start())) {
block_pos[cur_reg] = use_pos[cur_reg] =
LifetimePosition::GapFromInstructionIndex(0);
} else {
@@ -2355,7 +2609,7 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
auto next_intersection = range->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = range->assigned_register();
- if (range->IsFixed()) {
+ if (range->TopLevel()->IsFixed()) {
block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
} else {
@@ -2389,8 +2643,8 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
// Register reg is not blocked for the whole range.
DCHECK(block_pos[reg] >= current->End());
- TRACE("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
- current->id());
+ TRACE("Assigning blocked reg %s to live range %d:%d\n", RegisterName(reg),
+ current->TopLevel()->vreg(), current->relative_id());
SetLiveRangeAssignedRegister(current, reg);
// This register was not free. Thus we need to find and spill
@@ -2430,7 +2684,7 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
auto range = inactive_live_ranges()[i];
DCHECK(range->End() > current->Start());
- if (range->assigned_register() == reg && !range->IsFixed()) {
+ if (range->assigned_register() == reg && !range->TopLevel()->IsFixed()) {
LifetimePosition next_intersection = range->FirstIntersection(current);
if (next_intersection.IsValid()) {
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
@@ -2448,10 +2702,11 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
}
-bool LinearScanAllocator::TryReuseSpillForPhi(LiveRange* range) {
- if (range->IsChild() || !range->is_phi()) return false;
+bool LinearScanAllocator::TryReuseSpillForPhi(TopLevelLiveRange* range) {
+ if (!range->is_phi()) return false;
+
DCHECK(!range->HasSpillOperand());
- auto phi_map_value = data()->GetPhiMapValueFor(range->id());
+ auto phi_map_value = data()->GetPhiMapValueFor(range);
auto phi = phi_map_value->phi();
auto block = phi_map_value->block();
// Count the number of spilled operands.
@@ -2459,8 +2714,8 @@ bool LinearScanAllocator::TryReuseSpillForPhi(LiveRange* range) {
LiveRange* first_op = nullptr;
for (size_t i = 0; i < phi->operands().size(); i++) {
int op = phi->operands()[i];
- LiveRange* op_range = LiveRangeFor(op);
- if (!op_range->HasSpillRange()) continue;
+ LiveRange* op_range = data()->GetOrCreateLiveRangeFor(op);
+ if (!op_range->TopLevel()->HasSpillRange()) continue;
auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
pred->last_instruction_index());
@@ -2483,11 +2738,11 @@ bool LinearScanAllocator::TryReuseSpillForPhi(LiveRange* range) {
// Try to merge the spilled operands and count the number of merged spilled
// operands.
DCHECK(first_op != nullptr);
- auto first_op_spill = first_op->GetSpillRange();
+ auto first_op_spill = first_op->TopLevel()->GetSpillRange();
size_t num_merged = 1;
for (size_t i = 1; i < phi->operands().size(); i++) {
int op = phi->operands()[i];
- auto op_range = LiveRangeFor(op);
+ auto op_range = data()->GetOrCreateLiveRangeFor(op);
if (!op_range->HasSpillRange()) continue;
auto op_spill = op_range->GetSpillRange();
if (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill)) {
@@ -2580,8 +2835,8 @@ SpillSlotLocator::SpillSlotLocator(RegisterAllocationData* data)
void SpillSlotLocator::LocateSpillSlots() {
auto code = data()->code();
- for (auto range : data()->live_ranges()) {
- if (range == nullptr || range->IsEmpty() || range->IsChild()) continue;
+ for (TopLevelLiveRange* range : data()->live_ranges()) {
+ if (range == nullptr || range->IsEmpty()) continue;
// We care only about ranges which spill in the frame.
if (!range->HasSpillRange()) continue;
auto spills = range->spills_at_definition();
@@ -2597,21 +2852,22 @@ OperandAssigner::OperandAssigner(RegisterAllocationData* data) : data_(data) {}
void OperandAssigner::AssignSpillSlots() {
- auto& spill_ranges = data()->spill_ranges();
+ ZoneVector<SpillRange*>& spill_ranges = data()->spill_ranges();
// Merge disjoint spill ranges
- for (size_t i = 0; i < spill_ranges.size(); i++) {
- auto range = spill_ranges[i];
+ for (size_t i = 0; i < spill_ranges.size(); ++i) {
+ SpillRange* range = spill_ranges[i];
+ if (range == nullptr) continue;
if (range->IsEmpty()) continue;
- for (size_t j = i + 1; j < spill_ranges.size(); j++) {
- auto other = spill_ranges[j];
- if (!other->IsEmpty()) {
+ for (size_t j = i + 1; j < spill_ranges.size(); ++j) {
+ SpillRange* other = spill_ranges[j];
+ if (other != nullptr && !other->IsEmpty()) {
range->TryMerge(other);
}
}
}
// Allocate slots for the merged spill ranges.
- for (auto range : spill_ranges) {
- if (range->IsEmpty()) continue;
+ for (SpillRange* range : spill_ranges) {
+ if (range == nullptr || range->IsEmpty()) continue;
// Allocate a new operand referring to the spill slot.
int byte_width = range->ByteWidth();
int index = data()->frame()->AllocateSpillSlot(byte_width);
@@ -2621,20 +2877,25 @@ void OperandAssigner::AssignSpillSlots() {
void OperandAssigner::CommitAssignment() {
- for (auto range : data()->live_ranges()) {
- if (range == nullptr || range->IsEmpty()) continue;
+ for (TopLevelLiveRange* top_range : data()->live_ranges()) {
+ if (top_range == nullptr || top_range->IsEmpty()) continue;
InstructionOperand spill_operand;
- if (range->TopLevel()->HasSpillOperand()) {
- spill_operand = *range->TopLevel()->GetSpillOperand();
- } else if (range->TopLevel()->HasSpillRange()) {
- spill_operand = range->TopLevel()->GetSpillRangeOperand();
+ if (top_range->HasSpillOperand()) {
+ spill_operand = *top_range->TopLevel()->GetSpillOperand();
+ } else if (top_range->TopLevel()->HasSpillRange()) {
+ spill_operand = top_range->TopLevel()->GetSpillRangeOperand();
+ }
+ if (top_range->is_phi()) {
+ data()->GetPhiMapValueFor(top_range)->CommitAssignment(
+ top_range->GetAssignedOperand());
}
- auto assigned = range->GetAssignedOperand();
- range->ConvertUsesToOperand(assigned, spill_operand);
- if (range->is_phi()) {
- data()->GetPhiMapValueFor(range->id())->CommitAssignment(assigned);
+ for (LiveRange* range = top_range; range != nullptr;
+ range = range->next()) {
+ auto assigned = range->GetAssignedOperand();
+ range->ConvertUsesToOperand(assigned, spill_operand);
}
- if (!range->IsChild() && !spill_operand.IsInvalid()) {
+
+ if (!spill_operand.IsInvalid()) {
// If this top level range has a child spilled in a deferred block, we use
// the range and control flow connection mechanism instead of spilling at
// definition. Refer to the ConnectLiveRanges and ResolveControlFlow
@@ -2646,13 +2907,13 @@ void OperandAssigner::CommitAssignment() {
// moves between ranges. Because of how the ranges are split around
// deferred blocks, this amounts to spilling and filling inside such
// blocks.
- if (!range->TryCommitSpillInDeferredBlock(data()->code(),
- spill_operand)) {
+ if (!top_range->TryCommitSpillInDeferredBlock(data()->code(),
+ spill_operand)) {
// Spill at definition if the range isn't spilled only in deferred
// blocks.
- range->CommitSpillsAtDefinition(
+ top_range->CommitSpillsAtDefinition(
data()->code(), spill_operand,
- range->has_slot_use() || range->spilled());
+ top_range->has_slot_use() || top_range->spilled());
}
}
}
@@ -2685,19 +2946,17 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
int last_range_start = 0;
auto reference_maps = data()->code()->reference_maps();
ReferenceMapDeque::const_iterator first_it = reference_maps->begin();
- for (LiveRange* range : data()->live_ranges()) {
+ for (TopLevelLiveRange* range : data()->live_ranges()) {
if (range == nullptr) continue;
- // Iterate over the first parts of multi-part live ranges.
- if (range->IsChild()) continue;
// Skip non-reference values.
- if (!data()->IsReference(range->id())) continue;
+ if (!data()->IsReference(range)) continue;
// Skip empty live ranges.
if (range->IsEmpty()) continue;
// Find the extent of the range and its children.
int start = range->Start().ToInstructionIndex();
int end = 0;
- for (auto cur = range; cur != nullptr; cur = cur->next()) {
+ for (LiveRange* cur = range; cur != nullptr; cur = cur->next()) {
auto this_end = cur->End();
if (this_end.ToInstructionIndex() > end)
end = this_end.ToInstructionIndex();
@@ -2742,7 +3001,7 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
// safe point position.
auto safe_point_pos =
LifetimePosition::InstructionFromInstructionIndex(safe_point);
- auto cur = range;
+ LiveRange* cur = range;
while (cur != nullptr && !cur->Covers(safe_point_pos)) {
cur = cur->next();
}
@@ -2756,15 +3015,16 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
if (!spill_operand.IsInvalid() && safe_point >= spill_index) {
TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
- range->id(), spill_index, safe_point);
+ range->vreg(), spill_index, safe_point);
map->RecordReference(AllocatedOperand::cast(spill_operand));
}
if (!cur->spilled()) {
TRACE(
- "Pointer in register for range %d (start at %d) "
+ "Pointer in register for range %d:%d (start at %d) "
"at safe point %d\n",
- cur->id(), cur->Start().value(), safe_point);
+ range->vreg(), cur->relative_id(), cur->Start().value(),
+ safe_point);
auto operand = cur->GetAssignedOperand();
DCHECK(!operand.IsStackSlot());
DCHECK_EQ(kRepTagged, AllocatedOperand::cast(operand).machine_type());
@@ -2991,10 +3251,11 @@ void LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
DelayedInsertionMap delayed_insertion_map(local_zone);
- for (auto first_range : data()->live_ranges()) {
- if (first_range == nullptr || first_range->IsChild()) continue;
- bool connect_spilled = first_range->IsSpilledOnlyInDeferredBlocks();
- for (auto second_range = first_range->next(); second_range != nullptr;
+ for (TopLevelLiveRange* top_range : data()->live_ranges()) {
+ if (top_range == nullptr) continue;
+ bool connect_spilled = top_range->IsSpilledOnlyInDeferredBlocks();
+ LiveRange* first_range = top_range;
+ for (LiveRange *second_range = first_range->next(); second_range != nullptr;
first_range = second_range, second_range = second_range->next()) {
auto pos = second_range->Start();
// Add gap move if the two live ranges touch and there is no block
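The register-allocator.cc changes above consistently replace the old single id() in trace output with a vreg():relative_id() pair, and SplitAt now builds and returns the child range itself instead of filling a caller-provided one. A minimal sketch of that scheme, using simplified stand-in types rather than V8's actual classes:

#include <cassert>
#include <cstdio>

// Hypothetical, simplified stand-ins for LiveRange/TopLevelLiveRange.
struct Range {
  int vreg;          // virtual register of the top level
  int relative_id;   // unique among children of the same vreg
  int start, end;    // covered interval [start, end)
  Range* next = nullptr;
};

// Split 'range' at 'pos': the original keeps [start, pos), the returned
// child covers [pos, end) and is linked in after its parent, mirroring
// the new SplitAt, which returns the child instead of filling a
// caller-provided result range. (The toy allocation is leaked.)
Range* SplitAt(Range* range, int pos, int next_child_id) {
  assert(pos > range->start && pos < range->end);
  Range* child = new Range{range->vreg, next_child_id, pos, range->end};
  child->next = range->next;
  range->end = pos;
  range->next = child;
  return child;
}

int main() {
  Range top{3, 0, 10, 40};
  SplitAt(&top, 25, /*next_child_id=*/1);
  // Trace with the "%d:%d" convention the patch introduces.
  for (Range* r = &top; r != nullptr; r = r->next)
    std::printf("range %d:%d covers [%d, %d)\n", r->vreg, r->relative_id,
                r->start, r->end);
  return 0;
}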
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index 2e63d36e12..117ddedbcd 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -273,28 +273,28 @@ class UsePosition final : public ZoneObject {
class SpillRange;
-
+class RegisterAllocationData;
+class TopLevelLiveRange;
+class LiveRangeGroup;
// Representation of SSA values' live ranges as a collection of (continuous)
// intervals over the instruction ordering.
-class LiveRange final : public ZoneObject {
+class LiveRange : public ZoneObject {
public:
- explicit LiveRange(int id, MachineType machine_type);
-
UseInterval* first_interval() const { return first_interval_; }
UsePosition* first_pos() const { return first_pos_; }
- LiveRange* parent() const { return parent_; }
- LiveRange* TopLevel() { return (parent_ == nullptr) ? this : parent_; }
- const LiveRange* TopLevel() const {
- return (parent_ == nullptr) ? this : parent_;
- }
+ TopLevelLiveRange* TopLevel() { return top_level_; }
+ const TopLevelLiveRange* TopLevel() const { return top_level_; }
+
+ bool IsTopLevel() const;
+
LiveRange* next() const { return next_; }
- bool IsChild() const { return parent() != nullptr; }
- int id() const { return id_; }
- bool IsFixed() const { return id_ < 0; }
+
+ int relative_id() const { return relative_id_; }
+
bool IsEmpty() const { return first_interval() == nullptr; }
+
InstructionOperand GetAssignedOperand() const;
- int spill_start_index() const { return spill_start_index_; }
MachineType machine_type() const { return MachineTypeField::decode(bits_); }
@@ -310,22 +310,6 @@ class LiveRange final : public ZoneObject {
RegisterKind kind() const;
- // Correct only for parent.
- bool is_phi() const { return IsPhiField::decode(bits_); }
- void set_is_phi(bool value) { bits_ = IsPhiField::update(bits_, value); }
-
- // Correct only for parent.
- bool is_non_loop_phi() const { return IsNonLoopPhiField::decode(bits_); }
- void set_is_non_loop_phi(bool value) {
- bits_ = IsNonLoopPhiField::update(bits_, value);
- }
-
- // Relevant only for parent.
- bool has_slot_use() const { return HasSlotUseField::decode(bits_); }
- void set_has_slot_use(bool value) {
- bits_ = HasSlotUseField::update(bits_, value);
- }
-
// Returns use position in this live range that follows both start
// and last processed use position.
UsePosition* NextUsePosition(LifetimePosition start) const;
@@ -350,11 +334,18 @@ class LiveRange final : public ZoneObject {
// Can this live range be spilled at this position.
bool CanBeSpilled(LifetimePosition pos) const;
- // Split this live range at the given position which must follow the start of
- // the range.
+ // Splitting primitive used by both splitting and splintering members.
+ // Performs the split, but does not link the resulting ranges.
+ // The given position must follow the start of the range.
// All uses following the given position will be moved from this
// live range to the result live range.
- void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
+ // The current range will terminate at position, while result will start from
+ // position.
+ void DetachAt(LifetimePosition position, LiveRange* result, Zone* zone);
+
+ // Detaches at position, and then links the resulting ranges. Returns the
+ // child, which starts at position.
+ LiveRange* SplitAt(LifetimePosition position, Zone* zone);
// Returns nullptr when no register is hinted, otherwise sets register_index.
UsePosition* FirstHintPosition(int* register_index) const;
@@ -378,12 +369,151 @@ class LiveRange final : public ZoneObject {
return last_interval_->end();
}
+ bool ShouldBeAllocatedBefore(const LiveRange* other) const;
+ bool CanCover(LifetimePosition position) const;
+ bool Covers(LifetimePosition position) const;
+ LifetimePosition FirstIntersection(LiveRange* other) const;
+
+ void Verify() const;
+
+ void ConvertUsesToOperand(const InstructionOperand& op,
+ const InstructionOperand& spill_op);
+ void SetUseHints(int register_index);
+ void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
+
+ // Used solely by the Greedy Allocator:
+ unsigned GetSize();
+ float weight() const { return weight_; }
+ void set_weight(float weight) { weight_ = weight; }
+ LiveRangeGroup* group() const { return group_; }
+ void set_group(LiveRangeGroup* group) { group_ = group; }
+
+ static const int kInvalidSize = -1;
+ static const float kInvalidWeight;
+ static const float kMaxWeight;
+
+ private:
+ friend class TopLevelLiveRange;
+ explicit LiveRange(int relative_id, MachineType machine_type,
+ TopLevelLiveRange* top_level);
+
+ void AppendAsChild(TopLevelLiveRange* other);
+ void UpdateParentForAllChildren(TopLevelLiveRange* new_top_level);
+
+ void set_spilled(bool value) { bits_ = SpilledField::update(bits_, value); }
+
+ UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
+ void AdvanceLastProcessedMarker(UseInterval* to_start_of,
+ LifetimePosition but_not_past) const;
+
+ typedef BitField<bool, 0, 1> SpilledField;
+ typedef BitField<int32_t, 6, 6> AssignedRegisterField;
+ typedef BitField<MachineType, 12, 15> MachineTypeField;
+
+ // Unique among children and splinters of the same virtual register.
+ int relative_id_;
+ uint32_t bits_;
+ UseInterval* last_interval_;
+ UseInterval* first_interval_;
+ UsePosition* first_pos_;
+ TopLevelLiveRange* top_level_;
+ LiveRange* next_;
+ // This is used as a cache, it doesn't affect correctness.
+ mutable UseInterval* current_interval_;
+ // This is used as a cache, it doesn't affect correctness.
+ mutable UsePosition* last_processed_use_;
+ // This is used as a cache, it's invalid outside of BuildLiveRanges.
+ mutable UsePosition* current_hint_position_;
+
+ // greedy: the number of LifetimePositions covered by this range. Used to
+ // prioritize selecting live ranges for register assignment, as well as
+ // in weight calculations.
+ int size_;
+
+ // greedy: a metric for resolving conflicts between ranges with an assigned
+ // register and ranges that intersect them and need a register.
+ float weight_;
+
+  // greedy: grouping
+ LiveRangeGroup* group_;
+
+ DISALLOW_COPY_AND_ASSIGN(LiveRange);
+};
+
+
+class LiveRangeGroup final : public ZoneObject {
+ public:
+ explicit LiveRangeGroup(Zone* zone) : ranges_(zone) {}
+ ZoneVector<LiveRange*>& ranges() { return ranges_; }
+ const ZoneVector<LiveRange*>& ranges() const { return ranges_; }
+
+ // TODO(mtrofin): populate assigned register and use in weight calculation.
+ int assigned_register() const { return assigned_register_; }
+ void set_assigned_register(int reg) { assigned_register_ = reg; }
+
+ private:
+ ZoneVector<LiveRange*> ranges_;
+ int assigned_register_;
+ DISALLOW_COPY_AND_ASSIGN(LiveRangeGroup);
+};
+
+
+class TopLevelLiveRange final : public LiveRange {
+ public:
+ explicit TopLevelLiveRange(int vreg, MachineType machine_type);
+ int spill_start_index() const { return spill_start_index_; }
+
+ bool IsFixed() const { return vreg_ < 0; }
+
+ bool is_phi() const { return IsPhiField::decode(bits_); }
+ void set_is_phi(bool value) { bits_ = IsPhiField::update(bits_, value); }
+
+ bool is_non_loop_phi() const { return IsNonLoopPhiField::decode(bits_); }
+ void set_is_non_loop_phi(bool value) {
+ bits_ = IsNonLoopPhiField::update(bits_, value);
+ }
+
+ bool has_slot_use() const { return HasSlotUseField::decode(bits_); }
+ void set_has_slot_use(bool value) {
+ bits_ = HasSlotUseField::update(bits_, value);
+ }
+
+ // Add a new interval or a new use position to this live range.
+ void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
+ void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
+ void AddUsePosition(UsePosition* pos);
+
+ // Shorten the most recently added interval by setting a new start.
+ void ShortenTo(LifetimePosition start);
+
+ // Detaches between start and end, and attributes the resulting range to
+ // result.
+ // The current range is pointed to as "splintered_from". No parent/child
+ // relationship is established between this and result.
+ void Splinter(LifetimePosition start, LifetimePosition end,
+ TopLevelLiveRange* result, Zone* zone);
+
+ // Assuming other was splintered from this range, embeds other and its
+ // children as part of the children sequence of this range.
+ void Merge(TopLevelLiveRange* other, Zone* zone);
+
+ // Spill range management.
+ void SetSpillRange(SpillRange* spill_range);
enum class SpillType { kNoSpillType, kSpillOperand, kSpillRange };
+ void set_spill_type(SpillType value) {
+ bits_ = SpillTypeField::update(bits_, value);
+ }
SpillType spill_type() const { return SpillTypeField::decode(bits_); }
InstructionOperand* GetSpillOperand() const {
DCHECK(spill_type() == SpillType::kSpillOperand);
return spill_operand_;
}
+
+ SpillRange* GetAllocatedSpillRange() const {
+ DCHECK(spill_type() != SpillType::kSpillOperand);
+ return spill_range_;
+ }
+
SpillRange* GetSpillRange() const {
DCHECK(spill_type() == SpillType::kSpillRange);
return spill_range_;
@@ -395,16 +525,21 @@ class LiveRange final : public ZoneObject {
return spill_type() == SpillType::kSpillOperand;
}
bool HasSpillRange() const { return spill_type() == SpillType::kSpillRange; }
+
AllocatedOperand GetSpillRangeOperand() const;
void SpillAtDefinition(Zone* zone, int gap_index,
InstructionOperand* operand);
void SetSpillOperand(InstructionOperand* operand);
- void SetSpillRange(SpillRange* spill_range);
+ void SetSpillStartIndex(int start) {
+ spill_start_index_ = Min(start, spill_start_index_);
+ }
+
+ void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
void CommitSpillsAtDefinition(InstructionSequence* sequence,
const InstructionOperand& operand,
bool might_be_duplicated);
- // This must be applied on top level ranges.
+
// If all the children of this range are spilled in deferred blocks, and if
// for any non-spilled child with a use position requiring a slot, that range
// is contained in a deferred block, mark the range as
@@ -415,102 +550,55 @@ class LiveRange final : public ZoneObject {
bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
const InstructionOperand& spill_operand);
- void SetSpillStartIndex(int start) {
- spill_start_index_ = Min(start, spill_start_index_);
+ TopLevelLiveRange* splintered_from() const { return splintered_from_; }
+ bool IsSplinter() const { return splintered_from_ != nullptr; }
+ bool MayRequireSpillRange() const {
+ DCHECK(!IsSplinter());
+ return !HasSpillOperand() && spill_range_ == nullptr;
}
+ void UpdateSpillRangePostMerge(TopLevelLiveRange* merged);
+ int vreg() const { return vreg_; }
- bool ShouldBeAllocatedBefore(const LiveRange* other) const;
- bool CanCover(LifetimePosition position) const;
- bool Covers(LifetimePosition position) const;
- LifetimePosition FirstIntersection(LiveRange* other) const;
-
- // Add a new interval or a new use position to this live range.
- void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
- void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
- void AddUsePosition(UsePosition* pos);
-
- // Shorten the most recently added interval by setting a new start.
- void ShortenTo(LifetimePosition start);
-
- void Verify() const;
+ int GetNextChildId() {
+ return IsSplinter() ? splintered_from()->GetNextChildId()
+ : ++last_child_id_;
+ }
- void ConvertUsesToOperand(const InstructionOperand& op,
- const InstructionOperand& spill_op);
- void SetUseHints(int register_index);
- void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
+ bool IsSpilledOnlyInDeferredBlocks() const {
+ return spilled_in_deferred_blocks_;
+ }
struct SpillAtDefinitionList;
SpillAtDefinitionList* spills_at_definition() const {
return spills_at_definition_;
}
-
- // Used solely by the Greedy Allocator:
- unsigned GetSize();
- float weight() const { return weight_; }
- void set_weight(float weight) { weight_ = weight; }
-
- bool IsSpilledOnlyInDeferredBlocks() const {
- return spilled_in_deferred_block_;
- }
-
- static const int kInvalidSize = -1;
- static const float kInvalidWeight;
- static const float kMaxWeight;
+ void set_last_child(LiveRange* range) { last_child_ = range; }
+ LiveRange* last_child() const { return last_child_; }
private:
- void set_spill_type(SpillType value) {
- bits_ = SpillTypeField::update(bits_, value);
- }
-
- void set_spilled(bool value) { bits_ = SpilledField::update(bits_, value); }
-
- UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
- void AdvanceLastProcessedMarker(UseInterval* to_start_of,
- LifetimePosition but_not_past) const;
-
- typedef BitField<bool, 0, 1> SpilledField;
typedef BitField<bool, 1, 1> HasSlotUseField;
typedef BitField<bool, 2, 1> IsPhiField;
typedef BitField<bool, 3, 1> IsNonLoopPhiField;
typedef BitField<SpillType, 4, 2> SpillTypeField;
- typedef BitField<int32_t, 6, 6> AssignedRegisterField;
- typedef BitField<MachineType, 12, 15> MachineTypeField;
- int id_;
- int spill_start_index_;
- uint32_t bits_;
- UseInterval* last_interval_;
- UseInterval* first_interval_;
- UsePosition* first_pos_;
- LiveRange* parent_;
- LiveRange* next_;
+ int vreg_;
+ int last_child_id_;
+ TopLevelLiveRange* splintered_from_;
union {
// Correct value determined by spill_type()
InstructionOperand* spill_operand_;
SpillRange* spill_range_;
};
SpillAtDefinitionList* spills_at_definition_;
- // This is used as a cache, it doesn't affect correctness.
- mutable UseInterval* current_interval_;
- // This is used as a cache, it doesn't affect correctness.
- mutable UsePosition* last_processed_use_;
- // This is used as a cache, it's invalid outside of BuildLiveRanges.
- mutable UsePosition* current_hint_position_;
-
- // greedy: the number of LifetimePositions covered by this range. Used to
- // prioritize selecting live ranges for register assignment, as well as
- // in weight calculations.
- int size_;
-
- // greedy: a metric for resolving conflicts between ranges with an assigned
- // register and ranges that intersect them and need a register.
- float weight_;
-
// TODO(mtrofin): generalize spilling after definition, currently specialized
// just for spill in a single deferred block.
- bool spilled_in_deferred_block_;
- DISALLOW_COPY_AND_ASSIGN(LiveRange);
+ bool spilled_in_deferred_blocks_;
+ int spill_start_index_;
+ LiveRange* last_child_;
+ LiveRange* last_insertion_point_;
+
+ DISALLOW_COPY_AND_ASSIGN(TopLevelLiveRange);
};
@@ -527,7 +615,7 @@ std::ostream& operator<<(std::ostream& os,
class SpillRange final : public ZoneObject {
public:
static const int kUnassignedSlot = -1;
- SpillRange(LiveRange* range, Zone* zone);
+ SpillRange(TopLevelLiveRange* range, Zone* zone);
UseInterval* interval() const { return use_interval_; }
// Currently, only 4 or 8 byte slots are supported.
@@ -543,18 +631,25 @@ class SpillRange final : public ZoneObject {
DCHECK_NE(kUnassignedSlot, assigned_slot_);
return assigned_slot_;
}
+ const ZoneVector<TopLevelLiveRange*>& live_ranges() const {
+ return live_ranges_;
+ }
+ ZoneVector<TopLevelLiveRange*>& live_ranges() { return live_ranges_; }
+ int byte_width() const { return byte_width_; }
+ RegisterKind kind() const { return kind_; }
private:
LifetimePosition End() const { return end_position_; }
- ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
bool IsIntersectingWith(SpillRange* other) const;
// Merge intervals, making sure the use intervals are sorted
void MergeDisjointIntervals(UseInterval* other);
- ZoneVector<LiveRange*> live_ranges_;
+ ZoneVector<TopLevelLiveRange*> live_ranges_;
UseInterval* use_interval_;
LifetimePosition end_position_;
int assigned_slot_;
+ int byte_width_;
+ RegisterKind kind_;
DISALLOW_COPY_AND_ASSIGN(SpillRange);
};
@@ -599,19 +694,24 @@ class RegisterAllocationData final : public ZoneObject {
InstructionSequence* code,
const char* debug_name = nullptr);
- const ZoneVector<LiveRange*>& live_ranges() const { return live_ranges_; }
- ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
- const ZoneVector<LiveRange*>& fixed_live_ranges() const {
+ const ZoneVector<TopLevelLiveRange*>& live_ranges() const {
+ return live_ranges_;
+ }
+ ZoneVector<TopLevelLiveRange*>& live_ranges() { return live_ranges_; }
+ const ZoneVector<TopLevelLiveRange*>& fixed_live_ranges() const {
return fixed_live_ranges_;
}
- ZoneVector<LiveRange*>& fixed_live_ranges() { return fixed_live_ranges_; }
- ZoneVector<LiveRange*>& fixed_double_live_ranges() {
+ ZoneVector<TopLevelLiveRange*>& fixed_live_ranges() {
+ return fixed_live_ranges_;
+ }
+ ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() {
return fixed_double_live_ranges_;
}
- const ZoneVector<LiveRange*>& fixed_double_live_ranges() const {
+ const ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() const {
return fixed_double_live_ranges_;
}
ZoneVector<BitVector*>& live_in_sets() { return live_in_sets_; }
+ ZoneVector<BitVector*>& live_out_sets() { return live_out_sets_; }
ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
DelayedReferences& delayed_references() { return delayed_references_; }
InstructionSequence* code() const { return code_; }
@@ -627,19 +727,20 @@ class RegisterAllocationData final : public ZoneObject {
MachineType MachineTypeFor(int virtual_register);
- LiveRange* LiveRangeFor(int index);
+ TopLevelLiveRange* GetOrCreateLiveRangeFor(int index);
// Creates a new live range.
- LiveRange* NewLiveRange(int index, MachineType machine_type);
- LiveRange* NewChildRangeFor(LiveRange* range);
+ TopLevelLiveRange* NewLiveRange(int index, MachineType machine_type);
+ TopLevelLiveRange* NextLiveRange(MachineType machine_type);
- SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
+ SpillRange* AssignSpillRangeToLiveRange(TopLevelLiveRange* range);
+ SpillRange* CreateSpillRangeForLiveRange(TopLevelLiveRange* range);
MoveOperands* AddGapMove(int index, Instruction::GapPosition position,
const InstructionOperand& from,
const InstructionOperand& to);
- bool IsReference(int virtual_register) const {
- return code()->IsReference(virtual_register);
+ bool IsReference(TopLevelLiveRange* top_range) const {
+ return code()->IsReference(top_range->vreg());
}
bool ExistsUseWithoutDefinition();
@@ -648,6 +749,7 @@ class RegisterAllocationData final : public ZoneObject {
PhiMapValue* InitializePhiMap(const InstructionBlock* block,
PhiInstruction* phi);
+ PhiMapValue* GetPhiMapValueFor(TopLevelLiveRange* top_range);
PhiMapValue* GetPhiMapValueFor(int virtual_register);
bool IsBlockBoundary(LifetimePosition pos) const;
@@ -656,8 +758,11 @@ class RegisterAllocationData final : public ZoneObject {
void Print(const LiveRange* range, bool with_children = false);
void Print(const InstructionOperand& op);
void Print(const MoveOperands* move);
+ void Print(const SpillRange* spill_range);
private:
+ int GetNextLiveRangeId();
+
Zone* const allocation_zone_;
Frame* const frame_;
InstructionSequence* const code_;
@@ -665,9 +770,10 @@ class RegisterAllocationData final : public ZoneObject {
const RegisterConfiguration* const config_;
PhiMap phi_map_;
ZoneVector<BitVector*> live_in_sets_;
- ZoneVector<LiveRange*> live_ranges_;
- ZoneVector<LiveRange*> fixed_live_ranges_;
- ZoneVector<LiveRange*> fixed_double_live_ranges_;
+ ZoneVector<BitVector*> live_out_sets_;
+ ZoneVector<TopLevelLiveRange*> live_ranges_;
+ ZoneVector<TopLevelLiveRange*> fixed_live_ranges_;
+ ZoneVector<TopLevelLiveRange*> fixed_double_live_ranges_;
ZoneVector<SpillRange*> spill_ranges_;
DelayedReferences delayed_references_;
BitVector* assigned_registers_;
@@ -694,12 +800,6 @@ class ConstraintBuilder final : public ZoneObject {
InstructionSequence* code() const { return data()->code(); }
Zone* allocation_zone() const { return data()->allocation_zone(); }
- Instruction* InstructionAt(int index) { return code()->InstructionAt(index); }
- bool IsReference(int virtual_register) const {
- return data()->IsReference(virtual_register);
- }
- LiveRange* LiveRangeFor(int index) { return data()->LiveRangeFor(index); }
-
InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos,
bool is_tagged);
void MeetRegisterConstraints(const InstructionBlock* block);
@@ -721,6 +821,8 @@ class LiveRangeBuilder final : public ZoneObject {
// Phase 3: compute liveness of all virtual registers.
void BuildLiveRanges();
+ static BitVector* ComputeLiveOut(const InstructionBlock* block,
+ RegisterAllocationData* data);
private:
RegisterAllocationData* data() const { return data_; }
@@ -732,12 +834,9 @@ class LiveRangeBuilder final : public ZoneObject {
return data()->live_in_sets();
}
- LiveRange* LiveRangeFor(int index) { return data()->LiveRangeFor(index); }
-
void Verify() const;
// Liveness analysis support.
- BitVector* ComputeLiveOut(const InstructionBlock* block);
void AddInitialIntervals(const InstructionBlock* block, BitVector* live_out);
void ProcessInstructions(const InstructionBlock* block, BitVector* live);
void ProcessPhis(const InstructionBlock* block, BitVector* live);
@@ -745,8 +844,8 @@ class LiveRangeBuilder final : public ZoneObject {
static int FixedLiveRangeID(int index) { return -index - 1; }
int FixedDoubleLiveRangeID(int index);
- LiveRange* FixedLiveRangeFor(int index);
- LiveRange* FixedDoubleLiveRangeFor(int index);
+ TopLevelLiveRange* FixedLiveRangeFor(int index);
+ TopLevelLiveRange* FixedDoubleLiveRangeFor(int index);
void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
@@ -756,7 +855,7 @@ class LiveRangeBuilder final : public ZoneObject {
UsePosition* NewUsePosition(LifetimePosition pos) {
return NewUsePosition(pos, nullptr, nullptr, UsePositionHintType::kNone);
}
- LiveRange* LiveRangeFor(InstructionOperand* operand);
+ TopLevelLiveRange* LiveRangeFor(InstructionOperand* operand);
// Helper methods for building intervals.
UsePosition* Define(LifetimePosition position, InstructionOperand* operand,
void* hint, UsePositionHintType hint_type);
@@ -790,8 +889,6 @@ class RegisterAllocator : public ZoneObject {
Zone* allocation_zone() const { return data()->allocation_zone(); }
- LiveRange* LiveRangeFor(int index) { return data()->LiveRangeFor(index); }
-
// Split the given range at the given position.
// If range starts at or after the given position then the
// original range is returned.
@@ -817,7 +914,7 @@ class RegisterAllocator : public ZoneObject {
LifetimePosition FindOptimalSpillingPos(LiveRange* range,
LifetimePosition pos);
- const ZoneVector<LiveRange*>& GetFixedRegisters() const;
+ const ZoneVector<TopLevelLiveRange*>& GetFixedRegisters() const;
const char* RegisterName(int allocation_index) const;
private:
@@ -861,7 +958,7 @@ class LinearScanAllocator final : public RegisterAllocator {
void InactiveToActive(LiveRange* range);
// Helper methods for allocating registers.
- bool TryReuseSpillForPhi(LiveRange* range);
+ bool TryReuseSpillForPhi(TopLevelLiveRange* range);
bool TryAllocateFreeReg(LiveRange* range);
void AllocateBlockedReg(LiveRange* range);
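The header now splits the old monolithic LiveRange into a LiveRange base plus a TopLevelLiveRange that owns the per-vreg state (phi flags, spill data, splinter links). One detail worth illustrating is GetNextChildId(): a splinter delegates id generation to the range it was splintered from, so relative ids stay unique across the whole virtual register. A toy model of just that bookkeeping, with hypothetical names rather than V8's classes:

#include <iostream>

// Hypothetical reduction of TopLevelLiveRange's child-id bookkeeping.
struct TopLevel {
  int vreg;
  int last_child_id = 0;
  TopLevel* splintered_from = nullptr;

  bool IsSplinter() const { return splintered_from != nullptr; }
  // A splinter never mints ids itself; it asks its origin range.
  int GetNextChildId() {
    return IsSplinter() ? splintered_from->GetNextChildId()
                        : ++last_child_id;
  }
};

int main() {
  TopLevel original{7};
  TopLevel splinter{7};
  splinter.splintered_from = &original;

  // Ids interleave but never collide, regardless of which range splits.
  int a = original.GetNextChildId();  // 1
  int b = splinter.GetNextChildId();  // 2, delegated to 'original'
  int c = original.GetNextChildId();  // 3
  std::cout << a << " " << b << " " << c << "\n";
  return 0;
}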
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index 9538684af2..0c38e020ad 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -30,7 +30,9 @@ class RepresentationChanger {
type_error_(false) {}
// TODO(titzer): should Word64 also be implicitly convertible to others?
- static const MachineTypeUnion rWord = kRepWord8 | kRepWord16 | kRepWord32;
+ static bool IsWord(MachineTypeUnion type) {
+ return (type & (kRepWord8 | kRepWord16 | kRepWord32)) != 0;
+ }
Node* GetRepresentationFor(Node* node, MachineTypeUnion output_type,
MachineTypeUnion use_type) {
@@ -42,7 +44,7 @@ class RepresentationChanger {
// Representations are the same. That's a no-op.
return node;
}
- if ((use_type & rWord) && (output_type & rWord)) {
+ if (IsWord(use_type) && IsWord(output_type)) {
// Both are words less than or equal to 32-bits.
// Since loads of integers from memory implicitly sign or zero extend the
// value to the full machine word size and stores implicitly truncate,
@@ -57,7 +59,7 @@ class RepresentationChanger {
return GetFloat64RepresentationFor(node, output_type);
} else if (use_type & kRepBit) {
return GetBitRepresentationFor(node, output_type);
- } else if (use_type & rWord) {
+ } else if (IsWord(use_type)) {
return GetWord32RepresentationFor(node, output_type,
use_type & kTypeUint32);
} else if (use_type & kRepWord64) {
@@ -75,7 +77,7 @@ class RepresentationChanger {
return node; // No change necessary.
case IrOpcode::kInt32Constant:
if (output_type & kTypeUint32) {
- uint32_t value = OpParameter<uint32_t>(node);
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
return jsgraph()->Constant(static_cast<double>(value));
} else if (output_type & kTypeInt32) {
int32_t value = OpParameter<int32_t>(node);
@@ -97,7 +99,7 @@ class RepresentationChanger {
const Operator* op;
if (output_type & kRepBit) {
op = simplified()->ChangeBitToBool();
- } else if (output_type & rWord) {
+ } else if (IsWord(output_type)) {
if (output_type & kTypeUint32) {
op = simplified()->ChangeUint32ToTagged();
} else if (output_type & kTypeInt32) {
@@ -125,7 +127,7 @@ class RepresentationChanger {
DoubleToFloat32(OpParameter<double>(node)));
case IrOpcode::kInt32Constant:
if (output_type & kTypeUint32) {
- uint32_t value = OpParameter<uint32_t>(node);
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
return jsgraph()->Float32Constant(static_cast<float>(value));
} else {
int32_t value = OpParameter<int32_t>(node);
@@ -140,7 +142,7 @@ class RepresentationChanger {
const Operator* op;
if (output_type & kRepBit) {
return TypeError(node, output_type, kRepFloat32);
- } else if (output_type & rWord) {
+ } else if (IsWord(output_type)) {
if (output_type & kTypeUint32) {
op = machine()->ChangeUint32ToFloat64();
} else {
@@ -169,7 +171,7 @@ class RepresentationChanger {
return jsgraph()->Float64Constant(OpParameter<double>(node));
case IrOpcode::kInt32Constant:
if (output_type & kTypeUint32) {
- uint32_t value = OpParameter<uint32_t>(node);
+ uint32_t value = static_cast<uint32_t>(OpParameter<int32_t>(node));
return jsgraph()->Float64Constant(static_cast<double>(value));
} else {
int32_t value = OpParameter<int32_t>(node);
@@ -186,7 +188,7 @@ class RepresentationChanger {
const Operator* op;
if (output_type & kRepBit) {
return TypeError(node, output_type, kRepFloat64);
- } else if (output_type & rWord) {
+ } else if (IsWord(output_type)) {
if (output_type & kTypeUint32) {
op = machine()->ChangeUint32ToFloat64();
} else {
@@ -292,7 +294,7 @@ class RepresentationChanger {
// Eagerly fold representation changes for constants.
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- Handle<Object> value = OpParameter<Unique<Object> >(node).handle();
+ Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
DCHECK(value.is_identical_to(factory()->true_value()) ||
value.is_identical_to(factory()->false_value()));
return jsgraph()->Int32Constant(
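Two mechanical cleanups recur in representation-change.h: the file-scope rWord mask becomes an IsWord() predicate, and OpParameter<uint32_t> on an Int32Constant becomes static_cast<uint32_t>(OpParameter<int32_t>(node)), reading the payload with its declared type before reinterpreting it. A small sketch of the predicate, with illustrative flag values rather than V8's real MachineType encoding:

#include <cstdint>
#include <iostream>

using MachineTypeUnion = uint32_t;
constexpr MachineTypeUnion kRepWord8 = 1u << 0;
constexpr MachineTypeUnion kRepWord16 = 1u << 1;
constexpr MachineTypeUnion kRepWord32 = 1u << 2;
constexpr MachineTypeUnion kRepFloat64 = 1u << 3;

// True when the union names any word representation of 32 bits or less,
// replacing comparisons against a shared rWord constant.
static bool IsWord(MachineTypeUnion type) {
  return (type & (kRepWord8 | kRepWord16 | kRepWord32)) != 0;
}

int main() {
  std::cout << std::boolalpha
            << IsWord(kRepWord16 | kRepFloat64) << " "  // true
            << IsWord(kRepFloat64) << "\n";             // false
  return 0;
}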
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 69ece96d4e..63f148d926 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -345,13 +345,9 @@ std::ostream& operator<<(std::ostream& os, const Schedule& s) {
for (Node* node : *block) {
os << " " << *node;
if (NodeProperties::IsTyped(node)) {
- Bounds bounds = NodeProperties::GetBounds(node);
+ Type* type = NodeProperties::GetType(node);
os << " : ";
- bounds.lower->PrintTo(os);
- if (!bounds.upper->Is(bounds.lower)) {
- os << "..";
- bounds.upper->PrintTo(os);
- }
+ type->PrintTo(os);
}
os << "\n";
}
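The schedule.cc hunk is one instance of a migration visible again in simplified-lowering.cc below: per-node Bounds (a lower/upper pair of types) collapse to a single Type*, so GetBounds(node).upper becomes GetType(node). A hypothetical miniature of the printer before and after, not V8's real types:

#include <iostream>
#include <string>

struct Type {
  std::string name;
  bool Is(const Type& other) const { return name == other.name; }
  void PrintTo(std::ostream& os) const { os << name; }
};

struct Bounds {  // the old representation: an interval of types
  Type* lower;
  Type* upper;
};

// Old printer: show "lower..upper" unless the upper bound adds nothing.
void PrintBounds(std::ostream& os, const Bounds& b) {
  b.lower->PrintTo(os);
  if (!b.upper->Is(*b.lower)) {
    os << "..";
    b.upper->PrintTo(os);
  }
}

// New printer: a single type, as in the patched operator<<.
void PrintType(std::ostream& os, const Type& t) { t.PrintTo(os); }

int main() {
  Type lower{"Signed32"}, upper{"Number"};
  Bounds b{&lower, &upper};
  PrintBounds(std::cout, b);    // Signed32..Number
  std::cout << " -> ";
  PrintType(std::cout, upper);  // Number
  std::cout << "\n";
  return 0;
}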
diff --git a/deps/v8/src/compiler/select-lowering.cc b/deps/v8/src/compiler/select-lowering.cc
index 2e0f0d1a34..28a5d922b7 100644
--- a/deps/v8/src/compiler/select-lowering.cc
+++ b/deps/v8/src/compiler/select-lowering.cc
@@ -8,6 +8,7 @@
#include "src/compiler/diamond.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
namespace v8 {
namespace internal {
@@ -51,10 +52,10 @@ Reduction SelectLowering::Reduce(Node* node) {
}
// Create a Phi hanging off the previously determined merge.
- node->set_op(common()->Phi(p.type(), 2));
node->ReplaceInput(0, vthen);
node->ReplaceInput(1, velse);
node->ReplaceInput(2, merge);
+ NodeProperties::ChangeOp(node, common()->Phi(p.type(), 2));
return Changed(node);
}
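select-lowering.cc shows the pattern repeated through the rest of this diff: direct node->set_op(...) calls become NodeProperties::ChangeOp(node, ...), invoked only after the inputs have been rewritten. A small sketch of the shape of that pattern, using simplified stand-ins rather than V8's API, to show why routing the mutation through one helper is attractive:

#include <cassert>
#include <vector>

struct Operator {
  int input_count;
};

struct Node {
  const Operator* op;
  std::vector<Node*> inputs;
};

namespace NodeProperties {
// One audited place to enforce operator/input consistency, which
// scattered direct set_op() assignments could not.
void ChangeOp(Node* node, const Operator* new_op) {
  assert(static_cast<int>(node->inputs.size()) == new_op->input_count);
  node->op = new_op;
}
}  // namespace NodeProperties

int main() {
  Operator select{3}, phi2{3};  // a 2-value Phi also takes a control input
  Node cond{nullptr, {}}, vthen{nullptr, {}}, velse{nullptr, {}},
      merge{nullptr, {}};
  Node node{&select, {&cond, &vthen, &velse}};
  // Mirror SelectLowering::Reduce: fix up the inputs, then change the op
  // as the final step.
  node.inputs = {&vthen, &velse, &merge};
  NodeProperties::ChangeOp(&node, &phi2);
  return 0;
}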
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index eafd3b6a85..7d495bf983 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -15,7 +15,6 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
#include "src/compiler/representation-change.h"
-#include "src/compiler/simplified-lowering.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/source-position.h"
#include "src/objects.h"
@@ -179,8 +178,8 @@ class RepresentationSelector {
bool BothInputsAre(Node* node, Type* type) {
DCHECK_EQ(2, node->InputCount());
- return NodeProperties::GetBounds(node->InputAt(0)).upper->Is(type) &&
- NodeProperties::GetBounds(node->InputAt(1)).upper->Is(type);
+ return NodeProperties::GetType(node->InputAt(0))->Is(type) &&
+ NodeProperties::GetType(node->InputAt(1))->Is(type);
}
void ProcessTruncateWord32Input(Node* node, int index, MachineTypeUnion use) {
@@ -267,9 +266,12 @@ class RepresentationSelector {
// Helper for binops of the R x L -> O variety.
void VisitBinop(Node* node, MachineTypeUnion left_use,
MachineTypeUnion right_use, MachineTypeUnion output) {
- DCHECK_EQ(2, node->InputCount());
+ DCHECK_EQ(2, node->op()->ValueInputCount());
ProcessInput(node, 0, left_use);
ProcessInput(node, 1, right_use);
+ for (int i = 2; i < node->InputCount(); i++) {
+ Enqueue(node->InputAt(i));
+ }
SetOutput(node, output);
}
@@ -314,7 +316,7 @@ class RepresentationSelector {
// Infer representation for phi-like nodes.
MachineType GetRepresentationForPhi(Node* node, MachineTypeUnion use) {
// Phis adapt to the output representation their uses demand.
- Type* upper = NodeProperties::GetBounds(node).upper;
+ Type* upper = NodeProperties::GetType(node);
if ((use & kRepMask) == kRepFloat32) {
// only float32 uses.
return kRepFloat32;
@@ -356,7 +358,7 @@ class RepresentationSelector {
ProcessInput(node, 0, kRepBit);
MachineType output = GetRepresentationForPhi(node, use);
- Type* upper = NodeProperties::GetBounds(node).upper;
+ Type* upper = NodeProperties::GetType(node);
MachineType output_type =
static_cast<MachineType>(changer_->TypeFromUpperBound(upper) | output);
SetOutput(node, output_type);
@@ -366,7 +368,8 @@ class RepresentationSelector {
SelectParameters p = SelectParametersOf(node->op());
MachineType type = static_cast<MachineType>(output_type);
if (type != p.type()) {
- node->set_op(lowering->common()->Select(type, p.hint()));
+ NodeProperties::ChangeOp(node,
+ lowering->common()->Select(type, p.hint()));
}
// Convert inputs to the output representation of this select.
@@ -386,7 +389,7 @@ class RepresentationSelector {
SimplifiedLowering* lowering) {
MachineType output = GetRepresentationForPhi(node, use);
- Type* upper = NodeProperties::GetBounds(node).upper;
+ Type* upper = NodeProperties::GetType(node);
MachineType output_type =
static_cast<MachineType>(changer_->TypeFromUpperBound(upper) | output);
SetOutput(node, output_type);
@@ -397,7 +400,7 @@ class RepresentationSelector {
// Update the phi operator.
MachineType type = static_cast<MachineType>(output_type);
if (type != OpParameter<MachineType>(node)) {
- node->set_op(lowering->common()->Phi(type, values));
+ NodeProperties::ChangeOp(node, lowering->common()->Phi(type, values));
}
// Convert inputs to the output representation of this phi.
@@ -451,7 +454,8 @@ class RepresentationSelector {
MachineTypeUnion input_type = GetInfo(node->InputAt(i))->output;
(*types)[i] = static_cast<MachineType>(input_type);
}
- node->set_op(jsgraph_->common()->TypedStateValues(types));
+ NodeProperties::ChangeOp(node,
+ jsgraph_->common()->TypedStateValues(types));
}
SetOutput(node, kMachAnyTagged);
}
@@ -471,7 +475,7 @@ class RepresentationSelector {
bool CanLowerToInt32Binop(Node* node, MachineTypeUnion use) {
return BothInputsAre(node, Type::Signed32()) &&
(!CanObserveNonInt32(use) ||
- NodeProperties::GetBounds(node).upper->Is(Type::Signed32()));
+ NodeProperties::GetType(node)->Is(Type::Signed32()));
}
bool CanLowerToInt32AdditiveBinop(Node* node, MachineTypeUnion use) {
@@ -482,7 +486,7 @@ class RepresentationSelector {
bool CanLowerToUint32Binop(Node* node, MachineTypeUnion use) {
return BothInputsAre(node, Type::Unsigned32()) &&
(!CanObserveNonUint32(use) ||
- NodeProperties::GetBounds(node).upper->Is(Type::Unsigned32()));
+ NodeProperties::GetType(node)->Is(Type::Unsigned32()));
}
bool CanLowerToUint32AdditiveBinop(Node* node, MachineTypeUnion use) {
@@ -524,7 +528,7 @@ class RepresentationSelector {
return VisitLeaf(node, 0);
case IrOpcode::kParameter: {
// TODO(titzer): use representation from linkage.
- Type* upper = NodeProperties::GetBounds(node).upper;
+ Type* upper = NodeProperties::GetType(node);
ProcessInput(node, 0, 0);
SetOutput(node, kRepTagged | changer_->TypeFromUpperBound(upper));
return;
@@ -581,12 +585,12 @@ class RepresentationSelector {
MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
if (input & kRepBit) {
// BooleanNot(x: kRepBit) => Word32Equal(x, #0)
- node->set_op(lowering->machine()->Word32Equal());
node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
+ NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
} else {
// BooleanNot(x: kRepTagged) => WordEqual(x, #false)
- node->set_op(lowering->machine()->WordEqual());
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
+ NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
}
} else {
// No input representation requirement; adapt during lowering.
@@ -603,8 +607,8 @@ class RepresentationSelector {
DeferReplacement(node, node->InputAt(0));
} else {
// BooleanToNumber(x: kRepTagged) => WordEqual(x, #true)
- node->set_op(lowering->machine()->WordEqual());
node->AppendInput(jsgraph_->zone(), jsgraph_->TrueConstant());
+ NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
}
} else {
// No input representation requirement; adapt during lowering.
@@ -620,15 +624,15 @@ class RepresentationSelector {
if (BothInputsAre(node, Type::Signed32())) {
// => signed Int32Cmp
VisitInt32Cmp(node);
- if (lower()) node->set_op(Int32Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
} else if (BothInputsAre(node, Type::Unsigned32())) {
// => unsigned Int32Cmp
VisitUint32Cmp(node);
- if (lower()) node->set_op(Uint32Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
} else {
// => Float64Cmp
VisitFloat64Cmp(node);
- if (lower()) node->set_op(Float64Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
}
break;
}
@@ -639,27 +643,27 @@ class RepresentationSelector {
if (CanLowerToInt32Binop(node, use)) {
// => signed Int32Add/Sub
VisitInt32Binop(node);
- if (lower()) node->set_op(Int32Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
} else if (CanLowerToInt32AdditiveBinop(node, use)) {
// => signed Int32Add/Sub, truncating inputs
ProcessTruncateWord32Input(node, 0, kTypeInt32);
ProcessTruncateWord32Input(node, 1, kTypeInt32);
SetOutput(node, kMachInt32);
- if (lower()) node->set_op(Int32Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
} else if (CanLowerToUint32Binop(node, use)) {
// => unsigned Int32Add/Sub
VisitUint32Binop(node);
- if (lower()) node->set_op(Uint32Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
} else if (CanLowerToUint32AdditiveBinop(node, use)) {
// => signed Int32Add/Sub, truncating inputs
ProcessTruncateWord32Input(node, 0, kTypeUint32);
ProcessTruncateWord32Input(node, 1, kTypeUint32);
SetOutput(node, kMachUint32);
- if (lower()) node->set_op(Uint32Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Uint32Op(node));
} else {
// => Float64Add/Sub
VisitFloat64Binop(node);
- if (lower()) node->set_op(Float64Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
}
break;
}
@@ -669,13 +673,13 @@ class RepresentationSelector {
if (CanLowerToInt32Binop(node, use)) {
// => signed Int32Mul
VisitInt32Binop(node);
- if (lower()) node->set_op(Int32Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Int32Op(node));
break;
}
}
// => Float64Mul
VisitFloat64Binop(node);
- if (lower()) node->set_op(Float64Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
break;
}
case IrOpcode::kNumberDivide: {
@@ -693,7 +697,7 @@ class RepresentationSelector {
}
// => Float64Div
VisitFloat64Binop(node);
- if (lower()) node->set_op(Float64Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
break;
}
case IrOpcode::kNumberModulus: {
@@ -711,7 +715,7 @@ class RepresentationSelector {
}
// => Float64Mod
VisitFloat64Binop(node);
- if (lower()) node->set_op(Float64Op(node));
+ if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
break;
}
case IrOpcode::kNumberShiftLeft: {
@@ -732,7 +736,7 @@ class RepresentationSelector {
case IrOpcode::kNumberToInt32: {
MachineTypeUnion use_rep = use & kRepMask;
Node* input = node->InputAt(0);
- Type* in_upper = NodeProperties::GetBounds(input).upper;
+ Type* in_upper = NodeProperties::GetType(input);
MachineTypeUnion in = GetInfo(input)->output;
if (in_upper->Is(Type::Signed32())) {
// If the input has type int32, pass through representation.
@@ -753,8 +757,9 @@ class RepresentationSelector {
// TODO(turbofan): avoid a truncation with a smi check.
VisitUnop(node, kTypeInt32 | kRepFloat64, kTypeInt32 | kRepWord32);
if (lower()) {
- node->set_op(lowering->machine()->TruncateFloat64ToInt32(
- TruncationMode::kJavaScript));
+ NodeProperties::ChangeOp(
+ node, lowering->machine()->TruncateFloat64ToInt32(
+ TruncationMode::kJavaScript));
}
}
break;
@@ -762,7 +767,7 @@ class RepresentationSelector {
case IrOpcode::kNumberToUint32: {
MachineTypeUnion use_rep = use & kRepMask;
Node* input = node->InputAt(0);
- Type* in_upper = NodeProperties::GetBounds(input).upper;
+ Type* in_upper = NodeProperties::GetType(input);
MachineTypeUnion in = GetInfo(input)->output;
if (in_upper->Is(Type::Unsigned32())) {
// If the input has type uint32, pass through representation.
@@ -783,8 +788,9 @@ class RepresentationSelector {
// TODO(turbofan): avoid a truncation with a smi check.
VisitUnop(node, kTypeUint32 | kRepFloat64, kTypeUint32 | kRepWord32);
if (lower()) {
- node->set_op(lowering->machine()->TruncateFloat64ToInt32(
- TruncationMode::kJavaScript));
+ NodeProperties::ChangeOp(
+ node, lowering->machine()->TruncateFloat64ToInt32(
+ TruncationMode::kJavaScript));
}
}
break;
@@ -799,16 +805,18 @@ class RepresentationSelector {
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
flags, properties);
- node->set_op(jsgraph_->common()->Call(desc));
node->InsertInput(jsgraph_->zone(), 0,
jsgraph_->HeapConstant(callable.code()));
node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
+ NodeProperties::ChangeOp(node, jsgraph_->common()->Call(desc));
}
break;
}
case IrOpcode::kReferenceEqual: {
VisitBinop(node, kMachAnyTagged, kRepBit);
- if (lower()) node->set_op(lowering->machine()->WordEqual());
+ if (lower()) {
+ NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ }
break;
}
case IrOpcode::kStringEqual: {
@@ -920,25 +928,6 @@ class RepresentationSelector {
}
break;
}
- case IrOpcode::kObjectIsNonNegativeSmi: {
- ProcessInput(node, 0, kMachAnyTagged);
- SetOutput(node, kRepBit | kTypeBool);
- if (lower()) {
- Node* is_tagged = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->WordAnd(), node->InputAt(0),
- jsgraph_->IntPtrConstant(kSmiTagMask));
- Node* is_smi = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->WordEqual(), is_tagged,
- jsgraph_->IntPtrConstant(kSmiTag));
- Node* is_non_neg = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->IntLessThanOrEqual(),
- jsgraph_->IntPtrConstant(0), node->InputAt(0));
- Node* is_non_neg_smi = jsgraph_->graph()->NewNode(
- jsgraph_->machine()->Word32And(), is_smi, is_non_neg);
- DeferReplacement(node, is_non_neg_smi);
- }
- break;
- }
//------------------------------------------------------------------
// Machine-level operators.
@@ -1227,7 +1216,6 @@ void SimplifiedLowering::DoAllocate(Node* node) {
Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
Operator::Properties props = node->op()->properties();
CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(zone(), f, 2, props);
- node->set_op(common()->Call(desc));
ExternalReference ref(f, jsgraph()->isolate());
int32_t flags = AllocateTargetSpace::encode(space);
node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
@@ -1235,26 +1223,27 @@ void SimplifiedLowering::DoAllocate(Node* node) {
node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
+ NodeProperties::ChangeOp(node, common()->Call(desc));
}
void SimplifiedLowering::DoLoadField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
- node->set_op(machine()->Load(access.machine_type));
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
}
void SimplifiedLowering::DoStoreField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
- Type* type = NodeProperties::GetBounds(node->InputAt(1)).upper;
+ Type* type = NodeProperties::GetType(node->InputAt(1));
WriteBarrierKind kind =
ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type, type);
- node->set_op(
- machine()->Store(StoreRepresentation(access.machine_type, kind)));
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
node->InsertInput(graph()->zone(), 1, offset);
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(access.machine_type, kind)));
}
@@ -1328,13 +1317,13 @@ void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
NodeProperties::ReplaceUses(node, node, ephi);
// Turn the {node} into a Phi.
- node->set_op(common()->Phi(output_type, 2));
node->ReplaceInput(0, vtrue);
node->ReplaceInput(1, vfalse);
node->ReplaceInput(2, merge);
node->TrimInputCount(3);
+ NodeProperties::ChangeOp(node, common()->Phi(output_type, 2));
} else {
- node->set_op(machine()->CheckedLoad(type));
+ NodeProperties::ChangeOp(node, machine()->CheckedLoad(type));
}
}
@@ -1342,43 +1331,41 @@ void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
void SimplifiedLowering::DoStoreBuffer(Node* node) {
DCHECK_EQ(IrOpcode::kStoreBuffer, node->opcode());
MachineType const type = BufferAccessOf(node->op()).machine_type();
- node->set_op(machine()->CheckedStore(type));
+ NodeProperties::ChangeOp(node, machine()->CheckedStore(type));
}
void SimplifiedLowering::DoLoadElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
- node->set_op(machine()->Load(access.machine_type));
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+ NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
}
void SimplifiedLowering::DoStoreElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
- Type* type = NodeProperties::GetBounds(node->InputAt(2)).upper;
- node->set_op(machine()->Store(
- StoreRepresentation(access.machine_type,
- ComputeWriteBarrierKind(access.base_is_tagged,
- access.machine_type, type))));
+ Type* type = NodeProperties::GetType(node->InputAt(2));
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+ NodeProperties::ChangeOp(
+ node, machine()->Store(StoreRepresentation(
+ access.machine_type,
+ ComputeWriteBarrierKind(access.base_is_tagged,
+ access.machine_type, type))));
}
-Node* SimplifiedLowering::StringComparison(Node* node, bool requires_ordering) {
- Runtime::FunctionId f =
- requires_ordering ? Runtime::kStringCompare : Runtime::kStringEquals;
- ExternalReference ref(f, jsgraph()->isolate());
- Operator::Properties props = node->op()->properties();
- // TODO(mstarzinger): We should call StringCompareStub here instead, once an
- // interface descriptor is available for it.
- CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(zone(), f, 2, props);
- return graph()->NewNode(common()->Call(desc),
- jsgraph()->CEntryStubConstant(1),
- NodeProperties::GetValueInput(node, 0),
- NodeProperties::GetValueInput(node, 1),
- jsgraph()->ExternalConstant(ref),
- jsgraph()->Int32Constant(2),
- jsgraph()->NoContextConstant());
+Node* SimplifiedLowering::StringComparison(Node* node) {
+ Operator::Properties properties = node->op()->properties();
+ Callable callable = CodeFactory::StringCompare(isolate());
+ CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0, flags, properties);
+ return graph()->NewNode(
+ common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
+ NodeProperties::GetValueInput(node, 0),
+ NodeProperties::GetValueInput(node, 1), jsgraph()->NoContextConstant(),
+ NodeProperties::GetEffectInput(node),
+ NodeProperties::GetControlInput(node));
}
@@ -1632,34 +1619,60 @@ Node* SimplifiedLowering::Uint32Mod(Node* const node) {
void SimplifiedLowering::DoShift(Node* node, Operator const* op) {
- node->set_op(op);
Node* const rhs = NodeProperties::GetValueInput(node, 1);
- Type* const rhs_type = NodeProperties::GetBounds(rhs).upper;
+ Type* const rhs_type = NodeProperties::GetType(rhs);
if (!rhs_type->Is(zero_thirtyone_range_)) {
node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
jsgraph()->Int32Constant(0x1f)));
}
+ NodeProperties::ChangeOp(node, op);
}
+namespace {
+
+void ReplaceEffectUses(Node* node, Node* replacement) {
+ // Requires distinguishing between value and effect edges.
+ DCHECK(replacement->op()->EffectOutputCount() > 0);
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(replacement);
+ } else {
+ DCHECK(NodeProperties::IsValueEdge(edge));
+ }
+ }
+}
+
+} // namespace
+
+
void SimplifiedLowering::DoStringEqual(Node* node) {
- node->set_op(machine()->WordEqual());
- node->ReplaceInput(0, StringComparison(node, false));
+ Node* comparison = StringComparison(node);
+ ReplaceEffectUses(node, comparison);
+ node->ReplaceInput(0, comparison);
node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->WordEqual());
}
void SimplifiedLowering::DoStringLessThan(Node* node) {
- node->set_op(machine()->IntLessThan());
- node->ReplaceInput(0, StringComparison(node, true));
+ Node* comparison = StringComparison(node);
+ ReplaceEffectUses(node, comparison);
+ node->ReplaceInput(0, comparison);
node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->IntLessThan());
}
void SimplifiedLowering::DoStringLessThanOrEqual(Node* node) {
- node->set_op(machine()->IntLessThanOrEqual());
- node->ReplaceInput(0, StringComparison(node, true));
+ Node* comparison = StringComparison(node);
+ ReplaceEffectUses(node, comparison);
+ node->ReplaceInput(0, comparison);
node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+ node->TrimInputCount(2);
+ NodeProperties::ChangeOp(node, machine()->IntLessThanOrEqual());
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 302908d5d8..4b9e86b786 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -59,7 +59,7 @@ class SimplifiedLowering final {
Node* Untag(Node* node);
Node* OffsetMinusTagConstant(int32_t offset);
Node* ComputeIndex(const ElementAccess& access, Node* const key);
- Node* StringComparison(Node* node, bool requires_ordering);
+ Node* StringComparison(Node* node);
Node* Int32Div(Node* const node);
Node* Int32Mod(Node* const node);
Node* Uint32Div(Node* const node);
@@ -67,6 +67,7 @@ class SimplifiedLowering final {
friend class RepresentationSelector;
+ Isolate* isolate() { return jsgraph_->isolate(); }
Zone* zone() { return jsgraph_->zone(); }
JSGraph* jsgraph() { return jsgraph_; }
Graph* graph() { return jsgraph()->graph(); }
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index 2e87f362e7..a7f790563e 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -8,6 +8,7 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/operator-properties.h"
+#include "src/conversions-inl.h"
namespace v8 {
namespace internal {
@@ -25,8 +26,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
case IrOpcode::kBooleanNot: {
HeapObjectMatcher m(node->InputAt(0));
if (m.HasValue()) {
- return Replace(
- jsgraph()->BooleanConstant(!m.Value().handle()->BooleanValue()));
+ return Replace(jsgraph()->BooleanConstant(!m.Value()->BooleanValue()));
}
if (m.IsBooleanNot()) return Replace(m.InputAt(0));
break;
@@ -40,7 +40,7 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
case IrOpcode::kChangeBoolToBit: {
HeapObjectMatcher m(node->InputAt(0));
- if (m.HasValue()) return ReplaceInt32(m.Value().handle()->BooleanValue());
+ if (m.HasValue()) return ReplaceInt32(m.Value()->BooleanValue());
if (m.IsChangeBitToBool()) return Replace(m.InputAt(0));
break;
}
@@ -100,8 +100,8 @@ Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
Node* a) {
DCHECK_EQ(node->InputCount(), OperatorProperties::GetTotalInputCount(op));
DCHECK_LE(1, node->InputCount());
- node->set_op(op);
node->ReplaceInput(0, a);
+ NodeProperties::ChangeOp(node, op);
return Changed(node);
}
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index d401fb7862..8432d21d95 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -174,9 +174,6 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
V(NumberToInt32, Operator::kNoProperties, 1) \
V(NumberToUint32, Operator::kNoProperties, 1) \
V(PlainPrimitiveToNumber, Operator::kNoProperties, 1) \
- V(StringEqual, Operator::kCommutative, 2) \
- V(StringLessThan, Operator::kNoProperties, 2) \
- V(StringLessThanOrEqual, Operator::kNoProperties, 2) \
V(ChangeTaggedToInt32, Operator::kNoProperties, 1) \
V(ChangeTaggedToUint32, Operator::kNoProperties, 1) \
V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
@@ -185,9 +182,12 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
V(ChangeBoolToBit, Operator::kNoProperties, 1) \
V(ChangeBitToBool, Operator::kNoProperties, 1) \
- V(ObjectIsSmi, Operator::kNoProperties, 1) \
- V(ObjectIsNonNegativeSmi, Operator::kNoProperties, 1)
+ V(ObjectIsSmi, Operator::kNoProperties, 1)
+#define NO_THROW_OP_LIST(V) \
+ V(StringEqual, Operator::kCommutative, 2) \
+  V(StringLessThan, Operator::kNoProperties, 2)          \
+  V(StringLessThanOrEqual, Operator::kNoProperties, 2)
struct SimplifiedOperatorGlobalCache final {
#define PURE(Name, properties, input_count) \
@@ -200,6 +200,16 @@ struct SimplifiedOperatorGlobalCache final {
PURE_OP_LIST(PURE)
#undef PURE
+#define NO_THROW(Name, properties, input_count) \
+ struct Name##Operator final : public Operator { \
+ Name##Operator() \
+ : Operator(IrOpcode::k##Name, Operator::kNoThrow | properties, #Name, \
+ input_count, 1, 1, 1, 1, 0) {} \
+ }; \
+ Name##Operator k##Name;
+ NO_THROW_OP_LIST(NO_THROW)
+#undef NO_THROW
+
#define BUFFER_ACCESS(Type, type, TYPE, ctype, size) \
struct LoadBuffer##Type##Operator final : public Operator1<BufferAccess> { \
LoadBuffer##Type##Operator() \
@@ -230,10 +240,11 @@ SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
: cache_(kCache.Get()), zone_(zone) {}
-#define PURE(Name, properties, input_count) \
+#define GET_FROM_CACHE(Name, properties, input_count) \
const Operator* SimplifiedOperatorBuilder::Name() { return &cache_.k##Name; }
-PURE_OP_LIST(PURE)
-#undef PURE
+PURE_OP_LIST(GET_FROM_CACHE)
+NO_THROW_OP_LIST(GET_FROM_CACHE)
+#undef GET_FROM_CACHE
const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 447bf9e5e9..53b6b044a1 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -164,7 +164,6 @@ class SimplifiedOperatorBuilder final {
const Operator* ChangeBitToBool();
const Operator* ObjectIsSmi();
- const Operator* ObjectIsNonNegativeSmi();
const Operator* Allocate(PretenureFlag pretenure = NOT_TENURED);
diff --git a/deps/v8/src/compiler/source-position.cc b/deps/v8/src/compiler/source-position.cc
index aba77b36f3..48361ecac7 100644
--- a/deps/v8/src/compiler/source-position.cc
+++ b/deps/v8/src/compiler/source-position.cc
@@ -5,7 +5,6 @@
#include "src/compiler/source-position.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-aux-data.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/tail-call-optimization.cc b/deps/v8/src/compiler/tail-call-optimization.cc
index a88ebe3e63..6635fb982b 100644
--- a/deps/v8/src/compiler/tail-call-optimization.cc
+++ b/deps/v8/src/compiler/tail-call-optimization.cc
@@ -63,8 +63,6 @@ Reduction TailCallOptimization::Reduce(Node* node) {
DCHECK_EQ(call, NodeProperties::GetControlInput(control, 0));
DCHECK_EQ(3, node->InputCount());
- node->set_op(
- common()->TailCall(OpParameter<CallDescriptor const*>(call)));
node->ReplaceInput(0, NodeProperties::GetEffectInput(call));
node->ReplaceInput(1, NodeProperties::GetControlInput(call));
node->RemoveInput(2);
@@ -72,6 +70,8 @@ Reduction TailCallOptimization::Reduce(Node* node) {
node->InsertInput(graph()->zone(), index,
NodeProperties::GetValueInput(call, index));
}
+ NodeProperties::ChangeOp(
+ node, common()->TailCall(OpParameter<CallDescriptor const*>(call)));
return Changed(node);
}
}
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index aabcf4b5a8..4707aef1e5 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -13,96 +13,16 @@
#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/objects-inl.h"
+#include "src/zone-type-cache.h"
namespace v8 {
namespace internal {
namespace compiler {
-class TyperCache final {
- private:
- // This has to be first for the initialization magic to work.
- Zone zone_;
-
- public:
- TyperCache() = default;
-
- Type* const kInt8 =
- CreateNative(CreateRange<int8_t>(), Type::UntaggedSigned8());
- Type* const kUint8 =
- CreateNative(CreateRange<uint8_t>(), Type::UntaggedUnsigned8());
- Type* const kUint8Clamped = kUint8;
- Type* const kInt16 =
- CreateNative(CreateRange<int16_t>(), Type::UntaggedSigned16());
- Type* const kUint16 =
- CreateNative(CreateRange<uint16_t>(), Type::UntaggedUnsigned16());
- Type* const kInt32 = CreateNative(Type::Signed32(), Type::UntaggedSigned32());
- Type* const kUint32 =
- CreateNative(Type::Unsigned32(), Type::UntaggedUnsigned32());
- Type* const kFloat32 = CreateNative(Type::Number(), Type::UntaggedFloat32());
- Type* const kFloat64 = CreateNative(Type::Number(), Type::UntaggedFloat64());
-
- Type* const kSingletonZero = CreateRange(0.0, 0.0);
- Type* const kSingletonOne = CreateRange(1.0, 1.0);
- Type* const kZeroOrOne = CreateRange(0.0, 1.0);
- Type* const kZeroish =
- Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
- Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
- Type* const kWeakint = Type::Union(kInteger, Type::MinusZeroOrNaN(), zone());
- Type* const kWeakintFunc1 = Type::Function(kWeakint, Type::Number(), zone());
-
- Type* const kRandomFunc0 = Type::Function(Type::OrderedNumber(), zone());
- Type* const kAnyFunc0 = Type::Function(Type::Any(), zone());
- Type* const kAnyFunc1 = Type::Function(Type::Any(), Type::Any(), zone());
- Type* const kAnyFunc2 =
- Type::Function(Type::Any(), Type::Any(), Type::Any(), zone());
- Type* const kAnyFunc3 = Type::Function(Type::Any(), Type::Any(), Type::Any(),
- Type::Any(), zone());
- Type* const kNumberFunc0 = Type::Function(Type::Number(), zone());
- Type* const kNumberFunc1 =
- Type::Function(Type::Number(), Type::Number(), zone());
- Type* const kNumberFunc2 =
- Type::Function(Type::Number(), Type::Number(), Type::Number(), zone());
- Type* const kImulFunc = Type::Function(Type::Signed32(), Type::Integral32(),
- Type::Integral32(), zone());
- Type* const kClz32Func =
- Type::Function(CreateRange(0, 32), Type::Number(), zone());
-
-#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
- Type* const k##TypeName##Array = CreateArray(k##TypeName);
- TYPED_ARRAYS(TYPED_ARRAY)
-#undef TYPED_ARRAY
-
- private:
- Type* CreateArray(Type* element) { return Type::Array(element, zone()); }
-
- Type* CreateArrayFunction(Type* array) {
- Type* arg1 = Type::Union(Type::Unsigned32(), Type::Object(), zone());
- Type* arg2 = Type::Union(Type::Unsigned32(), Type::Undefined(), zone());
- Type* arg3 = arg2;
- return Type::Function(array, arg1, arg2, arg3, zone());
- }
-
- Type* CreateNative(Type* semantic, Type* representation) {
- return Type::Intersect(semantic, representation, zone());
- }
-
- template <typename T>
- Type* CreateRange() {
- return CreateRange(std::numeric_limits<T>::min(),
- std::numeric_limits<T>::max());
- }
-
- Type* CreateRange(double min, double max) {
- return Type::Range(min, max, zone());
- }
-
- Zone* zone() { return &zone_; }
-};
-
-
namespace {
-base::LazyInstance<TyperCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<ZoneTypeCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
} // namespace
@@ -128,9 +48,12 @@ Typer::Typer(Isolate* isolate, Graph* graph, Type::FunctionType* function_type)
Type* infinity = Type::Constant(factory->infinity_value(), zone);
Type* minus_infinity = Type::Constant(factory->minus_infinity_value(), zone);
+ // TODO(neis): Unfortunately, the infinities created in other places might
+ // be different ones (eg the result of NewNumber in TypeNumberConstant).
Type* truncating_to_zero =
Type::Union(Type::Union(infinity, minus_infinity, zone),
Type::MinusZeroOrNaN(), zone);
+ DCHECK(!truncating_to_zero->Maybe(Type::Integral32()));
singleton_false_ = Type::Constant(factory->false_value(), zone);
singleton_true_ = Type::Constant(factory->true_value(), zone);
@@ -165,13 +88,13 @@ class Typer::Visitor : public Reducer {
switch (node->opcode()) {
#define DECLARE_CASE(x) \
case IrOpcode::k##x: \
- return UpdateBounds(node, TypeBinaryOp(node, x##Typer));
+ return UpdateType(node, TypeBinaryOp(node, x##Typer));
JS_SIMPLE_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
#define DECLARE_CASE(x) \
case IrOpcode::k##x: \
- return UpdateBounds(node, Type##x(node));
+ return UpdateType(node, Type##x(node));
DECLARE_CASE(Start)
DECLARE_CASE(IfException)
// VALUE_OP_LIST without JS_SIMPLE_BINOP_LIST:
@@ -208,7 +131,7 @@ class Typer::Visitor : public Reducer {
return NoChange();
}
- Bounds TypeNode(Node* node) {
+ Type* TypeNode(Node* node) {
switch (node->opcode()) {
#define DECLARE_CASE(x) \
case IrOpcode::k##x: return TypeBinaryOp(node, x##Typer);
@@ -250,7 +173,7 @@ class Typer::Visitor : public Reducer {
break;
}
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
Type* TypeConstant(Handle<Object> value);
@@ -259,23 +182,23 @@ class Typer::Visitor : public Reducer {
Typer* typer_;
ZoneSet<NodeId> weakened_nodes_;
-#define DECLARE_METHOD(x) inline Bounds Type##x(Node* node);
+#define DECLARE_METHOD(x) inline Type* Type##x(Node* node);
DECLARE_METHOD(Start)
DECLARE_METHOD(IfException)
VALUE_OP_LIST(DECLARE_METHOD)
#undef DECLARE_METHOD
- Bounds BoundsOrNone(Node* node) {
- return NodeProperties::IsTyped(node) ? NodeProperties::GetBounds(node)
- : Bounds(Type::None());
+ Type* TypeOrNone(Node* node) {
+ return NodeProperties::IsTyped(node) ? NodeProperties::GetType(node)
+ : Type::None();
}
- Bounds Operand(Node* node, int i) {
+ Type* Operand(Node* node, int i) {
Node* operand_node = NodeProperties::GetValueInput(node, i);
- return BoundsOrNone(operand_node);
+ return TypeOrNone(operand_node);
}
- Bounds WrapContextBoundsForInput(Node* node);
+ Type* WrapContextTypeForInput(Node* node);
Type* Weaken(Node* node, Type* current_type, Type* previous_type);
Zone* zone() { return typer_->zone(); }
@@ -290,8 +213,8 @@ class Typer::Visitor : public Reducer {
typedef Type* (*UnaryTyperFun)(Type*, Typer* t);
typedef Type* (*BinaryTyperFun)(Type*, Type*, Typer* t);
- Bounds TypeUnaryOp(Node* node, UnaryTyperFun);
- Bounds TypeBinaryOp(Node* node, BinaryTyperFun);
+ Type* TypeUnaryOp(Node* node, UnaryTyperFun);
+ Type* TypeBinaryOp(Node* node, BinaryTyperFun);
enum ComparisonOutcomeFlags {
kComparisonTrue = 1,
@@ -329,29 +252,26 @@ class Typer::Visitor : public Reducer {
static Type* JSLoadPropertyTyper(Type*, Type*, Typer*);
static Type* JSCallFunctionTyper(Type*, Typer*);
- Reduction UpdateBounds(Node* node, Bounds current) {
+ Reduction UpdateType(Node* node, Type* current) {
if (NodeProperties::IsTyped(node)) {
- // Widen the bounds of a previously typed node.
- Bounds previous = NodeProperties::GetBounds(node);
+ // Widen the type of a previously typed node.
+ Type* previous = NodeProperties::GetType(node);
if (node->opcode() == IrOpcode::kPhi) {
// Speed up termination in the presence of range types:
- current.upper = Weaken(node, current.upper, previous.upper);
- current.lower = Weaken(node, current.lower, previous.lower);
+ current = Weaken(node, current, previous);
}
- // Types should not get less precise.
- DCHECK(previous.lower->Is(current.lower));
- DCHECK(previous.upper->Is(current.upper));
+ DCHECK(previous->Is(current));
- NodeProperties::SetBounds(node, current);
- if (!(previous.Narrows(current) && current.Narrows(previous))) {
+ NodeProperties::SetType(node, current);
+ if (!(previous->Is(current) && current->Is(previous))) {
// If something changed, revisit all uses.
return Changed(node);
}
return NoChange();
} else {
- // No previous type, simply update the bounds.
- NodeProperties::SetBounds(node, current);
+ // No previous type, simply update the type.
+ NodeProperties::SetType(node, current);
return Changed(node);
}
}
@@ -377,12 +297,12 @@ void Typer::Decorator::Decorate(Node* node) {
bool is_typed = NodeProperties::IsTyped(node);
if (is_typed || NodeProperties::AllValueInputsAreTyped(node)) {
Visitor typing(typer_);
- Bounds bounds = typing.TypeNode(node);
+ Type* type = typing.TypeNode(node);
if (is_typed) {
- bounds =
- Bounds::Both(bounds, NodeProperties::GetBounds(node), typer_->zone());
+ type = Type::Intersect(type, NodeProperties::GetType(node),
+ typer_->zone());
}
- NodeProperties::SetBounds(node, bounds);
+ NodeProperties::SetType(node, type);
}
}
}
@@ -395,36 +315,17 @@ void Typer::Decorator::Decorate(Node* node) {
// as an argument.
-Bounds Typer::Visitor::TypeUnaryOp(Node* node, UnaryTyperFun f) {
- Bounds input = Operand(node, 0);
- Type* upper =
- input.upper->IsInhabited() ? f(input.upper, typer_) : Type::None();
- Type* lower = input.lower->IsInhabited()
- ? ((input.lower == input.upper || upper->IsConstant())
- ? upper // TODO(neis): Extend this to Range(x,x),
- // NaN, MinusZero, ...?
- : f(input.lower, typer_))
- : Type::None();
- // TODO(neis): Figure out what to do with lower bound.
- return Bounds(lower, upper);
+Type* Typer::Visitor::TypeUnaryOp(Node* node, UnaryTyperFun f) {
+ Type* input = Operand(node, 0);
+ return input->IsInhabited() ? f(input, typer_) : Type::None();
}
-Bounds Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
- Bounds left = Operand(node, 0);
- Bounds right = Operand(node, 1);
- Type* upper = left.upper->IsInhabited() && right.upper->IsInhabited()
- ? f(left.upper, right.upper, typer_)
- : Type::None();
- Type* lower =
- left.lower->IsInhabited() && right.lower->IsInhabited()
- ? (((left.lower == left.upper && right.lower == right.upper) ||
- upper->IsConstant())
- ? upper
- : f(left.lower, right.lower, typer_))
- : Type::None();
- // TODO(neis): Figure out what to do with lower bound.
- return Bounds(lower, upper);
+Type* Typer::Visitor::TypeBinaryOp(Node* node, BinaryTyperFun f) {
+ Type* left = Operand(node, 0);
+ Type* right = Operand(node, 1);
+ return left->IsInhabited() && right->IsInhabited() ? f(left, right, typer_)
+ : Type::None();
}
@@ -558,147 +459,136 @@ Type* Typer::Visitor::NumberToUint32(Type* type, Typer* t) {
// Control operators.
-Bounds Typer::Visitor::TypeStart(Node* node) {
- return Bounds(Type::None(zone()), Type::Internal(zone()));
-}
+Type* Typer::Visitor::TypeStart(Node* node) { return Type::Internal(zone()); }
-Bounds Typer::Visitor::TypeIfException(Node* node) {
- return Bounds::Unbounded(zone());
-}
+Type* Typer::Visitor::TypeIfException(Node* node) { return Type::Any(); }
// Common operators.
-Bounds Typer::Visitor::TypeParameter(Node* node) {
+Type* Typer::Visitor::TypeParameter(Node* node) {
if (Type::FunctionType* function_type = typer_->function_type()) {
int const index = ParameterIndexOf(node->op());
if (index >= 0 && index < function_type->Arity()) {
- return Bounds(Type::None(), function_type->Parameter(index));
+ return function_type->Parameter(index);
}
}
- return Bounds::Unbounded(zone());
+ return Type::Any();
}
-Bounds Typer::Visitor::TypeOsrValue(Node* node) {
- return Bounds::Unbounded(zone());
-}
+Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
-Bounds Typer::Visitor::TypeInt32Constant(Node* node) {
+Type* Typer::Visitor::TypeInt32Constant(Node* node) {
double number = OpParameter<int32_t>(node);
- return Bounds(Type::Intersect(
- Type::Range(number, number, zone()), Type::UntaggedSigned32(), zone()));
+ return Type::Intersect(Type::Range(number, number, zone()),
+ Type::UntaggedSigned32(), zone());
}
-Bounds Typer::Visitor::TypeInt64Constant(Node* node) {
+Type* Typer::Visitor::TypeInt64Constant(Node* node) {
// TODO(rossberg): This actually seems to be a PointerConstant so far...
- return Bounds(Type::Internal()); // TODO(rossberg): Add int64 bitset type?
+ return Type::Internal(); // TODO(rossberg): Add int64 bitset type?
}
-Bounds Typer::Visitor::TypeFloat32Constant(Node* node) {
- return Bounds(Type::Intersect(
- Type::Of(OpParameter<float>(node), zone()),
- Type::UntaggedFloat32(), zone()));
+Type* Typer::Visitor::TypeFloat32Constant(Node* node) {
+ return Type::Intersect(Type::Of(OpParameter<float>(node), zone()),
+ Type::UntaggedFloat32(), zone());
}
-Bounds Typer::Visitor::TypeFloat64Constant(Node* node) {
- return Bounds(Type::Intersect(
- Type::Of(OpParameter<double>(node), zone()),
- Type::UntaggedFloat64(), zone()));
+Type* Typer::Visitor::TypeFloat64Constant(Node* node) {
+ return Type::Intersect(Type::Of(OpParameter<double>(node), zone()),
+ Type::UntaggedFloat64(), zone());
}
-Bounds Typer::Visitor::TypeNumberConstant(Node* node) {
+Type* Typer::Visitor::TypeNumberConstant(Node* node) {
Factory* f = isolate()->factory();
- return Bounds(Type::Constant(
- f->NewNumber(OpParameter<double>(node)), zone()));
+ double number = OpParameter<double>(node);
+ if (Type::IsInteger(number)) {
+ return Type::Range(number, number, zone());
+ }
+ return Type::Constant(f->NewNumber(number), zone());
}
-Bounds Typer::Visitor::TypeHeapConstant(Node* node) {
- return Bounds(TypeConstant(OpParameter<Unique<HeapObject> >(node).handle()));
+Type* Typer::Visitor::TypeHeapConstant(Node* node) {
+ return TypeConstant(OpParameter<Handle<HeapObject>>(node));
}
-Bounds Typer::Visitor::TypeExternalConstant(Node* node) {
- return Bounds(Type::None(zone()), Type::Internal(zone()));
+Type* Typer::Visitor::TypeExternalConstant(Node* node) {
+ return Type::Internal(zone());
}
-Bounds Typer::Visitor::TypeSelect(Node* node) {
- return Bounds::Either(Operand(node, 1), Operand(node, 2), zone());
+Type* Typer::Visitor::TypeSelect(Node* node) {
+ return Type::Union(Operand(node, 1), Operand(node, 2), zone());
}
-Bounds Typer::Visitor::TypePhi(Node* node) {
+Type* Typer::Visitor::TypePhi(Node* node) {
int arity = node->op()->ValueInputCount();
- Bounds bounds = Operand(node, 0);
+ Type* type = Operand(node, 0);
for (int i = 1; i < arity; ++i) {
- bounds = Bounds::Either(bounds, Operand(node, i), zone());
+ type = Type::Union(type, Operand(node, i), zone());
}
- return bounds;
+ return type;
}
-Bounds Typer::Visitor::TypeEffectPhi(Node* node) {
+Type* Typer::Visitor::TypeEffectPhi(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeEffectSet(Node* node) {
+Type* Typer::Visitor::TypeEffectSet(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeValueEffect(Node* node) {
+Type* Typer::Visitor::TypeValueEffect(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeFinish(Node* node) {
- return Operand(node, 0);
-}
+Type* Typer::Visitor::TypeFinish(Node* node) { return Operand(node, 0); }
-Bounds Typer::Visitor::TypeFrameState(Node* node) {
+Type* Typer::Visitor::TypeFrameState(Node* node) {
// TODO(rossberg): Ideally FrameState wouldn't have a value output.
- return Bounds(Type::None(zone()), Type::Internal(zone()));
+ return Type::Internal(zone());
}
-Bounds Typer::Visitor::TypeStateValues(Node* node) {
- return Bounds(Type::None(zone()), Type::Internal(zone()));
+Type* Typer::Visitor::TypeStateValues(Node* node) {
+ return Type::Internal(zone());
}
-Bounds Typer::Visitor::TypeTypedStateValues(Node* node) {
- return Bounds(Type::None(zone()), Type::Internal(zone()));
+Type* Typer::Visitor::TypeTypedStateValues(Node* node) {
+ return Type::Internal(zone());
}
-Bounds Typer::Visitor::TypeCall(Node* node) {
- return Bounds::Unbounded(zone());
-}
+Type* Typer::Visitor::TypeCall(Node* node) { return Type::Any(); }
-Bounds Typer::Visitor::TypeProjection(Node* node) {
+Type* Typer::Visitor::TypeProjection(Node* node) {
// TODO(titzer): use the output type of the input to determine the bounds.
- return Bounds::Unbounded(zone());
+ return Type::Any();
}
-Bounds Typer::Visitor::TypeDead(Node* node) {
- return Bounds::Unbounded(zone());
-}
+Type* Typer::Visitor::TypeDead(Node* node) { return Type::Any(); }
// JS comparison operators.
@@ -1191,7 +1081,7 @@ Type* Typer::Visitor::JSUnaryNotTyper(Type* type, Typer* t) {
}
-Bounds Typer::Visitor::TypeJSUnaryNot(Node* node) {
+Type* Typer::Visitor::TypeJSUnaryNot(Node* node) {
return TypeUnaryOp(node, JSUnaryNotTyper);
}
@@ -1204,7 +1094,8 @@ Type* Typer::Visitor::JSTypeOfTyper(Type* type, Typer* t) {
return Type::Constant(f->number_string(), t->zone());
} else if (type->Is(Type::Symbol())) {
return Type::Constant(f->symbol_string(), t->zone());
- } else if (type->Is(Type::Union(Type::Undefined(), Type::Undetectable()))) {
+ } else if (type->Is(Type::Union(Type::Undefined(), Type::Undetectable(),
+ t->zone()))) {
return Type::Constant(f->undefined_string(), t->zone());
} else if (type->Is(Type::Null())) {
return Type::Constant(f->object_string(), t->zone());
@@ -1213,7 +1104,7 @@ Type* Typer::Visitor::JSTypeOfTyper(Type* type, Typer* t) {
}
-Bounds Typer::Visitor::TypeJSTypeOf(Node* node) {
+Type* Typer::Visitor::TypeJSTypeOf(Node* node) {
return TypeUnaryOp(node, JSTypeOfTyper);
}
@@ -1221,51 +1112,50 @@ Bounds Typer::Visitor::TypeJSTypeOf(Node* node) {
// JS conversion operators.
-Bounds Typer::Visitor::TypeJSToBoolean(Node* node) {
+Type* Typer::Visitor::TypeJSToBoolean(Node* node) {
return TypeUnaryOp(node, ToBoolean);
}
-Bounds Typer::Visitor::TypeJSToNumber(Node* node) {
+Type* Typer::Visitor::TypeJSToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
-Bounds Typer::Visitor::TypeJSToString(Node* node) {
+Type* Typer::Visitor::TypeJSToString(Node* node) {
return TypeUnaryOp(node, ToString);
}
-Bounds Typer::Visitor::TypeJSToName(Node* node) {
- return Bounds(Type::None(), Type::Name());
-}
+Type* Typer::Visitor::TypeJSToName(Node* node) { return Type::Name(); }
-Bounds Typer::Visitor::TypeJSToObject(Node* node) {
- return Bounds(Type::None(), Type::Receiver());
-}
+Type* Typer::Visitor::TypeJSToObject(Node* node) { return Type::Receiver(); }
// JS object operators.
-Bounds Typer::Visitor::TypeJSCreate(Node* node) {
- return Bounds(Type::None(), Type::Object());
+Type* Typer::Visitor::TypeJSCreate(Node* node) { return Type::Object(); }
+
+
+Type* Typer::Visitor::TypeJSCreateArguments(Node* node) {
+ return Type::OtherObject();
}
-Bounds Typer::Visitor::TypeJSCreateClosure(Node* node) {
- return Bounds(Type::None(), Type::OtherObject());
+Type* Typer::Visitor::TypeJSCreateClosure(Node* node) {
+ return Type::OtherObject();
}
-Bounds Typer::Visitor::TypeJSCreateLiteralArray(Node* node) {
- return Bounds(Type::None(), Type::OtherObject());
+Type* Typer::Visitor::TypeJSCreateLiteralArray(Node* node) {
+  return Type::OtherObject();
}
-Bounds Typer::Visitor::TypeJSCreateLiteralObject(Node* node) {
- return Bounds(Type::None(), Type::OtherObject());
+Type* Typer::Visitor::TypeJSCreateLiteralObject(Node* node) {
+ return Type::OtherObject();
}
@@ -1279,19 +1169,15 @@ Type* Typer::Visitor::JSLoadPropertyTyper(Type* object, Type* name, Typer* t) {
}
-Bounds Typer::Visitor::TypeJSLoadProperty(Node* node) {
+Type* Typer::Visitor::TypeJSLoadProperty(Node* node) {
return TypeBinaryOp(node, JSLoadPropertyTyper);
}
-Bounds Typer::Visitor::TypeJSLoadNamed(Node* node) {
- return Bounds::Unbounded(zone());
-}
+Type* Typer::Visitor::TypeJSLoadNamed(Node* node) { return Type::Any(); }
-Bounds Typer::Visitor::TypeJSLoadGlobal(Node* node) {
- return Bounds::Unbounded(zone());
-}
+Type* Typer::Visitor::TypeJSLoadGlobal(Node* node) { return Type::Any(); }
// Returns a somewhat larger range if we previously assigned
@@ -1375,150 +1261,114 @@ Type* Typer::Visitor::Weaken(Node* node, Type* current_type,
}
-Bounds Typer::Visitor::TypeJSStoreProperty(Node* node) {
+Type* Typer::Visitor::TypeJSStoreProperty(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeJSStoreNamed(Node* node) {
+Type* Typer::Visitor::TypeJSStoreNamed(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeJSStoreGlobal(Node* node) {
+Type* Typer::Visitor::TypeJSStoreGlobal(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeJSDeleteProperty(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
+ return Type::Boolean(zone());
}
-Bounds Typer::Visitor::TypeJSHasProperty(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeJSHasProperty(Node* node) {
+ return Type::Boolean(zone());
}
-Bounds Typer::Visitor::TypeJSInstanceOf(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeJSInstanceOf(Node* node) {
+ return Type::Boolean(zone());
}
// JS context operators.
-Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
- ContextAccess access = OpParameter<ContextAccess>(node);
- Bounds outer = Operand(node, 0);
- Type* context_type = outer.upper;
- if (context_type->Is(Type::None())) {
- // Upper bound of context is not yet known.
- return Bounds(Type::None(), Type::Any());
- }
-
- DCHECK(context_type->Maybe(Type::Internal()));
- // TODO(rossberg): More precisely, instead of the above assertion, we should
- // back-propagate the constraint that it has to be a subtype of Internal.
-
- MaybeHandle<Context> context;
- if (context_type->IsConstant()) {
- context = Handle<Context>::cast(context_type->AsConstant()->Value());
- }
- // Walk context chain (as far as known), mirroring dynamic lookup.
- // Since contexts are mutable, the information is only useful as a lower
- // bound.
- for (size_t i = access.depth(); i > 0; --i) {
- if (context_type->IsContext()) {
- context_type = context_type->AsContext()->Outer();
- if (context_type->IsConstant()) {
- context = Handle<Context>::cast(context_type->AsConstant()->Value());
- }
- } else if (!context.is_null()) {
- context = handle(context.ToHandleChecked()->previous(), isolate());
- }
- }
- Type* lower = Type::None();
- if (!context.is_null()) {
- lower = TypeConstant(
- handle(context.ToHandleChecked()->get(static_cast<int>(access.index())),
- isolate()));
- }
- return Bounds(lower, Type::Any());
+Type* Typer::Visitor::TypeJSLoadContext(Node* node) {
+ // Since contexts are mutable, we just return the top.
+ return Type::Any();
}
-Bounds Typer::Visitor::TypeJSStoreContext(Node* node) {
+Type* Typer::Visitor::TypeJSStoreContext(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeJSLoadDynamicGlobal(Node* node) {
- return Bounds::Unbounded(zone());
+Type* Typer::Visitor::TypeJSLoadDynamicGlobal(Node* node) {
+ return Type::Any();
}
-Bounds Typer::Visitor::TypeJSLoadDynamicContext(Node* node) {
- return Bounds::Unbounded(zone());
+Type* Typer::Visitor::TypeJSLoadDynamicContext(Node* node) {
+ return Type::Any();
}
-Bounds Typer::Visitor::WrapContextBoundsForInput(Node* node) {
- Bounds outer = BoundsOrNone(NodeProperties::GetContextInput(node));
- if (outer.upper->Is(Type::None())) {
- return Bounds(Type::None());
+Type* Typer::Visitor::WrapContextTypeForInput(Node* node) {
+ Type* outer = TypeOrNone(NodeProperties::GetContextInput(node));
+ if (outer->Is(Type::None())) {
+ return Type::None();
} else {
- DCHECK(outer.upper->Maybe(Type::Internal()));
- return Bounds(Type::Context(outer.upper, zone()));
+ DCHECK(outer->Maybe(Type::Internal()));
+ return Type::Context(outer, zone());
}
}
-Bounds Typer::Visitor::TypeJSCreateFunctionContext(Node* node) {
- return WrapContextBoundsForInput(node);
+Type* Typer::Visitor::TypeJSCreateFunctionContext(Node* node) {
+ return WrapContextTypeForInput(node);
}
-Bounds Typer::Visitor::TypeJSCreateCatchContext(Node* node) {
- return WrapContextBoundsForInput(node);
+Type* Typer::Visitor::TypeJSCreateCatchContext(Node* node) {
+ return WrapContextTypeForInput(node);
}
-Bounds Typer::Visitor::TypeJSCreateWithContext(Node* node) {
- return WrapContextBoundsForInput(node);
+Type* Typer::Visitor::TypeJSCreateWithContext(Node* node) {
+ return WrapContextTypeForInput(node);
}
-Bounds Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
- return WrapContextBoundsForInput(node);
+Type* Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
+ return WrapContextTypeForInput(node);
}
-Bounds Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
+Type* Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
// TODO(rossberg): this is probably incorrect
- return WrapContextBoundsForInput(node);
+ return WrapContextTypeForInput(node);
}
-Bounds Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
- return WrapContextBoundsForInput(node);
+Type* Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
+ return WrapContextTypeForInput(node);
}
// JS other operators.
-Bounds Typer::Visitor::TypeJSYield(Node* node) {
- return Bounds::Unbounded(zone());
-}
+Type* Typer::Visitor::TypeJSYield(Node* node) { return Type::Any(); }
-Bounds Typer::Visitor::TypeJSCallConstruct(Node* node) {
- return Bounds(Type::None(), Type::Receiver());
+Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
+ return Type::Receiver();
}
@@ -1527,25 +1377,24 @@ Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
}
-Bounds Typer::Visitor::TypeJSCallFunction(Node* node) {
+Type* Typer::Visitor::TypeJSCallFunction(Node* node) {
return TypeUnaryOp(node, JSCallFunctionTyper); // We ignore argument types.
}
-Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
+Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
switch (CallRuntimeParametersOf(node->op()).id()) {
case Runtime::kInlineIsSmi:
- case Runtime::kInlineIsNonNegativeSmi:
case Runtime::kInlineIsArray:
case Runtime::kInlineIsDate:
case Runtime::kInlineIsTypedArray:
case Runtime::kInlineIsMinusZero:
case Runtime::kInlineIsFunction:
case Runtime::kInlineIsRegExp:
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+ return Type::Boolean(zone());
case Runtime::kInlineDoubleLo:
case Runtime::kInlineDoubleHi:
- return Bounds(Type::None(zone()), Type::Signed32());
+ return Type::Signed32();
case Runtime::kInlineConstructDouble:
case Runtime::kInlineDateField:
case Runtime::kInlineMathFloor:
@@ -1554,149 +1403,143 @@ Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineMathAsin:
case Runtime::kInlineMathAtan:
case Runtime::kInlineMathAtan2:
- return Bounds(Type::None(zone()), Type::Number());
+ return Type::Number();
case Runtime::kInlineMathClz32:
- return Bounds(Type::None(), Type::Range(0, 32, zone()));
+ return Type::Range(0, 32, zone());
case Runtime::kInlineStringGetLength:
- return Bounds(Type::None(), Type::Range(0, String::kMaxLength, zone()));
+ return Type::Range(0, String::kMaxLength, zone());
case Runtime::kInlineToObject:
- return Bounds(Type::None(), Type::Receiver());
+ return Type::Receiver();
default:
break;
}
- return Bounds::Unbounded(zone());
+ return Type::Any();
}
-Bounds Typer::Visitor::TypeJSForInNext(Node* node) {
- return Bounds(Type::None(zone()),
- Type::Union(Type::Name(), Type::Undefined(), zone()));
+Type* Typer::Visitor::TypeJSForInNext(Node* node) {
+ return Type::Union(Type::Name(), Type::Undefined(), zone());
}
-Bounds Typer::Visitor::TypeJSForInPrepare(Node* node) {
+Type* Typer::Visitor::TypeJSForInPrepare(Node* node) {
// TODO(bmeurer): Return a tuple type here.
- return Bounds::Unbounded(zone());
+ return Type::Any();
}
-Bounds Typer::Visitor::TypeJSForInDone(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeJSForInDone(Node* node) {
+ return Type::Boolean(zone());
}
-Bounds Typer::Visitor::TypeJSForInStep(Node* node) {
+Type* Typer::Visitor::TypeJSForInStep(Node* node) {
STATIC_ASSERT(Map::EnumLengthBits::kMax <= FixedArray::kMaxLength);
- return Bounds(Type::None(zone()),
- Type::Range(1, FixedArray::kMaxLength + 1, zone()));
+ return Type::Range(1, FixedArray::kMaxLength + 1, zone());
}
-Bounds Typer::Visitor::TypeJSStackCheck(Node* node) {
- return Bounds::Unbounded(zone());
-}
+Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
// Simplified operators.
-Bounds Typer::Visitor::TypeBooleanNot(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeBooleanNot(Node* node) {
+ return Type::Boolean(zone());
}
-Bounds Typer::Visitor::TypeBooleanToNumber(Node* node) {
+Type* Typer::Visitor::TypeBooleanToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
-Bounds Typer::Visitor::TypeNumberEqual(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeNumberEqual(Node* node) {
+ return Type::Boolean(zone());
}
-Bounds Typer::Visitor::TypeNumberLessThan(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeNumberLessThan(Node* node) {
+ return Type::Boolean(zone());
}
-Bounds Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
+ return Type::Boolean(zone());
}
-Bounds Typer::Visitor::TypeNumberAdd(Node* node) {
- return Bounds(Type::None(zone()), Type::Number(zone()));
-}
+Type* Typer::Visitor::TypeNumberAdd(Node* node) { return Type::Number(zone()); }
-Bounds Typer::Visitor::TypeNumberSubtract(Node* node) {
- return Bounds(Type::None(zone()), Type::Number(zone()));
+Type* Typer::Visitor::TypeNumberSubtract(Node* node) {
+ return Type::Number(zone());
}
-Bounds Typer::Visitor::TypeNumberMultiply(Node* node) {
- return Bounds(Type::None(zone()), Type::Number(zone()));
+Type* Typer::Visitor::TypeNumberMultiply(Node* node) {
+ return Type::Number(zone());
}
-Bounds Typer::Visitor::TypeNumberDivide(Node* node) {
- return Bounds(Type::None(zone()), Type::Number(zone()));
+Type* Typer::Visitor::TypeNumberDivide(Node* node) {
+ return Type::Number(zone());
}
-Bounds Typer::Visitor::TypeNumberModulus(Node* node) {
- return Bounds(Type::None(zone()), Type::Number(zone()));
+Type* Typer::Visitor::TypeNumberModulus(Node* node) {
+ return Type::Number(zone());
}
-Bounds Typer::Visitor::TypeNumberShiftLeft(Node* node) {
- return Bounds(Type::None(zone()), Type::Signed32(zone()));
+Type* Typer::Visitor::TypeNumberShiftLeft(Node* node) {
+ return Type::Signed32(zone());
}
-Bounds Typer::Visitor::TypeNumberShiftRight(Node* node) {
- return Bounds(Type::None(zone()), Type::Signed32(zone()));
+Type* Typer::Visitor::TypeNumberShiftRight(Node* node) {
+ return Type::Signed32(zone());
}
-Bounds Typer::Visitor::TypeNumberShiftRightLogical(Node* node) {
- return Bounds(Type::None(zone()), Type::Unsigned32(zone()));
+Type* Typer::Visitor::TypeNumberShiftRightLogical(Node* node) {
+ return Type::Unsigned32(zone());
}
-Bounds Typer::Visitor::TypeNumberToInt32(Node* node) {
+Type* Typer::Visitor::TypeNumberToInt32(Node* node) {
return TypeUnaryOp(node, NumberToInt32);
}
-Bounds Typer::Visitor::TypeNumberToUint32(Node* node) {
+Type* Typer::Visitor::TypeNumberToUint32(Node* node) {
return TypeUnaryOp(node, NumberToUint32);
}
-Bounds Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
+Type* Typer::Visitor::TypePlainPrimitiveToNumber(Node* node) {
return TypeUnaryOp(node, ToNumber);
}
-Bounds Typer::Visitor::TypeReferenceEqual(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeReferenceEqual(Node* node) {
+ return Type::Boolean(zone());
}
-Bounds Typer::Visitor::TypeStringEqual(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeStringEqual(Node* node) {
+ return Type::Boolean(zone());
}
-Bounds Typer::Visitor::TypeStringLessThan(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeStringLessThan(Node* node) {
+ return Type::Boolean(zone());
}
-Bounds Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
- return Bounds(Type::None(zone()), Type::Boolean(zone()));
+Type* Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
+ return Type::Boolean(zone());
}
@@ -1710,597 +1553,475 @@ Type* ChangeRepresentation(Type* type, Type* rep, Zone* zone) {
} // namespace
-Bounds Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Signed32()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::UntaggedSigned32(), zone()),
- ChangeRepresentation(arg.upper, Type::UntaggedSigned32(), zone()));
+Type* Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
+ return ChangeRepresentation(arg, Type::UntaggedSigned32(), zone());
}
-Bounds Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Unsigned32()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::UntaggedUnsigned32(), zone()),
- ChangeRepresentation(arg.upper, Type::UntaggedUnsigned32(), zone()));
+Type* Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Unsigned32()));
+ return ChangeRepresentation(arg, Type::UntaggedUnsigned32(), zone());
}
-Bounds Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Number()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::UntaggedFloat64(), zone()),
- ChangeRepresentation(arg.upper, Type::UntaggedFloat64(), zone()));
+Type* Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Number()));
+ return ChangeRepresentation(arg, Type::UntaggedFloat64(), zone());
}
-Bounds Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Signed32()));
- Type* lower_rep = arg.lower->Is(Type::SignedSmall()) ? Type::TaggedSigned()
- : Type::Tagged();
- Type* upper_rep = arg.upper->Is(Type::SignedSmall()) ? Type::TaggedSigned()
- : Type::Tagged();
- return Bounds(ChangeRepresentation(arg.lower, lower_rep, zone()),
- ChangeRepresentation(arg.upper, upper_rep, zone()));
+Type* Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Signed32()));
+ Type* rep =
+ arg->Is(Type::SignedSmall()) ? Type::TaggedSigned() : Type::Tagged();
+ return ChangeRepresentation(arg, rep, zone());
}
-Bounds Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
- Bounds arg = Operand(node, 0);
- // TODO(neis): DCHECK(arg.upper->Is(Type::Unsigned32()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::Tagged(), zone()),
- ChangeRepresentation(arg.upper, Type::Tagged(), zone()));
+Type* Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
+ Type* arg = Operand(node, 0);
+ // TODO(neis): DCHECK(arg->Is(Type::Unsigned32()));
+ return ChangeRepresentation(arg, Type::Tagged(), zone());
}
-Bounds Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
- Bounds arg = Operand(node, 0);
+Type* Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
+ Type* arg = Operand(node, 0);
// TODO(neis): CHECK(arg.upper->Is(Type::Number()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::Tagged(), zone()),
- ChangeRepresentation(arg.upper, Type::Tagged(), zone()));
+ return ChangeRepresentation(arg, Type::Tagged(), zone());
}
-Bounds Typer::Visitor::TypeChangeBoolToBit(Node* node) {
- Bounds arg = Operand(node, 0);
+Type* Typer::Visitor::TypeChangeBoolToBit(Node* node) {
+ Type* arg = Operand(node, 0);
// TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::UntaggedBit(), zone()),
- ChangeRepresentation(arg.upper, Type::UntaggedBit(), zone()));
+ return ChangeRepresentation(arg, Type::UntaggedBit(), zone());
}
-Bounds Typer::Visitor::TypeChangeBitToBool(Node* node) {
- Bounds arg = Operand(node, 0);
+Type* Typer::Visitor::TypeChangeBitToBool(Node* node) {
+ Type* arg = Operand(node, 0);
// TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
- return Bounds(ChangeRepresentation(arg.lower, Type::TaggedPointer(), zone()),
- ChangeRepresentation(arg.upper, Type::TaggedPointer(), zone()));
+ return ChangeRepresentation(arg, Type::TaggedPointer(), zone());
}
-Bounds Typer::Visitor::TypeAllocate(Node* node) {
- return Bounds(Type::TaggedPointer());
-}
+Type* Typer::Visitor::TypeAllocate(Node* node) { return Type::TaggedPointer(); }
-Bounds Typer::Visitor::TypeLoadField(Node* node) {
- return Bounds(FieldAccessOf(node->op()).type);
+Type* Typer::Visitor::TypeLoadField(Node* node) {
+ return FieldAccessOf(node->op()).type;
}
-Bounds Typer::Visitor::TypeLoadBuffer(Node* node) {
+Type* Typer::Visitor::TypeLoadBuffer(Node* node) {
// TODO(bmeurer): This typing is not yet correct. Since we can still access
// out of bounds, the type in the general case has to include Undefined.
switch (BufferAccessOf(node->op()).external_array_type()) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
case kExternal##Type##Array: \
- return Bounds(typer_->cache_.k##Type);
+ return typer_->cache_.k##Type;
TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
}
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeLoadElement(Node* node) {
- return Bounds(ElementAccessOf(node->op()).type);
+Type* Typer::Visitor::TypeLoadElement(Node* node) {
+ return ElementAccessOf(node->op()).type;
}
-Bounds Typer::Visitor::TypeStoreField(Node* node) {
+Type* Typer::Visitor::TypeStoreField(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeStoreBuffer(Node* node) {
+Type* Typer::Visitor::TypeStoreBuffer(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeStoreElement(Node* node) {
+Type* Typer::Visitor::TypeStoreElement(Node* node) {
UNREACHABLE();
- return Bounds();
-}
-
-
-Bounds Typer::Visitor::TypeObjectIsSmi(Node* node) {
- return Bounds(Type::Boolean());
+ return nullptr;
}
-Bounds Typer::Visitor::TypeObjectIsNonNegativeSmi(Node* node) {
- return Bounds(Type::Boolean());
-}
+Type* Typer::Visitor::TypeObjectIsSmi(Node* node) { return Type::Boolean(); }
// Machine operators.
-Bounds Typer::Visitor::TypeLoad(Node* node) {
- return Bounds::Unbounded(zone());
-}
+Type* Typer::Visitor::TypeLoad(Node* node) { return Type::Any(); }
-Bounds Typer::Visitor::TypeStore(Node* node) {
+Type* Typer::Visitor::TypeStore(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
-Bounds Typer::Visitor::TypeWord32And(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeWord32And(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeWord32Or(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeWord32Or(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeWord32Xor(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeWord32Xor(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeWord32Shl(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeWord32Shl(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeWord32Shr(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeWord32Shr(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeWord32Sar(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeWord32Sar(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeWord32Ror(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeWord32Ror(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeWord32Equal(Node* node) {
- return Bounds(Type::Boolean());
-}
+Type* Typer::Visitor::TypeWord32Equal(Node* node) { return Type::Boolean(); }
-Bounds Typer::Visitor::TypeWord32Clz(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeWord32Clz(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeWord64And(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeWord64And(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeWord64Or(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeWord64Or(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeWord64Xor(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeWord64Xor(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeWord64Shl(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeWord64Shl(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeWord64Shr(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeWord64Shr(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeWord64Sar(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeWord64Sar(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeWord64Ror(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeWord64Ror(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeWord64Equal(Node* node) {
- return Bounds(Type::Boolean());
-}
+Type* Typer::Visitor::TypeWord64Equal(Node* node) { return Type::Boolean(); }
-Bounds Typer::Visitor::TypeInt32Add(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeInt32Add(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeInt32AddWithOverflow(Node* node) {
- return Bounds(Type::Internal());
+Type* Typer::Visitor::TypeInt32AddWithOverflow(Node* node) {
+ return Type::Internal();
}
-Bounds Typer::Visitor::TypeInt32Sub(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeInt32Sub(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeInt32SubWithOverflow(Node* node) {
- return Bounds(Type::Internal());
+Type* Typer::Visitor::TypeInt32SubWithOverflow(Node* node) {
+ return Type::Internal();
}
-Bounds Typer::Visitor::TypeInt32Mul(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeInt32Mul(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeInt32MulHigh(Node* node) {
- return Bounds(Type::Signed32());
-}
+Type* Typer::Visitor::TypeInt32MulHigh(Node* node) { return Type::Signed32(); }
-Bounds Typer::Visitor::TypeInt32Div(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeInt32Div(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeInt32Mod(Node* node) {
- return Bounds(Type::Integral32());
-}
+Type* Typer::Visitor::TypeInt32Mod(Node* node) { return Type::Integral32(); }
-Bounds Typer::Visitor::TypeInt32LessThan(Node* node) {
- return Bounds(Type::Boolean());
-}
+Type* Typer::Visitor::TypeInt32LessThan(Node* node) { return Type::Boolean(); }
-Bounds Typer::Visitor::TypeInt32LessThanOrEqual(Node* node) {
- return Bounds(Type::Boolean());
+Type* Typer::Visitor::TypeInt32LessThanOrEqual(Node* node) {
+ return Type::Boolean();
}
-Bounds Typer::Visitor::TypeUint32Div(Node* node) {
- return Bounds(Type::Unsigned32());
-}
+Type* Typer::Visitor::TypeUint32Div(Node* node) { return Type::Unsigned32(); }
-Bounds Typer::Visitor::TypeUint32LessThan(Node* node) {
- return Bounds(Type::Boolean());
-}
+Type* Typer::Visitor::TypeUint32LessThan(Node* node) { return Type::Boolean(); }
-Bounds Typer::Visitor::TypeUint32LessThanOrEqual(Node* node) {
- return Bounds(Type::Boolean());
+Type* Typer::Visitor::TypeUint32LessThanOrEqual(Node* node) {
+ return Type::Boolean();
}
-Bounds Typer::Visitor::TypeUint32Mod(Node* node) {
- return Bounds(Type::Unsigned32());
-}
+Type* Typer::Visitor::TypeUint32Mod(Node* node) { return Type::Unsigned32(); }
-Bounds Typer::Visitor::TypeUint32MulHigh(Node* node) {
- return Bounds(Type::Unsigned32());
+Type* Typer::Visitor::TypeUint32MulHigh(Node* node) {
+ return Type::Unsigned32();
}
-Bounds Typer::Visitor::TypeInt64Add(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeInt64Add(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeInt64Sub(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeInt64Sub(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeInt64Mul(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeInt64Mul(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeInt64Div(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeInt64Div(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeInt64Mod(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeInt64Mod(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeInt64LessThan(Node* node) {
- return Bounds(Type::Boolean());
-}
+Type* Typer::Visitor::TypeInt64LessThan(Node* node) { return Type::Boolean(); }
-Bounds Typer::Visitor::TypeInt64LessThanOrEqual(Node* node) {
- return Bounds(Type::Boolean());
+Type* Typer::Visitor::TypeInt64LessThanOrEqual(Node* node) {
+ return Type::Boolean();
}
-Bounds Typer::Visitor::TypeUint64Div(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeUint64Div(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeUint64LessThan(Node* node) {
- return Bounds(Type::Boolean());
-}
+Type* Typer::Visitor::TypeUint64LessThan(Node* node) { return Type::Boolean(); }
-Bounds Typer::Visitor::TypeUint64LessThanOrEqual(Node* node) {
- return Bounds(Type::Boolean());
+Type* Typer::Visitor::TypeUint64LessThanOrEqual(Node* node) {
+ return Type::Boolean();
}
-Bounds Typer::Visitor::TypeUint64Mod(Node* node) {
- return Bounds(Type::Internal());
-}
+Type* Typer::Visitor::TypeUint64Mod(Node* node) { return Type::Internal(); }
-Bounds Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
- return Bounds(Type::Intersect(
- Type::Number(), Type::UntaggedFloat64(), zone()));
+Type* Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
+ return Type::Intersect(Type::Number(), Type::UntaggedFloat64(), zone());
}
-Bounds Typer::Visitor::TypeChangeFloat64ToInt32(Node* node) {
- return Bounds(Type::Intersect(
- Type::Signed32(), Type::UntaggedSigned32(), zone()));
+Type* Typer::Visitor::TypeChangeFloat64ToInt32(Node* node) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32(), zone());
}
-Bounds Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
- return Bounds(Type::Intersect(
- Type::Unsigned32(), Type::UntaggedUnsigned32(), zone()));
+Type* Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
+ return Type::Intersect(Type::Unsigned32(), Type::UntaggedUnsigned32(),
+ zone());
}
-Bounds Typer::Visitor::TypeChangeInt32ToFloat64(Node* node) {
- return Bounds(Type::Intersect(
- Type::Signed32(), Type::UntaggedFloat64(), zone()));
+Type* Typer::Visitor::TypeChangeInt32ToFloat64(Node* node) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedFloat64(), zone());
}
-Bounds Typer::Visitor::TypeChangeInt32ToInt64(Node* node) {
- return Bounds(Type::Internal());
+Type* Typer::Visitor::TypeChangeInt32ToInt64(Node* node) {
+ return Type::Internal();
}
-Bounds Typer::Visitor::TypeChangeUint32ToFloat64(Node* node) {
- return Bounds(Type::Intersect(
- Type::Unsigned32(), Type::UntaggedFloat64(), zone()));
+Type* Typer::Visitor::TypeChangeUint32ToFloat64(Node* node) {
+ return Type::Intersect(Type::Unsigned32(), Type::UntaggedFloat64(), zone());
}
-Bounds Typer::Visitor::TypeChangeUint32ToUint64(Node* node) {
- return Bounds(Type::Internal());
+Type* Typer::Visitor::TypeChangeUint32ToUint64(Node* node) {
+ return Type::Internal();
}
-Bounds Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
- return Bounds(Type::Intersect(
- Type::Number(), Type::UntaggedFloat32(), zone()));
+Type* Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
+ return Type::Intersect(Type::Number(), Type::UntaggedFloat32(), zone());
}
-Bounds Typer::Visitor::TypeTruncateFloat64ToInt32(Node* node) {
- return Bounds(Type::Intersect(
- Type::Signed32(), Type::UntaggedSigned32(), zone()));
+Type* Typer::Visitor::TypeTruncateFloat64ToInt32(Node* node) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32(), zone());
}
-Bounds Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
- return Bounds(Type::Intersect(
- Type::Signed32(), Type::UntaggedSigned32(), zone()));
+Type* Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32(), zone());
}
-Bounds Typer::Visitor::TypeFloat32Add(Node* node) {
- return Bounds(Type::Number());
+Type* Typer::Visitor::TypeBitcastFloat32ToInt32(Node* node) {
+ return Type::Number();
}
-Bounds Typer::Visitor::TypeFloat32Sub(Node* node) {
- return Bounds(Type::Number());
+Type* Typer::Visitor::TypeBitcastFloat64ToInt64(Node* node) {
+ return Type::Number();
}
-Bounds Typer::Visitor::TypeFloat32Mul(Node* node) {
- return Bounds(Type::Number());
+Type* Typer::Visitor::TypeBitcastInt32ToFloat32(Node* node) {
+ return Type::Number();
}
-Bounds Typer::Visitor::TypeFloat32Div(Node* node) {
- return Bounds(Type::Number());
+Type* Typer::Visitor::TypeBitcastInt64ToFloat64(Node* node) {
+ return Type::Number();
}
-Bounds Typer::Visitor::TypeFloat32Max(Node* node) {
- return Bounds(Type::Number());
-}
+Type* Typer::Visitor::TypeFloat32Add(Node* node) { return Type::Number(); }
-Bounds Typer::Visitor::TypeFloat32Min(Node* node) {
- return Bounds(Type::Number());
-}
+Type* Typer::Visitor::TypeFloat32Sub(Node* node) { return Type::Number(); }
-Bounds Typer::Visitor::TypeFloat32Abs(Node* node) {
+Type* Typer::Visitor::TypeFloat32Mul(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat32Div(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat32Max(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat32Min(Node* node) { return Type::Number(); }
+
+
+Type* Typer::Visitor::TypeFloat32Abs(Node* node) {
// TODO(turbofan): We should be able to infer a better type here.
- return Bounds(Type::Number());
+ return Type::Number();
}
-Bounds Typer::Visitor::TypeFloat32Sqrt(Node* node) {
- return Bounds(Type::Number());
-}
+Type* Typer::Visitor::TypeFloat32Sqrt(Node* node) { return Type::Number(); }
-Bounds Typer::Visitor::TypeFloat32Equal(Node* node) {
- return Bounds(Type::Boolean());
-}
+Type* Typer::Visitor::TypeFloat32Equal(Node* node) { return Type::Boolean(); }
-Bounds Typer::Visitor::TypeFloat32LessThan(Node* node) {
- return Bounds(Type::Boolean());
+Type* Typer::Visitor::TypeFloat32LessThan(Node* node) {
+ return Type::Boolean();
}
-Bounds Typer::Visitor::TypeFloat32LessThanOrEqual(Node* node) {
- return Bounds(Type::Boolean());
+Type* Typer::Visitor::TypeFloat32LessThanOrEqual(Node* node) {
+ return Type::Boolean();
}
-Bounds Typer::Visitor::TypeFloat64Add(Node* node) {
- return Bounds(Type::Number());
-}
+Type* Typer::Visitor::TypeFloat64Add(Node* node) { return Type::Number(); }
-Bounds Typer::Visitor::TypeFloat64Sub(Node* node) {
- return Bounds(Type::Number());
-}
+Type* Typer::Visitor::TypeFloat64Sub(Node* node) { return Type::Number(); }
-Bounds Typer::Visitor::TypeFloat64Mul(Node* node) {
- return Bounds(Type::Number());
-}
+Type* Typer::Visitor::TypeFloat64Mul(Node* node) { return Type::Number(); }
-Bounds Typer::Visitor::TypeFloat64Div(Node* node) {
- return Bounds(Type::Number());
-}
+Type* Typer::Visitor::TypeFloat64Div(Node* node) { return Type::Number(); }
-Bounds Typer::Visitor::TypeFloat64Mod(Node* node) {
- return Bounds(Type::Number());
-}
+Type* Typer::Visitor::TypeFloat64Mod(Node* node) { return Type::Number(); }
-Bounds Typer::Visitor::TypeFloat64Max(Node* node) {
- return Bounds(Type::Number());
-}
+Type* Typer::Visitor::TypeFloat64Max(Node* node) { return Type::Number(); }
-Bounds Typer::Visitor::TypeFloat64Min(Node* node) {
- return Bounds(Type::Number());
-}
+Type* Typer::Visitor::TypeFloat64Min(Node* node) { return Type::Number(); }
-Bounds Typer::Visitor::TypeFloat64Abs(Node* node) {
+Type* Typer::Visitor::TypeFloat64Abs(Node* node) {
// TODO(turbofan): We should be able to infer a better type here.
- return Bounds(Type::Number());
+ return Type::Number();
}
-Bounds Typer::Visitor::TypeFloat64Sqrt(Node* node) {
- return Bounds(Type::Number());
-}
+Type* Typer::Visitor::TypeFloat64Sqrt(Node* node) { return Type::Number(); }
-Bounds Typer::Visitor::TypeFloat64Equal(Node* node) {
- return Bounds(Type::Boolean());
-}
+Type* Typer::Visitor::TypeFloat64Equal(Node* node) { return Type::Boolean(); }
-Bounds Typer::Visitor::TypeFloat64LessThan(Node* node) {
- return Bounds(Type::Boolean());
+Type* Typer::Visitor::TypeFloat64LessThan(Node* node) {
+ return Type::Boolean();
}
-Bounds Typer::Visitor::TypeFloat64LessThanOrEqual(Node* node) {
- return Bounds(Type::Boolean());
+Type* Typer::Visitor::TypeFloat64LessThanOrEqual(Node* node) {
+ return Type::Boolean();
}
-Bounds Typer::Visitor::TypeFloat64RoundDown(Node* node) {
+Type* Typer::Visitor::TypeFloat64RoundDown(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
- return Bounds(Type::Number());
+ return Type::Number();
}
-Bounds Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
+Type* Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
- return Bounds(Type::Number());
+ return Type::Number();
}
-Bounds Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
+Type* Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
- return Bounds(Type::Number());
+ return Type::Number();
}
-Bounds Typer::Visitor::TypeFloat64ExtractLowWord32(Node* node) {
- return Bounds(Type::Signed32());
+Type* Typer::Visitor::TypeFloat64ExtractLowWord32(Node* node) {
+ return Type::Signed32();
}
-Bounds Typer::Visitor::TypeFloat64ExtractHighWord32(Node* node) {
- return Bounds(Type::Signed32());
+Type* Typer::Visitor::TypeFloat64ExtractHighWord32(Node* node) {
+ return Type::Signed32();
}
-Bounds Typer::Visitor::TypeFloat64InsertLowWord32(Node* node) {
- return Bounds(Type::Number());
+Type* Typer::Visitor::TypeFloat64InsertLowWord32(Node* node) {
+ return Type::Number();
}
-Bounds Typer::Visitor::TypeFloat64InsertHighWord32(Node* node) {
- return Bounds(Type::Number());
+Type* Typer::Visitor::TypeFloat64InsertHighWord32(Node* node) {
+ return Type::Number();
}
-Bounds Typer::Visitor::TypeLoadStackPointer(Node* node) {
- return Bounds(Type::Internal());
+Type* Typer::Visitor::TypeLoadStackPointer(Node* node) {
+ return Type::Internal();
}
-Bounds Typer::Visitor::TypeLoadFramePointer(Node* node) {
- return Bounds(Type::Internal());
+Type* Typer::Visitor::TypeLoadFramePointer(Node* node) {
+ return Type::Internal();
}
-Bounds Typer::Visitor::TypeCheckedLoad(Node* node) {
- return Bounds::Unbounded(zone());
-}
+Type* Typer::Visitor::TypeCheckedLoad(Node* node) { return Type::Any(); }
-Bounds Typer::Visitor::TypeCheckedStore(Node* node) {
+Type* Typer::Visitor::TypeCheckedStore(Node* node) {
UNREACHABLE();
- return Bounds();
+ return nullptr;
}
@@ -2374,6 +2095,9 @@ Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
#undef TYPED_ARRAY_CASE
}
}
+ if (Type::IsInteger(*value)) {
+ return Type::Range(value->Number(), value->Number(), zone());
+ }
return Type::Constant(value, zone());
}
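
The typer hunks above are mechanical: every Typer::Visitor method that used to return a two-sided Bounds(lower, upper) now returns the single Type* that was previously its upper bound. A minimal standalone sketch of the before/after shape, using stand-in types rather than the real v8::internal classes:

    #include <cassert>

    struct Type {};            // stand-in for v8::internal::Type
    static Type kIntegral32;   // stand-in for Type::Integral32()

    // Before: visitors returned a (lower, upper) pair.
    struct Bounds {
      Type* lower;
      Type* upper;
      explicit Bounds(Type* t) : lower(t), upper(t) {}
    };
    static Bounds OldTypeWord32And() { return Bounds(&kIntegral32); }

    // After: lower bounds are dropped; a node carries exactly one type.
    static Type* NewTypeWord32And() { return &kIntegral32; }

    int main() {
      assert(OldTypeWord32And().upper == NewTypeWord32And());
      return 0;
    }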
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index f5ef4f1553..065262907b 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -10,10 +10,11 @@
namespace v8 {
namespace internal {
-namespace compiler {
// Forward declarations.
-class TyperCache;
+class ZoneTypeCache;
+
+namespace compiler {
class Typer {
@@ -39,7 +40,7 @@ class Typer {
Graph* const graph_;
Type::FunctionType* function_type_;
Decorator* decorator_;
- TyperCache const& cache_;
+ ZoneTypeCache const& cache_;
Type* singleton_false_;
Type* singleton_true_;
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 690fd04577..57bcef16a0 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -22,6 +22,7 @@
#include "src/compiler/schedule.h"
#include "src/compiler/simplified-operator.h"
#include "src/ostreams.h"
+#include "src/types-inl.h"
namespace v8 {
namespace internal {
@@ -50,11 +51,6 @@ class Verifier::Visitor {
Typing typing;
private:
- // TODO(rossberg): Get rid of these once we got rid of NodeProperties.
- Bounds bounds(Node* node) { return NodeProperties::GetBounds(node); }
- Node* ValueInput(Node* node, int i = 0) {
- return NodeProperties::GetValueInput(node, i);
- }
void CheckNotTyped(Node* node) {
if (NodeProperties::IsTyped(node)) {
std::ostringstream str;
@@ -64,35 +60,35 @@ class Verifier::Visitor {
}
}
void CheckUpperIs(Node* node, Type* type) {
- if (typing == TYPED && !bounds(node).upper->Is(type)) {
+ if (typing == TYPED && !NodeProperties::GetType(node)->Is(type)) {
std::ostringstream str;
str << "TypeError: node #" << node->id() << ":" << *node->op()
- << " upper bound ";
- bounds(node).upper->PrintTo(str);
+ << " type ";
+ NodeProperties::GetType(node)->PrintTo(str);
str << " is not ";
type->PrintTo(str);
FATAL(str.str().c_str());
}
}
void CheckUpperMaybe(Node* node, Type* type) {
- if (typing == TYPED && !bounds(node).upper->Maybe(type)) {
+ if (typing == TYPED && !NodeProperties::GetType(node)->Maybe(type)) {
std::ostringstream str;
str << "TypeError: node #" << node->id() << ":" << *node->op()
- << " upper bound ";
- bounds(node).upper->PrintTo(str);
+ << " type ";
+ NodeProperties::GetType(node)->PrintTo(str);
str << " must intersect ";
type->PrintTo(str);
FATAL(str.str().c_str());
}
}
void CheckValueInputIs(Node* node, int i, Type* type) {
- Node* input = ValueInput(node, i);
- if (typing == TYPED && !bounds(input).upper->Is(type)) {
+ Node* input = NodeProperties::GetValueInput(node, i);
+ if (typing == TYPED && !NodeProperties::GetType(input)->Is(type)) {
std::ostringstream str;
str << "TypeError: node #" << node->id() << ":" << *node->op()
<< "(input @" << i << " = " << input->opcode() << ":"
- << input->op()->mnemonic() << ") upper bound ";
- bounds(input).upper->PrintTo(str);
+ << input->op()->mnemonic() << ") type ";
+ NodeProperties::GetType(input)->PrintTo(str);
str << " is not ";
type->PrintTo(str);
FATAL(str.str().c_str());
@@ -397,9 +393,7 @@ void Verifier::Visitor::Check(Node* node) {
// TODO(rossberg): for now at least, narrowing does not really hold.
/*
for (int i = 0; i < value_count; ++i) {
- // TODO(rossberg, jarin): Figure out what to do about lower bounds.
- // CHECK(bounds(node).lower->Is(bounds(ValueInput(node, i)).lower));
- CHECK(bounds(ValueInput(node, i)).upper->Is(bounds(node).upper));
+ CHECK(type_of(ValueInput(node, i))->Is(type_of(node)));
}
*/
break;
@@ -426,8 +420,8 @@ void Verifier::Visitor::Check(Node* node) {
// TODO(rossberg): what are the constraints on these?
// Type must be subsumed by input type.
if (typing == TYPED) {
- CHECK(bounds(ValueInput(node)).lower->Is(bounds(node).lower));
- CHECK(bounds(ValueInput(node)).upper->Is(bounds(node).upper));
+ Node* val = NodeProperties::GetValueInput(node, 0);
+ CHECK(NodeProperties::GetType(val)->Is(NodeProperties::GetType(node)));
}
break;
}
@@ -510,6 +504,10 @@ void Verifier::Visitor::Check(Node* node) {
// Type is Object.
CheckUpperIs(node, Type::Object());
break;
+ case IrOpcode::kJSCreateArguments:
+ // Type is OtherObject.
+ CheckUpperIs(node, Type::OtherObject());
+ break;
case IrOpcode::kJSCreateClosure:
// Type is Function.
CheckUpperIs(node, Type::OtherObject());
@@ -563,7 +561,7 @@ void Verifier::Visitor::Check(Node* node) {
// TODO(rossberg): This should really be Is(Internal), but the typer
// currently can't do backwards propagation.
CheckUpperMaybe(context, Type::Internal());
- if (typing == TYPED) CHECK(bounds(node).upper->IsContext());
+ if (typing == TYPED) CHECK(NodeProperties::GetType(node)->IsContext());
break;
}
@@ -590,7 +588,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
}
case IrOpcode::kJSForInNext: {
- CheckUpperIs(node, Type::Union(Type::Name(), Type::Undefined()));
+ CheckUpperIs(node, Type::Union(Type::Name(), Type::Undefined(), zone));
break;
}
case IrOpcode::kJSForInStep: {
@@ -683,10 +681,6 @@ void Verifier::Visitor::Check(Node* node) {
CheckValueInputIs(node, 0, Type::Any());
CheckUpperIs(node, Type::Boolean());
break;
- case IrOpcode::kObjectIsNonNegativeSmi:
- CheckValueInputIs(node, 0, Type::Any());
- CheckUpperIs(node, Type::Boolean());
- break;
case IrOpcode::kAllocate:
CheckValueInputIs(node, 0, Type::PlainNumber());
CheckUpperIs(node, Type::TaggedPointer());
@@ -872,6 +866,10 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kTruncateInt64ToInt32:
case IrOpcode::kTruncateFloat64ToFloat32:
case IrOpcode::kTruncateFloat64ToInt32:
+ case IrOpcode::kBitcastFloat32ToInt32:
+ case IrOpcode::kBitcastFloat64ToInt64:
+ case IrOpcode::kBitcastInt32ToFloat32:
+ case IrOpcode::kBitcastInt64ToFloat64:
case IrOpcode::kChangeInt32ToInt64:
case IrOpcode::kChangeUint32ToUint64:
case IrOpcode::kChangeInt32ToFloat64:
@@ -1152,6 +1150,68 @@ void ScheduleVerifier::Run(Schedule* schedule) {
}
}
}
+
+
+#ifdef DEBUG
+
+// static
+void Verifier::VerifyNode(Node* node) {
+ CHECK_EQ(OperatorProperties::GetTotalInputCount(node->op()),
+ node->InputCount());
+ // If this node has no effect or no control outputs,
+ // we check that none of its uses are effect or control inputs.
+ bool check_no_control = node->op()->ControlOutputCount() == 0;
+ bool check_no_effect = node->op()->EffectOutputCount() == 0;
+ bool check_no_frame_state = node->opcode() != IrOpcode::kFrameState;
+ if (check_no_effect || check_no_control) {
+ for (Edge edge : node->use_edges()) {
+ Node* const user = edge.from();
+ CHECK(!user->IsDead());
+ if (NodeProperties::IsControlEdge(edge)) {
+ CHECK(!check_no_control);
+ } else if (NodeProperties::IsEffectEdge(edge)) {
+ CHECK(!check_no_effect);
+ } else if (NodeProperties::IsFrameStateEdge(edge)) {
+ CHECK(!check_no_frame_state);
+ }
+ }
+ }
+ // Frame state inputs should be frame states (or sentinels).
+ for (int i = 0; i < OperatorProperties::GetFrameStateInputCount(node->op());
+ i++) {
+ Node* input = NodeProperties::GetFrameStateInput(node, i);
+ CHECK(input->opcode() == IrOpcode::kFrameState ||
+ input->opcode() == IrOpcode::kStart ||
+ input->opcode() == IrOpcode::kDead);
+ }
+ // Effect inputs should be effect-producing nodes (or sentinels).
+ for (int i = 0; i < node->op()->EffectInputCount(); i++) {
+ Node* input = NodeProperties::GetEffectInput(node, i);
+ CHECK(input->op()->EffectOutputCount() > 0 ||
+ input->opcode() == IrOpcode::kDead);
+ }
+ // Control inputs should be control-producing nodes (or sentinels).
+ for (int i = 0; i < node->op()->ControlInputCount(); i++) {
+ Node* input = NodeProperties::GetControlInput(node, i);
+ CHECK(input->op()->ControlOutputCount() > 0 ||
+ input->opcode() == IrOpcode::kDead);
+ }
+}
+
+
+void Verifier::VerifyEdgeInputReplacement(const Edge& edge,
+ const Node* replacement) {
+ // Check that the user does not misuse the replacement.
+ DCHECK(!NodeProperties::IsControlEdge(edge) ||
+ replacement->op()->ControlOutputCount() > 0);
+ DCHECK(!NodeProperties::IsEffectEdge(edge) ||
+ replacement->op()->EffectOutputCount() > 0);
+ DCHECK(!NodeProperties::IsFrameStateEdge(edge) ||
+ replacement->opcode() == IrOpcode::kFrameState);
+}
+
+#endif // DEBUG
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/verifier.h b/deps/v8/src/compiler/verifier.h
index 8c5962fcad..cee323e480 100644
--- a/deps/v8/src/compiler/verifier.h
+++ b/deps/v8/src/compiler/verifier.h
@@ -12,6 +12,8 @@ namespace internal {
namespace compiler {
class Graph;
+class Edge;
+class Node;
class Schedule;
// Verifies properties of a graph, such as the well-formedness of inputs to
@@ -22,6 +24,28 @@ class Verifier {
static void Run(Graph* graph, Typing typing = TYPED);
+#ifdef DEBUG
+ // Verifies consistency of node inputs and uses:
+ // - node inputs should agree with the input count computed from
+ // the node's operator.
+ // - effect inputs should have effect outputs.
+ // - control inputs should have control outputs.
+ // - frame state inputs should be frame states.
+ // - if the node has control uses, it should produce control.
+ // - if the node has effect uses, it should produce effect.
+ // - if the node has frame state uses, it must be a frame state.
+ static void VerifyNode(Node* node);
+
+ // Verify that {replacement} has the required outputs
+ // (effect, control or frame state) to be used as an input for {edge}.
+ static void VerifyEdgeInputReplacement(const Edge& edge,
+ const Node* replacement);
+#else
+ static void VerifyNode(Node* node) {}
+ static void VerifyEdgeInputReplacement(const Edge& edge,
+ const Node* replacement) {}
+#endif // DEBUG
+
private:
class Visitor;
DISALLOW_COPY_AND_ASSIGN(Verifier);
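
The header above uses the usual debug-only verification pattern: the same entry points exist in every build, but outside DEBUG they collapse to empty inline bodies, so call sites never need an #ifdef of their own. A self-contained sketch of that pattern (the names here are illustrative, not the V8 API):

    #include <cstdio>

    struct Node;  // opaque for the sketch

    #ifdef DEBUG
    static void VerifyNodeSketch(Node* node) {
      // Real checks would walk the node's inputs and use edges here.
      std::printf("verifying node %p\n", static_cast<void*>(node));
    }
    #else
    static void VerifyNodeSketch(Node*) {}  // compiled away in release
    #endif

    int main() {
      VerifyNodeSketch(nullptr);  // callers stay unconditional
      return 0;
    }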
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 3ad07ad7de..4c991718f8 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -1171,6 +1171,34 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ movsd(operand, i.InputDoubleRegister(index));
}
break;
+ case kX64BitcastFI:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ movl(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kX64BitcastDL:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ movq(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ movq(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kX64BitcastIF:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ movss(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
+ case kX64BitcastLD:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movq(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ movsd(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
case kX64Lea32: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation
@@ -1271,6 +1299,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedLoadWord32:
ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
break;
+ case kCheckedLoadWord64:
+ ASSEMBLE_CHECKED_LOAD_INTEGER(movq);
+ break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
break;
@@ -1286,6 +1317,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreWord32:
ASSEMBLE_CHECKED_STORE_INTEGER(movl);
break;
+ case kCheckedStoreWord64:
+ ASSEMBLE_CHECKED_STORE_INTEGER(movq);
+ break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FLOAT(movss);
break;
@@ -1349,6 +1383,9 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
case kNotOverflow:
__ j(no_overflow, tlabel);
break;
+ default:
+ UNREACHABLE();
+ break;
}
if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
@@ -1418,6 +1455,9 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case kNotOverflow:
cc = no_overflow;
break;
+ default:
+ UNREACHABLE();
+ break;
}
__ bind(&check);
__ setcc(cc, reg);
@@ -1760,15 +1800,17 @@ void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
- }
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
}
}
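
EnsureSpaceForLazyDeopt above pads with nops until the current code offset is at least patch_size() bytes past the previous lazy-bailout point, so the deoptimizer can later patch a call in without clobbering neighbouring code. The arithmetic in isolation (a sketch; the byte counts below are made up, not real patch sizes):

    #include <cstdio>

    static int PaddingNeeded(int current_pc, int last_lazy_deopt_pc,
                             int patch_size) {
      int wanted_pc = last_lazy_deopt_pc + patch_size;
      return current_pc < wanted_pc ? wanted_pc - current_pc : 0;
    }

    int main() {
      // Last lazy deopt at offset 100, the patch needs 10 bytes, and
      // code generation has reached offset 104: emit 6 bytes of nops.
      std::printf("%d nop bytes\n", PaddingNeeded(104, 100, 10));
      return 0;
    }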
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 2e10729954..7d3b434d15 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -109,6 +109,10 @@ namespace compiler {
V(X64Movq) \
V(X64Movsd) \
V(X64Movss) \
+ V(X64BitcastFI) \
+ V(X64BitcastDL) \
+ V(X64BitcastIF) \
+ V(X64BitcastLD) \
V(X64Lea32) \
V(X64Lea) \
V(X64Dec32) \
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 0bcd526322..516a9a7691 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -233,6 +233,9 @@ void InstructionSelector::VisitCheckedLoad(Node* node) {
case kRepWord32:
opcode = kCheckedLoadWord32;
break;
+ case kRepWord64:
+ opcode = kCheckedLoadWord64;
+ break;
case kRepFloat32:
opcode = kCheckedLoadFloat32;
break;
@@ -280,6 +283,9 @@ void InstructionSelector::VisitCheckedStore(Node* node) {
case kRepWord32:
opcode = kCheckedStoreWord32;
break;
+ case kRepWord64:
+ opcode = kCheckedStoreWord64;
+ break;
case kRepFloat32:
opcode = kCheckedStoreFloat32;
break;
@@ -900,6 +906,30 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64BitcastDL, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64BitcastLD, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
}
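
The four Bitcast* operators added above reinterpret a value's bits without any numeric conversion, which on x64 lowers to movd/movq moves between general-purpose and SSE registers. In portable C++ the same operation is a memcpy, as in this sketch:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static uint32_t BitcastFloat32ToInt32(float f) {
      uint32_t bits;
      std::memcpy(&bits, &f, sizeof bits);  // reinterpret, do not convert
      return bits;
    }

    static float BitcastInt32ToFloat32(uint32_t bits) {
      float f;
      std::memcpy(&f, &bits, sizeof f);
      return f;
    }

    int main() {
      std::printf("%#x\n", BitcastFloat32ToInt32(1.0f));       // 0x3f800000
      std::printf("%g\n", BitcastInt32ToFloat32(0x3f800000u)); // 1
      return 0;
    }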
diff --git a/deps/v8/src/compiler/x87/code-generator-x87.cc b/deps/v8/src/compiler/x87/code-generator-x87.cc
index d39fda6761..9ca9a3076f 100644
--- a/deps/v8/src/compiler/x87/code-generator-x87.cc
+++ b/deps/v8/src/compiler/x87/code-generator-x87.cc
@@ -1069,6 +1069,28 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
}
+ case kX87BitcastFI: {
+ __ fstp(0);
+ __ mov(i.OutputRegister(), MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kFloatSize));
+ break;
+ }
+ case kX87BitcastIF: {
+ if (instr->InputAt(0)->IsRegister()) {
+ __ lea(esp, Operand(esp, -kFloatSize));
+ __ mov(MemOperand(esp, 0), i.InputRegister(0));
+ __ fstp(0);
+ __ fld_s(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kFloatSize));
+ } else {
+ __ lea(esp, Operand(esp, -kDoubleSize));
+ __ mov(MemOperand(esp, 0), i.InputRegister(0));
+ __ fstp(0);
+ __ fld_d(MemOperand(esp, 0));
+ __ lea(esp, Operand(esp, kDoubleSize));
+ }
+ break;
+ }
case kX87Lea: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation
@@ -1224,6 +1246,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ cmp(esp, Operand::StaticVariable(stack_limit));
break;
}
+ case kCheckedLoadWord64:
+ case kCheckedStoreWord64:
+ UNREACHABLE(); // Checked int64 load/store is currently unsupported.
+ break;
}
} // NOLINT(readability/fn_size)
@@ -1278,6 +1304,9 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
case kNotOverflow:
__ j(no_overflow, tlabel);
break;
+ default:
+ UNREACHABLE();
+ break;
}
// Add a jump if not falling through to the next block.
if (!branch->fallthru) __ jmp(flabel);
@@ -1348,6 +1377,9 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
case kNotOverflow:
cc = no_overflow;
break;
+ default:
+ UNREACHABLE();
+ break;
}
__ bind(&check);
if (reg.is_byte_register()) {
@@ -1838,15 +1870,17 @@ void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
void CodeGenerator::EnsureSpaceForLazyDeopt() {
+ if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
+ return;
+ }
+
int space_needed = Deoptimizer::patch_size();
- if (!info()->IsStub()) {
- // Ensure that we have enough space after the previous lazy-bailout
- // instruction for patching the code here.
- int current_pc = masm()->pc_offset();
- if (current_pc < last_lazy_deopt_pc_ + space_needed) {
- int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
- __ Nop(padding_size);
- }
+ // Ensure that we have enough space after the previous lazy-bailout
+ // instruction for patching the code here.
+ int current_pc = masm()->pc_offset();
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
}
}
diff --git a/deps/v8/src/compiler/x87/instruction-codes-x87.h b/deps/v8/src/compiler/x87/instruction-codes-x87.h
index d1b759be34..9408e41724 100644
--- a/deps/v8/src/compiler/x87/instruction-codes-x87.h
+++ b/deps/v8/src/compiler/x87/instruction-codes-x87.h
@@ -74,6 +74,8 @@ namespace compiler {
V(X87Movss) \
V(X87Movsd) \
V(X87Lea) \
+ V(X87BitcastFI) \
+ V(X87BitcastIF) \
V(X87Push) \
V(X87PushFloat64) \
V(X87PushFloat32) \
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index 95aa70ac92..ac868fb932 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -40,9 +40,9 @@ class X87OperandGenerator final : public OperandGenerator {
case IrOpcode::kHeapConstant: {
// Constants in new space cannot be used as immediates in V8 because
// the GC does not scan code objects when collecting the new generation.
- Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
- Isolate* isolate = value.handle()->GetIsolate();
- return !isolate->heap()->InNewSpace(*value.handle());
+ Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
+ Isolate* isolate = value->GetIsolate();
+ return !isolate->heap()->InNewSpace(*value);
}
default:
return false;
@@ -664,6 +664,19 @@ void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
}
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
+ Emit(kX87BitcastFI, g.DefineAsRegister(node), 0, NULL);
+}
+
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ X87OperandGenerator g(this);
+ Emit(kX87BitcastIF, g.DefineAsFixed(node, stX_0), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitFloat32Add(Node* node) {
X87OperandGenerator g(this);
Emit(kX87PushFloat32, g.NoOutput(), g.Use(node->InputAt(0)));
diff --git a/deps/v8/src/context-measure.cc b/deps/v8/src/context-measure.cc
index 42a08be0af..da4aae498b 100644
--- a/deps/v8/src/context-measure.cc
+++ b/deps/v8/src/context-measure.cc
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/context-measure.h"
+#include "src/base/logging.h"
+#include "src/contexts.h"
+#include "src/objects-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
new file mode 100644
index 0000000000..e88cd33ad3
--- /dev/null
+++ b/deps/v8/src/contexts-inl.h
@@ -0,0 +1,148 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CONTEXTS_INL_H_
+#define V8_CONTEXTS_INL_H_
+
+#include "src/contexts.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+// static
+ScriptContextTable* ScriptContextTable::cast(Object* context) {
+ DCHECK(context->IsScriptContextTable());
+ return reinterpret_cast<ScriptContextTable*>(context);
+}
+
+
+int ScriptContextTable::used() const {
+ return Smi::cast(get(kUsedSlot))->value();
+}
+
+
+void ScriptContextTable::set_used(int used) {
+ set(kUsedSlot, Smi::FromInt(used));
+}
+
+
+// static
+Handle<Context> ScriptContextTable::GetContext(Handle<ScriptContextTable> table,
+ int i) {
+ DCHECK(i < table->used());
+ return Handle<Context>::cast(FixedArray::get(table, i + kFirstContextSlot));
+}
+
+
+// static
+Context* Context::cast(Object* context) {
+ DCHECK(context->IsContext());
+ return reinterpret_cast<Context*>(context);
+}
+
+
+JSFunction* Context::closure() { return JSFunction::cast(get(CLOSURE_INDEX)); }
+void Context::set_closure(JSFunction* closure) { set(CLOSURE_INDEX, closure); }
+
+
+Context* Context::previous() {
+ Object* result = get(PREVIOUS_INDEX);
+ DCHECK(IsBootstrappingOrValidParentContext(result, this));
+ return reinterpret_cast<Context*>(result);
+}
+void Context::set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
+
+
+bool Context::has_extension() { return extension() != nullptr; }
+Object* Context::extension() { return get(EXTENSION_INDEX); }
+void Context::set_extension(Object* object) { set(EXTENSION_INDEX, object); }
+
+
+JSModule* Context::module() { return JSModule::cast(get(EXTENSION_INDEX)); }
+void Context::set_module(JSModule* module) { set(EXTENSION_INDEX, module); }
+
+
+GlobalObject* Context::global_object() {
+ Object* result = get(GLOBAL_OBJECT_INDEX);
+ DCHECK(IsBootstrappingOrGlobalObject(this->GetIsolate(), result));
+ return reinterpret_cast<GlobalObject*>(result);
+}
+
+
+void Context::set_global_object(GlobalObject* object) {
+ set(GLOBAL_OBJECT_INDEX, object);
+}
+
+
+bool Context::IsNativeContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->native_context_map();
+}
+
+
+bool Context::IsFunctionContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->function_context_map();
+}
+
+
+bool Context::IsCatchContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->catch_context_map();
+}
+
+
+bool Context::IsWithContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->with_context_map();
+}
+
+
+bool Context::IsBlockContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->block_context_map();
+}
+
+
+bool Context::IsModuleContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->module_context_map();
+}
+
+
+bool Context::IsScriptContext() {
+ Map* map = this->map();
+ return map == map->GetHeap()->script_context_map();
+}
+
+
+bool Context::HasSameSecurityTokenAs(Context* that) {
+ return this->global_object()->native_context()->security_token() ==
+ that->global_object()->native_context()->security_token();
+}
+
+
+#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
+ void Context::set_##name(type* value) { \
+ DCHECK(IsNativeContext()); \
+ set(index, value); \
+ } \
+ bool Context::is_##name(type* value) { \
+ DCHECK(IsNativeContext()); \
+ return type::cast(get(index)) == value; \
+ } \
+ type* Context::name() { \
+ DCHECK(IsNativeContext()); \
+ return type::cast(get(index)); \
+ }
+NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
+#undef NATIVE_CONTEXT_FIELD_ACCESSORS
+
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CONTEXTS_INL_H_
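
The NATIVE_CONTEXT_FIELD_ACCESSORS block at the end of the new header is an X-macro: one field list expands into a setter, a predicate, and a getter per entry. The same technique in miniature (simplified types and hypothetical field names, not the real context slots):

    #include <cassert>

    #define FIELDS(V) \
      V(0, int, answer) \
      V(1, int, year)

    struct Ctx {
      int slots[2] = {0, 0};
      int get(int i) const { return slots[i]; }
      void set(int i, int v) { slots[i] = v; }

    #define FIELD_ACCESSORS(index, type, name)           \
      type name() const { return get(index); }           \
      void set_##name(type value) { set(index, value); }
      FIELDS(FIELD_ACCESSORS)
    #undef FIELD_ACCESSORS
    };

    int main() {
      Ctx c;
      c.set_answer(42);
      c.set_year(2015);
      assert(c.answer() == 42 && c.year() == 2015);
      return 0;
    }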
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index ef850452ce..a008d49ac3 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -6,6 +6,7 @@
#include "src/bootstrapper.h"
#include "src/debug/debug.h"
+#include "src/isolate-inl.h"
#include "src/scopeinfo.h"
namespace v8 {
@@ -18,7 +19,7 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
int used = table->used();
int length = table->length();
CHECK(used >= 0 && length > 0 && used < length);
- if (used + 1 == length) {
+ if (used + kFirstContextSlot == length) {
CHECK(length < Smi::kMaxValue / 2);
Isolate* isolate = table->GetIsolate();
Handle<FixedArray> copy =
@@ -31,7 +32,7 @@ Handle<ScriptContextTable> ScriptContextTable::Extend(
result->set_used(used + 1);
DCHECK(script_context->IsScriptContext());
- result->set(used + 1, *script_context);
+ result->set(used + kFirstContextSlot, *script_context);
return result;
}
@@ -41,12 +42,12 @@ bool ScriptContextTable::Lookup(Handle<ScriptContextTable> table,
for (int i = 0; i < table->used(); i++) {
Handle<Context> context = GetContext(table, i);
DCHECK(context->IsScriptContext());
- Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
+ Handle<ScopeInfo> scope_info(context->scope_info());
int slot_index = ScopeInfo::ContextSlotIndex(
- scope_info, name, &result->mode, &result->location, &result->init_flag,
+ scope_info, name, &result->mode, &result->init_flag,
&result->maybe_assigned_flag);
- if (slot_index >= 0 && result->location == VariableLocation::CONTEXT) {
+ if (slot_index >= 0) {
result->context_index = i;
result->slot_index = slot_index;
return true;
@@ -56,10 +57,22 @@ bool ScriptContextTable::Lookup(Handle<ScriptContextTable> table,
}
+bool Context::is_declaration_context() {
+ if (IsFunctionContext() || IsNativeContext() || IsScriptContext()) {
+ return true;
+ }
+ if (!IsBlockContext()) return false;
+ Object* ext = extension();
+ // If we have the special extension, we immediately know it must be a
+ // declaration scope. That's just a small performance shortcut.
+ return ext->IsSloppyBlockWithEvalContextExtension()
+ || ScopeInfo::cast(ext)->is_declaration_scope();
+}
+
+
Context* Context::declaration_context() {
Context* current = this;
- while (!current->IsFunctionContext() && !current->IsNativeContext() &&
- !current->IsScriptContext()) {
+ while (!current->is_declaration_context()) {
current = current->previous();
DCHECK(current->closure() == closure());
}
@@ -67,6 +80,44 @@ Context* Context::declaration_context() {
}
+JSObject* Context::extension_object() {
+ DCHECK(IsNativeContext() || IsFunctionContext() || IsBlockContext());
+ Object* object = extension();
+ if (object == nullptr) return nullptr;
+ if (IsBlockContext()) {
+ if (!object->IsSloppyBlockWithEvalContextExtension()) return nullptr;
+ object = SloppyBlockWithEvalContextExtension::cast(object)->extension();
+ }
+ DCHECK(object->IsJSContextExtensionObject() ||
+ (IsNativeContext() && object->IsJSGlobalObject()));
+ return JSObject::cast(object);
+}
+
+
+JSReceiver* Context::extension_receiver() {
+ DCHECK(IsNativeContext() || IsWithContext() ||
+ IsFunctionContext() || IsBlockContext());
+ return IsWithContext() ? JSReceiver::cast(extension()) : extension_object();
+}
+
+
+ScopeInfo* Context::scope_info() {
+ DCHECK(IsModuleContext() || IsScriptContext() || IsBlockContext());
+ Object* object = extension();
+ if (object->IsSloppyBlockWithEvalContextExtension()) {
+ DCHECK(IsBlockContext());
+ object = SloppyBlockWithEvalContextExtension::cast(object)->scope_info();
+ }
+ return ScopeInfo::cast(object);
+}
+
+
+String* Context::catch_name() {
+ DCHECK(IsCatchContext());
+ return String::cast(extension());
+}
+
+
JSBuiltinsObject* Context::builtins() {
GlobalObject* object = global_object();
if (object->IsJSGlobalObject()) {
@@ -194,7 +245,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
Handle<Context> context(this, isolate);
bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
- *index = -1;
+ *index = kNotFound;
*attributes = ABSENT;
*binding_flags = MISSING_BINDING;
@@ -212,13 +263,11 @@ Handle<Object> Context::Lookup(Handle<String> name,
PrintF("\n");
}
-
// 1. Check global objects, subjects of with, and extension objects.
- if (context->IsNativeContext() ||
- context->IsWithContext() ||
- (context->IsFunctionContext() && context->has_extension())) {
- Handle<JSReceiver> object(
- JSReceiver::cast(context->extension()), isolate);
+ if ((context->IsNativeContext() || context->IsWithContext() ||
+ context->IsFunctionContext() || context->IsBlockContext()) &&
+ context->extension_receiver() != nullptr) {
+ Handle<JSReceiver> object(context->extension_receiver());
if (context->IsNativeContext()) {
if (FLAG_trace_contexts) {
@@ -280,24 +329,18 @@ Handle<Object> Context::Lookup(Handle<String> name,
context->IsScriptContext()) {
// Use serialized scope information of functions and blocks to search
// for the context index.
- Handle<ScopeInfo> scope_info;
- if (context->IsFunctionContext()) {
- scope_info = Handle<ScopeInfo>(
- context->closure()->shared()->scope_info(), isolate);
- } else {
- scope_info = Handle<ScopeInfo>(
- ScopeInfo::cast(context->extension()), isolate);
- }
+ Handle<ScopeInfo> scope_info(context->IsFunctionContext()
+ ? context->closure()->shared()->scope_info()
+ : context->scope_info());
VariableMode mode;
- VariableLocation location;
InitializationFlag init_flag;
// TODO(sigurds) Figure out whether maybe_assigned_flag should
// be used to compute binding_flags.
MaybeAssignedFlag maybe_assigned_flag;
int slot_index = ScopeInfo::ContextSlotIndex(
- scope_info, name, &mode, &location, &init_flag, &maybe_assigned_flag);
+ scope_info, name, &mode, &init_flag, &maybe_assigned_flag);
DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
- if (slot_index >= 0 && location == VariableLocation::CONTEXT) {
+ if (slot_index >= 0) {
if (FLAG_trace_contexts) {
PrintF("=> found local in context slot %d (mode = %d)\n",
slot_index, mode);
@@ -329,7 +372,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
} else if (context->IsCatchContext()) {
// Catch contexts have the variable name in the extension slot.
- if (String::Equals(name, handle(String::cast(context->extension())))) {
+ if (String::Equals(name, handle(context->catch_name()))) {
if (FLAG_trace_contexts) {
PrintF("=> found in catch context\n");
}
@@ -359,7 +402,7 @@ void Context::InitializeGlobalSlots() {
DCHECK(IsScriptContext());
DisallowHeapAllocation no_gc;
- ScopeInfo* scope_info = ScopeInfo::cast(extension());
+ ScopeInfo* scope_info = this->scope_info();
int context_globals = scope_info->ContextGlobalCount();
if (context_globals > 0) {
@@ -494,6 +537,33 @@ Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() {
}
+#define COMPARE_NAME(index, type, name) \
+ if (string->IsOneByteEqualTo(STATIC_CHAR_VECTOR(#name))) return index;
+
+int Context::ImportedFieldIndexForName(Handle<String> string) {
+ NATIVE_CONTEXT_IMPORTED_FIELDS(COMPARE_NAME)
+ return kNotFound;
+}
+
+
+int Context::IntrinsicIndexForName(Handle<String> string) {
+ NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(COMPARE_NAME);
+ return kNotFound;
+}
+
+#undef COMPARE_NAME
+
+
+bool Context::IsJSBuiltin(Handle<Context> native_context,
+ Handle<JSFunction> function) {
+#define COMPARE_FUNCTION(index, type, name) \
+ if (*function == native_context->get(index)) return true;
+ NATIVE_CONTEXT_JS_BUILTINS(COMPARE_FUNCTION);
+#undef COMPARE_FUNCTION
+ return false;
+}
+
+
#ifdef DEBUG
bool Context::IsBootstrappingOrValidParentContext(
Object* object, Context* child) {
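
The ScriptContextTable hunks above replace bare "+ 1" offsets with kFirstContextSlot, making the table layout explicit: the used count lives in slot 0 and context i is stored at index i + kFirstContextSlot. A sketch of that layout and its grow check, with std::vector standing in for the FixedArray backing store:

    #include <cassert>
    #include <vector>

    static const int kUsedSlot = 0;
    static const int kFirstContextSlot = kUsedSlot + 1;

    int main() {
      std::vector<const char*> table(4, nullptr);  // 1 header + 3 context slots
      int used = 2;
      table[0 + kFirstContextSlot] = "ctx0";
      table[1 + kFirstContextSlot] = "ctx1";
      // Grow only when the next append would run off the end:
      assert(used + kFirstContextSlot != static_cast<int>(table.size()));
      table[used + kFirstContextSlot] = "ctx2";  // append context #2
      ++used;
      assert(used + kFirstContextSlot == static_cast<int>(table.size()));
      return 0;
    }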
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 9e6fc0e4f5..1ca572576e 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -73,150 +73,190 @@ enum BindingFlags {
// must always be allocated via Heap::AllocateContext() or
// Factory::NewContext.
+#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
+ V(CONCAT_ITERABLE_TO_ARRAY_INDEX, JSFunction, concat_iterable_to_array) \
+ V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site) \
+ V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
+ V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
+ V(NON_NUMBER_TO_NUMBER_INDEX, JSFunction, non_number_to_number) \
+ V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
+ V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
+ V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
+ V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
+ V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun)
+
+
+#define NATIVE_CONTEXT_JS_BUILTINS(V) \
+ V(APPLY_PREPARE_BUILTIN_INDEX, JSFunction, apply_prepare_builtin) \
+ V(CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX, JSFunction, \
+ concat_iterable_to_array_builtin) \
+ V(REFLECT_APPLY_PREPARE_BUILTIN_INDEX, JSFunction, \
+ reflect_apply_prepare_builtin) \
+ V(REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX, JSFunction, \
+ reflect_construct_prepare_builtin)
+
+
+#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
+ V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
+ V(ARRAY_POP_INDEX, JSFunction, array_pop) \
+ V(ARRAY_PUSH_INDEX, JSFunction, array_push) \
+ V(ARRAY_SHIFT_INDEX, JSFunction, array_shift) \
+ V(ARRAY_SPLICE_INDEX, JSFunction, array_splice) \
+ V(ARRAY_SLICE_INDEX, JSFunction, array_slice) \
+ V(ARRAY_UNSHIFT_INDEX, JSFunction, array_unshift) \
+ V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
+ V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
+ V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
+ V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
+ V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
+ V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
+ V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
+ V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
+ V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
+ V(JSON_SERIALIZE_ADAPTER_INDEX, JSFunction, json_serialize_adapter) \
+ V(MAKE_ERROR_FUNCTION_INDEX, JSFunction, make_error_function) \
+ V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete) \
+ V(MAP_FROM_ARRAY_INDEX, JSFunction, map_from_array) \
+ V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
+ V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
+ V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
+ V(MESSAGE_GET_COLUMN_NUMBER_INDEX, JSFunction, message_get_column_number) \
+ V(MESSAGE_GET_LINE_NUMBER_INDEX, JSFunction, message_get_line_number) \
+ V(MESSAGE_GET_SOURCE_LINE_INDEX, JSFunction, message_get_source_line) \
+ V(NATIVE_OBJECT_GET_NOTIFIER_INDEX, JSFunction, native_object_get_notifier) \
+ V(NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE, JSFunction, \
+ native_object_notifier_perform_change) \
+ V(NATIVE_OBJECT_OBSERVE_INDEX, JSFunction, native_object_observe) \
+ V(NO_SIDE_EFFECT_TO_STRING_FUN_INDEX, JSFunction, \
+ no_side_effect_to_string_fun) \
+ V(OBJECT_VALUE_OF, JSFunction, object_value_of) \
+ V(OBJECT_TO_STRING, JSFunction, object_to_string) \
+ V(OBJECT_DEFINE_OWN_PROPERTY_INDEX, JSFunction, object_define_own_property) \
+ V(OBJECT_GET_OWN_PROPERTY_DESCROPTOR_INDEX, JSFunction, \
+ object_get_own_property_descriptor) \
+ V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, observers_begin_perform_splice) \
+ V(OBSERVERS_END_SPLICE_INDEX, JSFunction, observers_end_perform_splice) \
+ V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice) \
+ V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
+ V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
+ V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain) \
+ V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
+ V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction, \
+ promise_has_user_defined_reject_handler) \
+ V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
+ V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
+ V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
+ V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
+ V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
+ V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
+ V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
+ V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
+ V(SET_FROM_ARRAY_INDEX, JSFunction, set_from_array) \
+ V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
+ V(STACK_OVERFLOW_BOILERPLATE_INDEX, JSObject, stack_overflow_boilerplate) \
+ V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
+ V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
+ to_complete_property_descriptor) \
+ V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun) \
+ V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
+ V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
+ NATIVE_CONTEXT_JS_BUILTINS(V)
+
#define NATIVE_CONTEXT_FIELDS(V) \
V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
- V(SECURITY_TOKEN_INDEX, Object, security_token) \
+ V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
+ /* Below is alpha-sorted */ \
+ V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
+ V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
+ V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map) \
+ V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
+ V(BOOL16X8_FUNCTION_INDEX, JSFunction, bool16x8_function) \
+ V(BOOL32X4_FUNCTION_INDEX, JSFunction, bool32x4_function) \
+ V(BOOL8X16_FUNCTION_INDEX, JSFunction, bool8x16_function) \
V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
- V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
- V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
- V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
- V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
+ V(BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX, Map, \
+ bound_function_with_constructor_map) \
+ V(BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX, Map, \
+ bound_function_without_constructor_map) \
+ V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
+ call_as_constructor_delegate) \
+ V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
+ V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
+ V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
+ V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
+ error_message_for_code_gen_from_strings) \
+ V(EXTRAS_EXPORTS_OBJECT_INDEX, JSObject, extras_binding_object) \
+ V(EXTRAS_UTILS_OBJECT_INDEX, JSObject, extras_utils_object) \
+ V(FAST_ALIASED_ARGUMENTS_MAP_INDEX, Map, fast_aliased_arguments_map) \
+ V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
V(FLOAT32X4_FUNCTION_INDEX, JSFunction, float32x4_function) \
- V(INT32X4_FUNCTION_INDEX, JSFunction, int32x4_function) \
- V(BOOL32X4_FUNCTION_INDEX, JSFunction, bool32x4_function) \
+ V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
+ V(FUNCTION_CACHE_INDEX, ObjectHashTable, function_cache) \
+ V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
+ V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
+ V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
+ V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
V(INT16X8_FUNCTION_INDEX, JSFunction, int16x8_function) \
- V(BOOL16X8_FUNCTION_INDEX, JSFunction, bool16x8_function) \
+ V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
+ V(INT32X4_FUNCTION_INDEX, JSFunction, int32x4_function) \
+ V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
V(INT8X16_FUNCTION_INDEX, JSFunction, int8x16_function) \
- V(BOOL8X16_FUNCTION_INDEX, JSFunction, bool8x16_function) \
- V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
- V(JS_OBJECT_STRONG_MAP_INDEX, Map, js_object_strong_map) \
V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
- V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
+ V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
V(JS_ARRAY_STRONG_MAPS_INDEX, Object, js_array_strong_maps) \
+ V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \
+ V(JS_MAP_MAP_INDEX, Map, js_map_map) \
+ V(JS_OBJECT_STRONG_MAP_INDEX, Map, js_object_strong_map) \
+ V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \
+ V(JS_SET_MAP_INDEX, Map, js_set_map) \
+ V(MAP_CACHE_INDEX, Object, map_cache) \
+ V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
+ V(STRING_ITERATOR_MAP_INDEX, Map, string_iterator_map) \
+ V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
+ V(NATIVES_UTILS_OBJECT_INDEX, Object, natives_utils_object) \
+ V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \
+ V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
+ V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
+ V(OBJECT_FUNCTION_PROTOTYPE_MAP_INDEX, Map, object_function_prototype_map) \
+ V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
- V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
- V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
- V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
- V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
- V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun) \
- V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun) \
- V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun) \
- V(NO_SIDE_EFFECT_TO_STRING_FUN_INDEX, JSFunction, \
- no_side_effect_to_string_fun) \
- V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun) \
- V(TO_LENGTH_FUN_INDEX, JSFunction, to_length_fun) \
- V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
- V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
+ V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
+ V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
+ V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
+ V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
+ V(SECURITY_TOKEN_INDEX, Object, security_token) \
+ V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \
V(SHARED_ARRAY_BUFFER_FUN_INDEX, JSFunction, shared_array_buffer_fun) \
- V(ARRAY_BUFFER_MAP_INDEX, Map, array_buffer_map) \
- V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
- V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
- V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
- V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
- V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
- V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
- V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
- V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
- V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
- V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
+ V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \
V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \
+ V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
+ sloppy_function_without_prototype_map) \
V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
sloppy_function_with_readonly_prototype_map) \
+ V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \
+ V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map) \
+ V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \
V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \
- V(STRONG_FUNCTION_MAP_INDEX, Map, strong_function_map) \
- V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
- sloppy_function_without_prototype_map) \
V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
strict_function_without_prototype_map) \
- V(STRONG_CONSTRUCTOR_MAP_INDEX, Map, strong_constructor_map) \
- V(BOUND_FUNCTION_MAP_INDEX, Map, bound_function_map) \
- V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
- V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \
- V(FAST_ALIASED_ARGUMENTS_MAP_INDEX, Map, fast_aliased_arguments_map) \
- V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map) \
- V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \
- V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
- V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
- V(FUNCTION_CACHE_INDEX, ObjectHashTable, function_cache) \
- V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache) \
- V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
- V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
- V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
- call_as_constructor_delegate) \
- V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
- V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
- V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
- V(MAP_CACHE_INDEX, Object, map_cache) \
- V(STRONG_MAP_CACHE_INDEX, Object, strong_map_cache) \
- V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
- V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
- error_message_for_code_gen_from_strings) \
- V(ERROR_FUNCTION_INDEX, JSFunction, error_function) \
- V(EVAL_ERROR_FUNCTION_INDEX, JSFunction, eval_error_function) \
- V(RANGE_ERROR_FUNCTION_INDEX, JSFunction, range_error_function) \
- V(REFERENCE_ERROR_FUNCTION_INDEX, JSFunction, reference_error_function) \
- V(SYNTAX_ERROR_FUNCTION_INDEX, JSFunction, syntax_error_function) \
- V(TYPE_ERROR_FUNCTION_INDEX, JSFunction, type_error_function) \
- V(URI_ERROR_FUNCTION_INDEX, JSFunction, uri_error_function) \
- V(MAKE_ERROR_FUNCTION_INDEX, JSFunction, make_error_function) \
- V(PROMISE_STATUS_INDEX, Symbol, promise_status) \
- V(PROMISE_VALUE_INDEX, Symbol, promise_value) \
- V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
- V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
- V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
- V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain) \
- V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
- V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
- V(PROMISE_HAS_USER_DEFINED_REJECT_HANDLER_INDEX, JSFunction, \
- promise_has_user_defined_reject_handler) \
- V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
- to_complete_property_descriptor) \
- V(OBJECT_DEFINE_OWN_PROPERTY_INDEX, JSFunction, object_define_own_property) \
- V(OBJECT_GET_OWN_PROPERTY_DESCROPTOR_INDEX, JSFunction, \
- object_get_own_property_descriptor) \
- V(MESSAGE_GET_LINE_NUMBER_INDEX, JSFunction, message_get_line_number) \
- V(MESSAGE_GET_COLUMN_NUMBER_INDEX, JSFunction, message_get_column_number) \
- V(MESSAGE_GET_SOURCE_LINE_INDEX, JSFunction, message_get_source_line) \
- V(STACK_OVERFLOW_BOILERPLATE_INDEX, JSObject, stack_overflow_boilerplate) \
- V(JSON_SERIALIZE_ADAPTER_INDEX, JSFunction, json_serialize_adapter) \
- V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
- V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
- V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
- V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
- V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
- V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice) \
- V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, observers_begin_perform_splice) \
- V(OBSERVERS_END_SPLICE_INDEX, JSFunction, observers_end_perform_splice) \
- V(NATIVE_OBJECT_OBSERVE_INDEX, JSFunction, native_object_observe) \
- V(NATIVE_OBJECT_GET_NOTIFIER_INDEX, JSFunction, native_object_get_notifier) \
- V(NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE, JSFunction, \
- native_object_notifier_perform_change) \
- V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \
V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map) \
+ V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
+ V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
+ V(STRONG_CONSTRUCTOR_MAP_INDEX, Map, strong_constructor_map) \
+ V(STRONG_FUNCTION_MAP_INDEX, Map, strong_function_map) \
V(STRONG_GENERATOR_FUNCTION_MAP_INDEX, Map, strong_generator_function_map) \
- V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
- V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
- V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \
- V(JS_MAP_MAP_INDEX, Map, js_map_map) \
- V(JS_SET_FUN_INDEX, JSFunction, js_set_fun) \
- V(JS_SET_MAP_INDEX, Map, js_set_map) \
- V(MAP_GET_METHOD_INDEX, JSFunction, map_get) \
- V(MAP_SET_METHOD_INDEX, JSFunction, map_set) \
- V(MAP_HAS_METHOD_INDEX, JSFunction, map_has) \
- V(MAP_DELETE_METHOD_INDEX, JSFunction, map_delete) \
- V(SET_ADD_METHOD_INDEX, JSFunction, set_add) \
- V(SET_HAS_METHOD_INDEX, JSFunction, set_has) \
- V(SET_DELETE_METHOD_INDEX, JSFunction, set_delete) \
- V(MAP_FROM_ARRAY_INDEX, JSFunction, map_from_array) \
- V(SET_FROM_ARRAY_INDEX, JSFunction, set_from_array) \
- V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
- V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \
- V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator) \
- V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table) \
- V(NATIVES_UTILS_OBJECT_INDEX, Object, natives_utils_object) \
- V(EXTRAS_EXPORTS_OBJECT_INDEX, JSObject, extras_binding_object)
-
+ V(STRONG_MAP_CACHE_INDEX, Object, strong_map_cache) \
+ V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
+ V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
+ V(UINT16X8_FUNCTION_INDEX, JSFunction, uint16x8_function) \
+ V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
+ V(UINT32X4_FUNCTION_INDEX, JSFunction, uint32x4_function) \
+ V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
+ V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
+ V(UINT8X16_FUNCTION_INDEX, JSFunction, uint8x16_function) \
+ NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
+ NATIVE_CONTEXT_IMPORTED_FIELDS(V)
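The block above is the tail of V8's NATIVE_CONTEXT_FIELDS X-macro, which this patch sorts alphabetically and extends by chaining in NATIVE_CONTEXT_INTRINSIC_FUNCTIONS and NATIVE_CONTEXT_IMPORTED_FIELDS. Each V(INDEX, Type, name) entry is expanded more than once: into slot indices, and (via NATIVE_CONTEXT_FIELD_ACCESSORS further down in this diff) into typed accessors. A minimal, self-contained sketch of that X-macro pattern follows; all names here are illustrative, not V8's:

    #include <cassert>
    #include <string>

    // One list of fields, expanded twice below.
    #define DEMO_FIELDS(V)              \
      V(NUMBER_FUNCTION_INDEX, number)  \
      V(OBJECT_FUNCTION_INDEX, object)

    // Expansion 1: slot indices.
    enum DemoSlot {
    #define DECLARE_INDEX(index, name) index,
      DEMO_FIELDS(DECLARE_INDEX)
    #undef DECLARE_INDEX
      DEMO_SLOT_COUNT
    };

    // Expansion 2: typed accessors over the slots.
    struct DemoContext {
      std::string slots[DEMO_SLOT_COUNT];
    #define DECLARE_ACCESSORS(index, name)                       \
      const std::string& name() const { return slots[index]; }  \
      void set_##name(const std::string& v) { slots[index] = v; }
      DEMO_FIELDS(DECLARE_ACCESSORS)
    #undef DECLARE_ACCESSORS
    };

    int main() {
      DemoContext ctx;
      ctx.set_number("Number");
      assert(ctx.number() == "Number");
    }
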
// A table of all script contexts. Every loaded top-level script with top-level
// lexical declarations contributes its ScriptContext into this table.
@@ -226,28 +266,21 @@ enum BindingFlags {
class ScriptContextTable : public FixedArray {
public:
// Conversions.
- static ScriptContextTable* cast(Object* context) {
- DCHECK(context->IsScriptContextTable());
- return reinterpret_cast<ScriptContextTable*>(context);
- }
+ static inline ScriptContextTable* cast(Object* context);
struct LookupResult {
int context_index;
int slot_index;
VariableMode mode;
- VariableLocation location;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
};
- int used() const { return Smi::cast(get(kUsedSlot))->value(); }
+ inline int used() const;
+ inline void set_used(int used);
- void set_used(int used) { set(kUsedSlot, Smi::FromInt(used)); }
-
- static Handle<Context> GetContext(Handle<ScriptContextTable> table, int i) {
- DCHECK(i < table->used());
- return Handle<Context>::cast(FixedArray::get(table, i + 1));
- }
+ static inline Handle<Context> GetContext(Handle<ScriptContextTable> table,
+ int i);
// Lookup a variable `name` in a ScriptContextTable.
// If it returns true, the variable is found and `result` contains
@@ -267,8 +300,9 @@ class ScriptContextTable : public FixedArray {
private:
static const int kUsedSlot = 0;
+ static const int kFirstContextSlot = kUsedSlot + 1;
static const int kFirstContextOffset =
- FixedArray::kHeaderSize + (kUsedSlot + 1) * kPointerSize;
+ FixedArray::kHeaderSize + kFirstContextSlot * kPointerSize;
DISALLOW_IMPLICIT_CONSTRUCTORS(ScriptContextTable);
};
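The two private constants fix the table layout: slot kUsedSlot (0) holds the entry count, and contexts occupy slots from the new kFirstContextSlot (1) onward, which is why the removed GetContext body read FixedArray::get(table, i + 1). A standalone sketch of that layout, with std::vector standing in for the FixedArray backing store (an assumed simplification):

    #include <cassert>
    #include <vector>

    struct ScriptContextTableSketch {
      static const int kUsedSlot = 0;
      static const int kFirstContextSlot = kUsedSlot + 1;
      // Slot 0 holds the used count; contexts live in the slots after it.
      std::vector<int> slots{0};

      int used() const { return slots[kUsedSlot]; }
      void Add(int context_id) {
        slots.push_back(context_id);
        ++slots[kUsedSlot];
      }
      int GetContext(int i) const {
        assert(i < used());
        return slots[kFirstContextSlot + i];
      }
    };

    int main() {
      ScriptContextTableSketch table;
      table.Add(42);
      assert(table.used() == 1 && table.GetContext(0) == 42);
    }
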
@@ -301,8 +335,11 @@ class ScriptContextTable : public FixedArray {
// Dynamically declared variables/functions are also added
// to lazily allocated extension object. Context::Lookup
// searches the extension object for properties.
-// For global and block contexts, contains the respective
-// ScopeInfo.
+// For script and block contexts, contains the respective
+// ScopeInfo. For block contexts representing sloppy declaration
+// block scopes, it may also be a struct being a
+// SloppyBlockWithEvalContextExtension, pairing the ScopeInfo
+// with an extension object.
// For module contexts, points back to the respective JSModule.
//
// [ global_object ] A pointer to the global object. Provided for quick
@@ -323,10 +360,7 @@ class ScriptContextTable : public FixedArray {
class Context: public FixedArray {
public:
// Conversions.
- static Context* cast(Object* context) {
- DCHECK(context->IsContext());
- return reinterpret_cast<Context*>(context);
- }
+ static inline Context* cast(Object* context);
// The default context slot layout; indices are FixedArray slot indices.
enum {
@@ -362,35 +396,30 @@ class Context: public FixedArray {
};
// Direct slot access.
- JSFunction* closure() { return JSFunction::cast(get(CLOSURE_INDEX)); }
- void set_closure(JSFunction* closure) { set(CLOSURE_INDEX, closure); }
+ inline JSFunction* closure();
+ inline void set_closure(JSFunction* closure);
- Context* previous() {
- Object* result = unchecked_previous();
- DCHECK(IsBootstrappingOrValidParentContext(result, this));
- return reinterpret_cast<Context*>(result);
- }
- void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
+ inline Context* previous();
+ inline void set_previous(Context* context);
- bool has_extension() { return extension() != NULL; }
- Object* extension() { return get(EXTENSION_INDEX); }
- void set_extension(Object* object) { set(EXTENSION_INDEX, object); }
+ inline bool has_extension();
+ inline Object* extension();
+ inline void set_extension(Object* object);
+ JSObject* extension_object();
+ JSReceiver* extension_receiver();
+ ScopeInfo* scope_info();
+ String* catch_name();
- JSModule* module() { return JSModule::cast(get(EXTENSION_INDEX)); }
- void set_module(JSModule* module) { set(EXTENSION_INDEX, module); }
+ inline JSModule* module();
+ inline void set_module(JSModule* module);
// Get the context where var declarations will be hoisted to, which
// may be the context itself.
Context* declaration_context();
+ bool is_declaration_context();
- GlobalObject* global_object() {
- Object* result = get(GLOBAL_OBJECT_INDEX);
- DCHECK(IsBootstrappingOrGlobalObject(this->GetIsolate(), result));
- return reinterpret_cast<GlobalObject*>(result);
- }
- void set_global_object(GlobalObject* object) {
- set(GLOBAL_OBJECT_INDEX, object);
- }
+ inline GlobalObject* global_object();
+ inline void set_global_object(GlobalObject* object);
// Returns a JSGlobalProxy object or null.
JSObject* global_proxy();
@@ -408,39 +437,15 @@ class Context: public FixedArray {
// Predicates for context types. IsNativeContext is also defined on Object
// because we frequently have to know if arbitrary objects are natives
// contexts.
- bool IsNativeContext() {
- Map* map = this->map();
- return map == map->GetHeap()->native_context_map();
- }
- bool IsFunctionContext() {
- Map* map = this->map();
- return map == map->GetHeap()->function_context_map();
- }
- bool IsCatchContext() {
- Map* map = this->map();
- return map == map->GetHeap()->catch_context_map();
- }
- bool IsWithContext() {
- Map* map = this->map();
- return map == map->GetHeap()->with_context_map();
- }
- bool IsBlockContext() {
- Map* map = this->map();
- return map == map->GetHeap()->block_context_map();
- }
- bool IsModuleContext() {
- Map* map = this->map();
- return map == map->GetHeap()->module_context_map();
- }
- bool IsScriptContext() {
- Map* map = this->map();
- return map == map->GetHeap()->script_context_map();
- }
+ inline bool IsNativeContext();
+ inline bool IsFunctionContext();
+ inline bool IsCatchContext();
+ inline bool IsWithContext();
+ inline bool IsBlockContext();
+ inline bool IsModuleContext();
+ inline bool IsScriptContext();
- bool HasSameSecurityTokenAs(Context* that) {
- return this->global_object()->native_context()->security_token() ==
- that->global_object()->native_context()->security_token();
- }
+ inline bool HasSameSecurityTokenAs(Context* that);
// Initializes global variable bindings in given script context.
void InitializeGlobalSlots();
@@ -461,19 +466,16 @@ class Context: public FixedArray {
Handle<Object> ErrorMessageForCodeGenerationFromStrings();
+ static int ImportedFieldIndexForName(Handle<String> name);
+ static int IntrinsicIndexForName(Handle<String> name);
+
+ static bool IsJSBuiltin(Handle<Context> native_context,
+ Handle<JSFunction> function);
+
#define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
- void set_##name(type* value) { \
- DCHECK(IsNativeContext()); \
- set(index, value); \
- } \
- bool is_##name(type* value) { \
- DCHECK(IsNativeContext()); \
- return type::cast(get(index)) == value; \
- } \
- type* name() { \
- DCHECK(IsNativeContext()); \
- return type::cast(get(index)); \
- }
+ inline void set_##name(type* value); \
+ inline bool is_##name(type* value); \
+ inline type* name();
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
#undef NATIVE_CONTEXT_FIELD_ACCESSORS
@@ -512,7 +514,7 @@ class Context: public FixedArray {
: SLOPPY_GENERATOR_FUNCTION_MAP_INDEX;
}
- if (IsConstructor(kind)) {
+ if (IsClassConstructor(kind)) {
// Use strict function map (no own "caller" / "arguments")
return is_strong(language_mode) ? STRONG_CONSTRUCTOR_MAP_INDEX
: STRICT_FUNCTION_MAP_INDEX;
@@ -531,6 +533,7 @@ class Context: public FixedArray {
}
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
+ static const int kNotFound = -1;
// GC support.
typedef FixedBodyDescriptor<
@@ -542,9 +545,6 @@ class Context: public FixedArray {
kSize> MarkCompactBodyDescriptor;
private:
- // Unchecked access to the slots.
- Object* unchecked_previous() { return get(PREVIOUS_INDEX); }
-
#ifdef DEBUG
// Bootstrapping-aware type checks.
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
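The Context hunks above all follow one mechanical recipe: method bodies defined inside the class (cast, closure, previous, the Is*Context predicates, the NATIVE_CONTEXT_FIELD_ACCESSORS expansion) become bare inline declarations, with the definitions presumably moving to an -inl.h header included only by implementation files — the hunk does not show the destination file, so that is an assumption. This cuts what every includer of contexts.h has to parse. A compilable sketch of the split, collapsed into one translation unit with comments marking the would-be file boundaries:

    // thing.h -- declarations only (was: bodies inline in the class).
    struct Thing {
      int closure() const;
      void set_closure(int c);
      int c_ = 0;
    };

    // thing-inl.h -- the bodies, included only by .cc files.
    inline int Thing::closure() const { return c_; }
    inline void Thing::set_closure(int c) { c_ = c; }

    // some .cc file -- includes thing-inl.h and uses the accessors.
    int main() {
      Thing t;
      t.set_closure(7);
      return t.closure() == 7 ? 0 : 1;
    }
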
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 42d617d017..4b3ac27cf1 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -10,6 +10,7 @@
#include <stdarg.h>
#include <cmath>
#include "src/globals.h" // Required for V8_INFINITY
+#include "src/unicode-cache-inl.h"
// ----------------------------------------------------------------------------
// Extra POSIX/ANSI functions for Win32/MSVC.
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 5877473854..2ce1d70fe6 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -2,16 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/conversions.h"
+
#include <limits.h>
#include <stdarg.h>
#include <cmath>
-#include "src/v8.h"
-
#include "src/assert-scope.h"
#include "src/char-predicates-inl.h"
#include "src/conversions-inl.h"
-#include "src/conversions.h"
#include "src/dtoa.h"
#include "src/factory.h"
#include "src/list-inl.h"
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index 2788ff7f9f..e8dea2e073 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -2,10 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/counters.h"
#include "src/base/platform/platform.h"
-#include "src/counters.h"
#include "src/isolate.h"
#include "src/log-inl.h"
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index db0c70a8f4..740b2a86e4 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -571,11 +571,6 @@ double AggregatedMemoryHistogram<Histogram>::Aggregate(double current_ms,
SC(arguments_adaptors, V8.ArgumentsAdaptors) \
SC(compilation_cache_hits, V8.CompilationCacheHits) \
SC(compilation_cache_misses, V8.CompilationCacheMisses) \
- SC(string_ctor_calls, V8.StringConstructorCalls) \
- SC(string_ctor_conversions, V8.StringConstructorConversions) \
- SC(string_ctor_cached_number, V8.StringConstructorCachedNumber) \
- SC(string_ctor_string_value, V8.StringConstructorStringValue) \
- SC(string_ctor_gc_required, V8.StringConstructorGCRequired) \
/* Amount of evaled source code. */ \
SC(total_eval_size, V8.TotalEvalSize) \
/* Amount of loaded source code. */ \
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 58b59c890f..b73ab0bd6a 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -49,10 +49,6 @@
#include "src/v8.h"
#endif // !V8_SHARED
-#if defined(V8_WASM)
-#include "src/wasm/wasm-js.h"
-#endif
-
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
#else
@@ -1050,7 +1046,7 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
i::JSFunction::cast(*compiled_script)->shared()->script()))
: i::Handle<i::Script>(i::Script::cast(
i::SharedFunctionInfo::cast(*compiled_script)->script()));
- script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
+ script_object->set_type(i::Script::TYPE_NATIVE);
}
#endif // !V8_SHARED
@@ -1182,11 +1178,6 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
.ToLocalChecked(),
os_templ);
-#if defined(V8_WASM)
- // Install WASM API.
- WasmJs::Install(isolate, global_template);
-#endif
-
return global_template;
}
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index f7e36dd9c9..104bc940a6 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -138,6 +138,25 @@
],
},
],
- }
+ },
+ ],
+ 'conditions': [
+ ['test_isolation_mode != "noop" and v8_toolset_for_d8 == "target"', {
+ 'targets': [
+ {
+ 'target_name': 'd8_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'd8',
+ ],
+ 'includes': [
+ '../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'd8.isolate',
+ ],
+ },
+ ],
+ }],
],
}
diff --git a/deps/v8/src/d8.isolate b/deps/v8/src/d8.isolate
new file mode 100644
index 0000000000..1c9bd9e35c
--- /dev/null
+++ b/deps/v8/src/d8.isolate
@@ -0,0 +1,16 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'command': [
+ '<(PRODUCT_DIR)/d8<(EXECUTABLE_SUFFIX)',
+ ],
+ 'files': [
+ '<(PRODUCT_DIR)/d8<(EXECUTABLE_SUFFIX)',
+ ],
+ },
+ 'includes': [
+ 'base.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index 68c7507d08..3106b1622b 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -4,8 +4,6 @@
#include "src/date.h"
-#include "src/v8.h"
-
#include "src/objects.h"
#include "src/objects-inl.h"
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index 3a74e102ee..d2d59152ef 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -16,19 +16,18 @@ var $createDate;
// Imports
var GlobalDate = global.Date;
+var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
var IsFinite;
var MathAbs;
var MathFloor;
var ToNumber;
-var ToString;
utils.Import(function(from) {
IsFinite = from.IsFinite;
MathAbs = from.MathAbs;
MathFloor = from.MathFloor;
ToNumber = from.ToNumber;
- ToString = from.ToString;
});
// -------------------------------------------------------------------
@@ -148,6 +147,7 @@ function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
} else if (argc == 1) {
if (IS_NUMBER(year)) {
value = year;
+
} else if (IS_STRING(year)) {
// Probe the Date cache. If we already have a time value for the
// given time, we re-use that instead of parsing the string again.
@@ -163,15 +163,11 @@ function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
}
}
+ } else if (IS_DATE(year)) {
+ value = UTC_DATE_VALUE(year);
+
} else {
- // According to ECMA 262, no hint should be given for this
- // conversion. However, ToPrimitive defaults to STRING_HINT for
- // Date objects which will lose precision when the Date
- // constructor is called with another Date object as its
- // argument. We therefore use NUMBER_HINT for the conversion,
- // which is the default for everything else than Date objects.
- // This makes us behave like KJS and SpiderMonkey.
- var time = $toPrimitive(year, NUMBER_HINT);
+ var time = TO_PRIMITIVE(year);
value = IS_STRING(time) ? DateParse(time) : ToNumber(time);
}
SET_UTC_DATE_VALUE(this, value);
@@ -270,7 +266,7 @@ var parse_buffer = new InternalArray(8);
// ECMA 262 - 15.9.4.2
function DateParse(string) {
- var arr = %DateParseString(ToString(string), parse_buffer);
+ var arr = %DateParseString(string, parse_buffer);
if (IS_NULL(arr)) return NAN;
var day = MakeDay(arr[0], arr[1], arr[2]);
@@ -777,9 +773,10 @@ function DateToISOString() {
}
+// 20.3.4.37 Date.prototype.toJSON ( key )
function DateToJSON(key) {
var o = TO_OBJECT(this);
- var tv = $defaultNumber(o);
+ var tv = TO_PRIMITIVE_NUMBER(o);
if (IS_NUMBER(tv) && !NUMBER_IS_FINITE(tv)) {
return null;
}
@@ -820,7 +817,7 @@ function CreateDate(time) {
// -------------------------------------------------------------------
%SetCode(GlobalDate, DateConstructor);
-%FunctionSetPrototype(GlobalDate, new GlobalDate(NAN));
+%FunctionSetPrototype(GlobalDate, new GlobalObject());
// Set up non-enumerable properties of the Date object itself.
utils.InstallFunctions(GlobalDate, DONT_ENUM, [
@@ -883,8 +880,6 @@ utils.InstallFunctions(GlobalDate.prototype, DONT_ENUM, [
"toJSON", DateToJSON
]);
-utils.ExportToRuntime(function(to) {
- to.CreateDate = CreateDate;
-});
+%InstallToContext(["create_date_fun", CreateDate]);
})
diff --git a/deps/v8/src/dateparser-inl.h b/deps/v8/src/dateparser-inl.h
index d405ab6493..e70c34a831 100644
--- a/deps/v8/src/dateparser-inl.h
+++ b/deps/v8/src/dateparser-inl.h
@@ -7,6 +7,7 @@
#include "src/char-predicates-inl.h"
#include "src/dateparser.h"
+#include "src/unicode-cache-inl.h"
namespace v8 {
namespace internal {
@@ -196,10 +197,31 @@ DateParser::DateToken DateParser::DateStringTokenizer<CharType>::Scan() {
template <typename Char>
+bool DateParser::InputReader<Char>::SkipWhiteSpace() {
+ if (unicode_cache_->IsWhiteSpaceOrLineTerminator(ch_)) {
+ Next();
+ return true;
+ }
+ return false;
+}
+
+
+template <typename Char>
+bool DateParser::InputReader<Char>::SkipParentheses() {
+ if (ch_ != '(') return false;
+ int balance = 0;
+ do {
+ if (ch_ == ')') --balance;
+ else if (ch_ == '(') ++balance;
+ Next();
+ } while (balance > 0 && ch_);
+ return true;
+}
+
+
+template <typename Char>
DateParser::DateToken DateParser::ParseES5DateTime(
- DateStringTokenizer<Char>* scanner,
- DayComposer* day,
- TimeComposer* time,
+ DateStringTokenizer<Char>* scanner, DayComposer* day, TimeComposer* time,
TimeZoneComposer* tz) {
DCHECK(day->IsEmpty());
DCHECK(time->IsEmpty());
diff --git a/deps/v8/src/dateparser.h b/deps/v8/src/dateparser.h
index aa57bb1c66..616318db29 100644
--- a/deps/v8/src/dateparser.h
+++ b/deps/v8/src/dateparser.h
@@ -99,24 +99,8 @@ class DateParser : public AllStatic {
return false;
}
- bool SkipWhiteSpace() {
- if (unicode_cache_->IsWhiteSpaceOrLineTerminator(ch_)) {
- Next();
- return true;
- }
- return false;
- }
-
- bool SkipParentheses() {
- if (ch_ != '(') return false;
- int balance = 0;
- do {
- if (ch_ == ')') --balance;
- else if (ch_ == '(') ++balance;
- Next();
- } while (balance > 0 && ch_);
- return true;
- }
+ inline bool SkipWhiteSpace();
+ inline bool SkipParentheses();
// Character testing/classification. Non-ASCII digits are not supported.
bool Is(uint32_t c) const { return ch_ == c; }
@@ -375,9 +359,7 @@ class DateParser : public AllStatic {
// legacy parser.
template <typename Char>
static DateParser::DateToken ParseES5DateTime(
- DateStringTokenizer<Char>* scanner,
- DayComposer* day,
- TimeComposer* time,
+ DateStringTokenizer<Char>* scanner, DayComposer* day, TimeComposer* time,
TimeZoneComposer* tz);
};
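SkipWhiteSpace and SkipParentheses move out of dateparser.h (removed below) into dateparser-inl.h unchanged; the parenthesis skipper consumes one balanced "(...)" run, which the legacy date parser uses to ignore comments such as "(PST)" in date strings. A standalone sketch of the same balance loop over a plain char buffer (the input string is illustrative):

    #include <cassert>
    #include <cstring>

    // Consume a balanced parenthesized run, or return p unchanged if it
    // does not start with '('.
    const char* SkipParens(const char* p) {
      if (*p != '(') return p;
      int balance = 0;
      do {
        if (*p == ')') --balance;
        else if (*p == '(') ++balance;
        ++p;
      } while (balance > 0 && *p);
      return p;
    }

    int main() {
      const char* s = "(PST(nested)) 2015";
      assert(std::strcmp(SkipParens(s), " 2015") == 0);
    }
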
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 323da7312c..b249561324 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -10,7 +10,7 @@
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
#include "src/frames-inl.h"
-#include "src/isolate.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -106,7 +106,7 @@ MaybeHandle<Object> DebugEvaluate::Evaluate(
if (result->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, result);
// TODO(verwaest): This will crash when the global proxy is detached.
- result = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ result = PrototypeIterator::GetCurrent<JSObject>(iter);
}
return result;
@@ -280,15 +280,14 @@ Handle<Context> DebugEvaluate::ContextBuilder::MaterializeReceiver(
switch (scope_info->scope_type()) {
case FUNCTION_SCOPE: {
VariableMode mode;
- VariableLocation location;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
// Don't bother creating a fake context node if "this" is in the context
// already.
- if (ScopeInfo::ContextSlotIndex(
- scope_info, isolate_->factory()->this_string(), &mode, &location,
- &init_flag, &maybe_assigned_flag) >= 0) {
+ if (ScopeInfo::ContextSlotIndex(scope_info,
+ isolate_->factory()->this_string(), &mode,
+ &init_flag, &maybe_assigned_flag) >= 0) {
return target;
}
receiver = handle(frame_->receiver(), isolate_);
diff --git a/deps/v8/src/debug/debug-frames.cc b/deps/v8/src/debug/debug-frames.cc
index c0970a359f..c4c288148c 100644
--- a/deps/v8/src/debug/debug-frames.cc
+++ b/deps/v8/src/debug/debug-frames.cc
@@ -180,11 +180,10 @@ void FrameInspector::UpdateStackLocalsFromMaterializedObject(
bool FrameInspector::ParameterIsShadowedByContextLocal(
Handle<ScopeInfo> info, Handle<String> parameter_name) {
VariableMode mode;
- VariableLocation location;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &location,
- &init_flag, &maybe_assigned_flag) != -1;
+ return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &init_flag,
+ &maybe_assigned_flag) != -1;
}
diff --git a/deps/v8/src/debug/debug-scopes.cc b/deps/v8/src/debug/debug-scopes.cc
index b9204f6050..e8ef240393 100644
--- a/deps/v8/src/debug/debug-scopes.cc
+++ b/deps/v8/src/debug/debug-scopes.cc
@@ -7,6 +7,7 @@
#include "src/debug/debug.h"
#include "src/frames-inl.h"
#include "src/globals.h"
+#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/scopes.h"
@@ -118,7 +119,7 @@ ScopeIterator::ScopeIterator(Isolate* isolate, Handle<JSFunction> function)
context_(function->context()),
seen_script_scope_(false),
failed_(false) {
- if (function->IsBuiltin()) context_ = Handle<Context>();
+ if (!function->IsSubjectToDebugging()) context_ = Handle<Context>();
}
@@ -237,7 +238,8 @@ MaybeHandle<JSObject> ScopeIterator::ScopeObject() {
return MaterializeLocalScope();
case ScopeIterator::ScopeTypeWith:
// Return the with object.
- return Handle<JSObject>(JSObject::cast(CurrentContext()->extension()));
+ // TODO(neis): This breaks for proxies.
+ return handle(JSObject::cast(CurrentContext()->extension_receiver()));
case ScopeIterator::ScopeTypeCatch:
return MaterializeCatchScope();
case ScopeIterator::ScopeTypeClosure:
@@ -295,7 +297,7 @@ Handle<ScopeInfo> ScopeIterator::CurrentScopeInfo() {
if (!nested_scope_chain_.is_empty()) {
return nested_scope_chain_.last();
} else if (context_->IsBlockContext()) {
- return Handle<ScopeInfo>(ScopeInfo::cast(context_->extension()));
+ return Handle<ScopeInfo>(context_->scope_info());
} else if (context_->IsFunctionContext()) {
return Handle<ScopeInfo>(context_->closure()->shared()->scope_info());
}
@@ -410,7 +412,7 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeScriptScope() {
context_index++) {
Handle<Context> context =
ScriptContextTable::GetContext(script_contexts, context_index);
- Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
+ Handle<ScopeInfo> scope_info(context->scope_info());
CopyContextLocalsToScopeObject(scope_info, context, script_scope);
}
return script_scope;
@@ -438,28 +440,13 @@ MaybeHandle<JSObject> ScopeIterator::MaterializeLocalScope() {
// Finally copy any properties from the function context extension.
// These will be variables introduced by eval.
- if (function_context->closure() == *function) {
- if (function_context->has_extension() &&
- !function_context->IsNativeContext()) {
- Handle<JSObject> ext(JSObject::cast(function_context->extension()));
- Handle<FixedArray> keys;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate_, keys, JSReceiver::GetKeys(ext, JSReceiver::INCLUDE_PROTOS),
- JSObject);
-
- for (int i = 0; i < keys->length(); i++) {
- // Names of variables introduced by eval are strings.
- DCHECK(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate_, value, Object::GetPropertyOrElement(ext, key), JSObject);
- RETURN_ON_EXCEPTION(isolate_,
- Runtime::SetObjectProperty(isolate_, local_scope,
- key, value, SLOPPY),
- JSObject);
- }
- }
+ if (function_context->closure() == *function &&
+ function_context->has_extension() &&
+ !function_context->IsNativeContext()) {
+ bool success = CopyContextExtensionToScopeObject(
+ handle(function_context->extension_object(), isolate_),
+ local_scope, JSReceiver::INCLUDE_PROTOS);
+ if (!success) return MaybeHandle<JSObject>();
}
return local_scope;
@@ -486,20 +473,11 @@ Handle<JSObject> ScopeIterator::MaterializeClosure() {
// Finally copy any properties from the function context extension. This will
// be variables introduced by eval.
if (context->has_extension()) {
- Handle<JSObject> ext(JSObject::cast(context->extension()));
- DCHECK(ext->IsJSContextExtensionObject());
- Handle<FixedArray> keys =
- JSReceiver::GetKeys(ext, JSReceiver::OWN_ONLY).ToHandleChecked();
-
- for (int i = 0; i < keys->length(); i++) {
- HandleScope scope(isolate_);
- // Names of variables introduced by eval are strings.
- DCHECK(keys->get(i)->IsString());
- Handle<String> key(String::cast(keys->get(i)));
- Handle<Object> value = Object::GetProperty(ext, key).ToHandleChecked();
- JSObject::SetOwnPropertyIgnoreAttributes(closure_scope, key, value, NONE)
- .Check();
- }
+ bool success = CopyContextExtensionToScopeObject(
+ handle(context->extension_object(), isolate_), closure_scope,
+ JSReceiver::OWN_ONLY);
+ DCHECK(success);
+ USE(success);
}
return closure_scope;
@@ -511,7 +489,7 @@ Handle<JSObject> ScopeIterator::MaterializeClosure() {
Handle<JSObject> ScopeIterator::MaterializeCatchScope() {
Handle<Context> context = CurrentContext();
DCHECK(context->IsCatchContext());
- Handle<String> name(String::cast(context->extension()));
+ Handle<String> name(context->catch_name());
Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX),
isolate_);
Handle<JSObject> catch_scope =
@@ -539,11 +517,17 @@ Handle<JSObject> ScopeIterator::MaterializeBlockScope() {
}
if (!context.is_null()) {
- Handle<ScopeInfo> scope_info_from_context(
- ScopeInfo::cast(context->extension()));
// Fill all context locals.
- CopyContextLocalsToScopeObject(scope_info_from_context, context,
- block_scope);
+ CopyContextLocalsToScopeObject(handle(context->scope_info()),
+ context, block_scope);
+ // Fill all extension variables.
+ if (context->extension_object() != nullptr) {
+ bool success = CopyContextExtensionToScopeObject(
+ handle(context->extension_object()), block_scope,
+ JSReceiver::OWN_ONLY);
+ DCHECK(success);
+ USE(success);
+ }
}
return block_scope;
}
@@ -554,7 +538,7 @@ Handle<JSObject> ScopeIterator::MaterializeBlockScope() {
MaybeHandle<JSObject> ScopeIterator::MaterializeModuleScope() {
Handle<Context> context = CurrentContext();
DCHECK(context->IsModuleContext());
- Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
+ Handle<ScopeInfo> scope_info(context->scope_info());
// Allocate and initialize a JSObject with all the members of the debugged
// module.
@@ -577,12 +561,10 @@ bool ScopeIterator::SetContextLocalValue(Handle<ScopeInfo> scope_info,
Handle<String> next_name(scope_info->ContextLocalName(i));
if (String::Equals(variable_name, next_name)) {
VariableMode mode;
- VariableLocation location;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- int context_index =
- ScopeInfo::ContextSlotIndex(scope_info, next_name, &mode, &location,
- &init_flag, &maybe_assigned_flag);
+ int context_index = ScopeInfo::ContextSlotIndex(
+ scope_info, next_name, &mode, &init_flag, &maybe_assigned_flag);
context->set(context_index, *new_value);
return true;
}
@@ -636,7 +618,7 @@ bool ScopeIterator::SetLocalVariableValue(Handle<String> variable_name,
if (function_context->closure() == *function) {
if (function_context->has_extension() &&
!function_context->IsNativeContext()) {
- Handle<JSObject> ext(JSObject::cast(function_context->extension()));
+ Handle<JSObject> ext(function_context->extension_object());
Maybe<bool> maybe = JSReceiver::HasProperty(ext, variable_name);
DCHECK(maybe.IsJust());
@@ -670,9 +652,25 @@ bool ScopeIterator::SetBlockVariableValue(Handle<String> variable_name,
}
if (HasContext()) {
- return SetContextLocalValue(scope_info, CurrentContext(), variable_name,
- new_value);
+ Handle<Context> context = CurrentContext();
+ if (SetContextLocalValue(scope_info, context, variable_name, new_value)) {
+ return true;
+ }
+
+ Handle<JSObject> ext(context->extension_object(), isolate_);
+ if (!ext.is_null()) {
+ Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
+ DCHECK(maybe.IsJust());
+ if (maybe.FromJust()) {
+ // We don't expect this to do anything except replacing property value.
+ JSObject::SetOwnPropertyIgnoreAttributes(ext, variable_name, new_value,
+ NONE)
+ .Check();
+ return true;
+ }
+ }
}
+
return false;
}
@@ -693,8 +691,7 @@ bool ScopeIterator::SetClosureVariableValue(Handle<String> variable_name,
// Properties from the function context extension. This will
// be variables introduced by eval.
if (context->has_extension()) {
- Handle<JSObject> ext(JSObject::cast(context->extension()));
- DCHECK(ext->IsJSContextExtensionObject());
+ Handle<JSObject> ext(JSObject::cast(context->extension_object()));
Maybe<bool> maybe = JSReceiver::HasOwnProperty(ext, variable_name);
DCHECK(maybe.IsJust());
if (maybe.FromJust()) {
@@ -732,7 +729,7 @@ bool ScopeIterator::SetCatchVariableValue(Handle<String> variable_name,
Handle<Object> new_value) {
Handle<Context> context = CurrentContext();
DCHECK(context->IsCatchContext());
- Handle<String> name(String::cast(context->extension()));
+ Handle<String> name(context->catch_name());
if (!String::Equals(name, variable_name)) {
return false;
}
@@ -765,5 +762,27 @@ void ScopeIterator::CopyContextLocalsToScopeObject(
}
}
+
+bool ScopeIterator::CopyContextExtensionToScopeObject(
+ Handle<JSObject> extension, Handle<JSObject> scope_object,
+ JSReceiver::KeyCollectionType type) {
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, keys, JSReceiver::GetKeys(extension, type), false);
+
+ for (int i = 0; i < keys->length(); i++) {
+ // Names of variables introduced by eval are strings.
+ DCHECK(keys->get(i)->IsString());
+ Handle<String> key(String::cast(keys->get(i)));
+ Handle<Object> value;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate_, value, Object::GetPropertyOrElement(extension, key), false);
+ RETURN_ON_EXCEPTION_VALUE(
+ isolate_, JSObject::SetOwnPropertyIgnoreAttributes(
+ scope_object, key, value, NONE), false);
+ }
+ return true;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/debug-scopes.h b/deps/v8/src/debug/debug-scopes.h
index 0247cc4bce..20cd0336dc 100644
--- a/deps/v8/src/debug/debug-scopes.h
+++ b/deps/v8/src/debug/debug-scopes.h
@@ -116,6 +116,9 @@ class ScopeIterator {
void CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
Handle<Context> context,
Handle<JSObject> scope_object);
+ bool CopyContextExtensionToScopeObject(Handle<JSObject> extension,
+ Handle<JSObject> scope_object,
+ JSReceiver::KeyCollectionType type);
DISALLOW_IMPLICIT_CONSTRUCTORS(ScopeIterator);
};
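The CopyContextExtensionToScopeObject helper declared here replaces three hand-rolled copies of the same loop in MaterializeLocalScope, MaterializeClosure and MaterializeBlockScope: fetch the extension object's keys, then copy each property onto the materialized scope object, returning false if a property access throws. A toy sketch of that shape, with std::map standing in for the JSObject key/value pairs and the error paths elided:

    #include <cassert>
    #include <map>
    #include <string>

    using Obj = std::map<std::string, int>;

    // Copy every property of `extension` onto `scope`; the real helper
    // returns false when a property load or store throws.
    bool CopyExtensionToScope(const Obj& extension, Obj* scope) {
      for (const auto& kv : extension) (*scope)[kv.first] = kv.second;
      return true;
    }

    int main() {
      Obj ext{{"evalVar", 1}};
      Obj scope;
      assert(CopyExtensionToScope(ext, &scope));
      assert(scope.at("evalVar") == 1);
    }
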
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 3ab10132a8..4f23555d28 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -16,6 +16,7 @@
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
+#include "src/isolate-inl.h"
#include "src/list.h"
#include "src/log.h"
#include "src/messages.h"
@@ -40,7 +41,6 @@ Debug::Debug(Isolate* isolate)
in_debug_event_listener_(false),
break_on_exception_(false),
break_on_uncaught_exception_(false),
- script_cache_(NULL),
debug_info_list_(NULL),
isolate_(isolate) {
ThreadInit();
@@ -354,66 +354,6 @@ int Debug::ArchiveSpacePerThread() {
}
-ScriptCache::ScriptCache(Isolate* isolate) : isolate_(isolate) {
- Heap* heap = isolate_->heap();
- HandleScope scope(isolate_);
-
- DCHECK(isolate_->debug()->is_active());
-
- // Perform a GC to get rid of all unreferenced scripts.
- heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "ScriptCache");
-
- // Scan heap for Script objects.
- List<Handle<Script> > scripts;
- {
- HeapIterator iterator(heap, HeapIterator::kFilterUnreachable);
- DisallowHeapAllocation no_allocation;
- for (HeapObject* obj = iterator.next(); obj != NULL;
- obj = iterator.next()) {
- if (obj->IsScript() && Script::cast(obj)->HasValidSource()) {
- scripts.Add(Handle<Script>(Script::cast(obj)));
- }
- }
- }
-
- GlobalHandles* global_handles = isolate_->global_handles();
- table_ = Handle<WeakValueHashTable>::cast(global_handles->Create(
- Object::cast(*WeakValueHashTable::New(isolate_, scripts.length()))));
- for (int i = 0; i < scripts.length(); i++) Add(scripts[i]);
-}
-
-
-void ScriptCache::Add(Handle<Script> script) {
- HandleScope scope(isolate_);
- Handle<Smi> id(script->id(), isolate_);
-
-#ifdef DEBUG
- Handle<Object> lookup(table_->LookupWeak(id), isolate_);
- if (!lookup->IsTheHole()) {
- Handle<Script> found = Handle<Script>::cast(lookup);
- DCHECK(script->id() == found->id());
- DCHECK(!script->name()->IsString() ||
- String::cast(script->name())->Equals(String::cast(found->name())));
- }
-#endif
-
- Handle<WeakValueHashTable> new_table =
- WeakValueHashTable::PutWeak(table_, id, script);
-
- if (new_table.is_identical_to(table_)) return;
- GlobalHandles* global_handles = isolate_->global_handles();
- global_handles->Destroy(Handle<Object>::cast(table_).location());
- table_ = Handle<WeakValueHashTable>::cast(
- global_handles->Create(Object::cast(*new_table)));
-}
-
-
-ScriptCache::~ScriptCache() {
- isolate_->global_handles()->Destroy(Handle<Object>::cast(table_).location());
- table_ = Handle<WeakValueHashTable>();
-}
-
-
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
// Globalize the request debug info object and make it weak.
GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
@@ -466,12 +406,6 @@ void Debug::Unload() {
// Return debugger is not loaded.
if (!is_loaded()) return;
- // Clear the script cache.
- if (script_cache_ != NULL) {
- delete script_cache_;
- script_cache_ = NULL;
- }
-
// Clear debugger context global handle.
GlobalHandles::Destroy(Handle<Object>::cast(debug_context_).location());
debug_context_ = Handle<Context>();
@@ -756,8 +690,8 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
Handle<DebugInfo> debug_info = node->debug_info();
// Find the break point and clear it.
- Address pc = debug_info->code()->entry() +
- break_point_info->code_position()->value();
+ Address pc =
+ debug_info->code()->entry() + break_point_info->code_position();
BreakLocation location =
BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, pc);
@@ -1137,10 +1071,10 @@ Handle<Object> Debug::GetSourceBreakLocations(
Smi* position = NULL;
switch (position_alignment) {
case STATEMENT_ALIGNED:
- position = break_point_info->statement_position();
+ position = Smi::FromInt(break_point_info->statement_position());
break;
case BREAK_POSITION_ALIGNED:
- position = break_point_info->source_position();
+ position = Smi::FromInt(break_point_info->source_position());
break;
}
for (int j = 0; j < break_points; ++j) locations->set(count++, position);
@@ -1491,17 +1425,17 @@ Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
while (true) {
// Go through all shared function infos associated with this script to
// find the inner most function containing this position.
+ // If there is no shared function info for this script at all, there is
+ // no point in looking for it by walking the heap.
if (!script->shared_function_infos()->IsWeakFixedArray()) break;
- WeakFixedArray* array =
- WeakFixedArray::cast(script->shared_function_infos());
SharedFunctionInfo* shared;
{
SharedFunctionInfoFinder finder(position);
- for (int i = 0; i < array->Length(); i++) {
- Object* item = array->Get(i);
- if (!item->IsSharedFunctionInfo()) continue;
- finder.NewCandidate(SharedFunctionInfo::cast(item));
+ WeakFixedArray::Iterator iterator(script->shared_function_infos());
+ SharedFunctionInfo* candidate;
+ while ((candidate = iterator.Next<SharedFunctionInfo>())) {
+ finder.NewCandidate(candidate);
}
shared = finder.Result();
if (shared == NULL) break;
@@ -1560,7 +1494,7 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
if (function.is_null()) {
DCHECK(shared->HasDebugCode());
- } else if (!Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
+ } else if (!Compiler::Compile(function, CLEAR_EXCEPTION)) {
return false;
}
@@ -1675,17 +1609,24 @@ void Debug::ClearMirrorCache() {
Handle<FixedArray> Debug::GetLoadedScripts() {
- // Create and fill the script cache when the loaded scripts is requested for
- // the first time.
- if (script_cache_ == NULL) script_cache_ = new ScriptCache(isolate_);
-
- // Perform GC to get unreferenced scripts evicted from the cache before
- // returning the content.
- isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags,
- "Debug::GetLoadedScripts");
-
- // Get the scripts from the cache.
- return script_cache_->GetScripts();
+ isolate_->heap()->CollectAllGarbage();
+ Factory* factory = isolate_->factory();
+ if (!factory->script_list()->IsWeakFixedArray()) {
+ return factory->empty_fixed_array();
+ }
+ Handle<WeakFixedArray> array =
+ Handle<WeakFixedArray>::cast(factory->script_list());
+ Handle<FixedArray> results = factory->NewFixedArray(array->Length());
+ int length = 0;
+ {
+ Script::Iterator iterator(isolate_);
+ Script* script;
+ while ((script = iterator.Next())) {
+ if (script->HasValidSource()) results->set(length++, script);
+ }
+ }
+ results->Shrink(length);
+ return results;
}
@@ -1737,7 +1678,7 @@ void Debug::RecordEvalCaller(Handle<Script> script) {
Code* code = it.frame()->LookupCode();
int offset = static_cast<int>(
it.frame()->pc() - code->instruction_start());
- script->set_eval_from_instructions_offset(Smi::FromInt(offset));
+ script->set_eval_from_instructions_offset(offset);
}
}
@@ -1873,29 +1814,6 @@ void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
}
-void Debug::OnCompileError(Handle<Script> script) {
- if (ignore_events()) return;
- SuppressDebug while_processing(this);
-
- if (in_debug_scope()) {
- ProcessCompileEventInDebugScope(v8::CompileError, script);
- return;
- }
-
- HandleScope scope(isolate_);
- DebugScope debug_scope(this);
- if (debug_scope.failed()) return;
-
- // Create the compile state object.
- Handle<Object> event_data;
- // Bail out and don't call debugger if exception.
- if (!MakeCompileEvent(script, v8::CompileError).ToHandle(&event_data)) return;
-
- // Process debug event.
- ProcessDebugEvent(v8::CompileError, Handle<JSObject>::cast(event_data), true);
-}
-
-
void Debug::OnDebugBreak(Handle<Object> break_points_hit,
bool auto_continue) {
// The caller provided for DebugScope.
@@ -1916,59 +1834,19 @@ void Debug::OnDebugBreak(Handle<Object> break_points_hit,
}
-void Debug::OnBeforeCompile(Handle<Script> script) {
- if (in_debug_scope() || ignore_events()) return;
- SuppressDebug while_processing(this);
+void Debug::OnCompileError(Handle<Script> script) {
+ ProcessCompileEvent(v8::CompileError, script);
+}
- HandleScope scope(isolate_);
- DebugScope debug_scope(this);
- if (debug_scope.failed()) return;
- // Create the event data object.
- Handle<Object> event_data;
- // Bail out and don't call debugger if exception.
- if (!MakeCompileEvent(script, v8::BeforeCompile).ToHandle(&event_data))
- return;
-
- // Process debug event.
- ProcessDebugEvent(v8::BeforeCompile,
- Handle<JSObject>::cast(event_data),
- true);
+void Debug::OnBeforeCompile(Handle<Script> script) {
+ ProcessCompileEvent(v8::BeforeCompile, script);
}
// Handle debugger actions when a new script is compiled.
void Debug::OnAfterCompile(Handle<Script> script) {
- // Add the newly compiled script to the script cache.
- if (script_cache_ != NULL) script_cache_->Add(script);
-
- if (ignore_events()) return;
- SuppressDebug while_processing(this);
-
- if (in_debug_scope()) {
- ProcessCompileEventInDebugScope(v8::AfterCompile, script);
- return;
- }
-
- HandleScope scope(isolate_);
- DebugScope debug_scope(this);
- if (debug_scope.failed()) return;
-
- // If debugging there might be script break points registered for this
- // script. Make sure that these break points are set.
- Handle<Object> argv[] = {Script::GetWrapper(script)};
- if (CallFunction("UpdateScriptBreakPoints", arraysize(argv), argv)
- .is_null()) {
- return;
- }
-
- // Create the compile state object.
- Handle<Object> event_data;
- // Bail out and don't call debugger if exception.
- if (!MakeCompileEvent(script, v8::AfterCompile).ToHandle(&event_data)) return;
-
- // Process debug event.
- ProcessDebugEvent(v8::AfterCompile, Handle<JSObject>::cast(event_data), true);
+ ProcessCompileEvent(v8::AfterCompile, script);
}
@@ -2072,23 +1950,44 @@ void Debug::CallEventCallback(v8::DebugEvent event,
}
-void Debug::ProcessCompileEventInDebugScope(v8::DebugEvent event,
- Handle<Script> script) {
- if (event_listener_.is_null()) return;
+void Debug::ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script) {
+ if (ignore_events()) return;
+ SuppressDebug while_processing(this);
+ bool in_nested_debug_scope = in_debug_scope();
+ HandleScope scope(isolate_);
DebugScope debug_scope(this);
if (debug_scope.failed()) return;
+ if (event == v8::AfterCompile) {
+ // If debugging there might be script break points registered for this
+ // script. Make sure that these break points are set.
+ Handle<Object> argv[] = {Script::GetWrapper(script)};
+ if (CallFunction("UpdateScriptBreakPoints", arraysize(argv), argv)
+ .is_null()) {
+ return;
+ }
+ }
+
+ // Create the compile state object.
Handle<Object> event_data;
// Bail out and don't call debugger if exception.
if (!MakeCompileEvent(script, event).ToHandle(&event_data)) return;
- // Create the execution state.
- Handle<Object> exec_state;
- // Bail out and don't call debugger if exception.
- if (!MakeExecutionState().ToHandle(&exec_state)) return;
+ // Don't call NotifyMessageHandler if already in debug scope to avoid running
+ // nested command loop.
+ if (in_nested_debug_scope) {
+ if (event_listener_.is_null()) return;
+ // Create the execution state.
+ Handle<Object> exec_state;
+ // Bail out and don't call debugger if exception.
+ if (!MakeExecutionState().ToHandle(&exec_state)) return;
- CallEventCallback(event, exec_state, event_data, NULL);
+ CallEventCallback(event, exec_state, event_data, NULL);
+ } else {
+ // Process debug event.
+ ProcessDebugEvent(event, Handle<JSObject>::cast(event_data), true);
+ }
}
@@ -2216,7 +2115,7 @@ void Debug::NotifyMessageHandler(v8::DebugEvent event,
Handle<Object> exception;
if (!maybe_exception.ToHandle(&exception)) break;
Handle<Object> result;
- if (!Execution::ToString(isolate_, exception).ToHandle(&result)) break;
+ if (!Object::ToString(isolate_, exception).ToHandle(&result)) break;
answer = Handle<String>::cast(result);
}
@@ -2345,7 +2244,7 @@ void Debug::HandleDebugBreak() {
Object* fun = it.frame()->function();
if (fun && fun->IsJSFunction()) {
// Don't stop in builtin functions.
- if (JSFunction::cast(fun)->IsBuiltin()) return;
+ if (!JSFunction::cast(fun)->IsSubjectToDebugging()) return;
GlobalObject* global = JSFunction::cast(fun)->context()->global_object();
// Don't stop in debugger functions.
if (IsDebugGlobal(global)) return;
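The debug.cc hunks above collapse three near-identical handlers (OnBeforeCompile, OnAfterCompile, OnCompileError) into thin wrappers around one parameterized ProcessCompileEvent, with the script-break-point update kept exclusive to the AfterCompile path. A compilable sketch of that consolidation shape; the types are stand-ins, not V8's:

    #include <cstdio>

    enum DebugEvent { BeforeCompile, AfterCompile, CompileError };

    void ProcessCompileEvent(DebugEvent event, const char* script) {
      if (event == AfterCompile) {
        // Only this path re-applies registered script break points.
        std::printf("update break points for %s\n", script);
      }
      std::printf("dispatch event %d for %s\n",
                  static_cast<int>(event), script);
    }

    void OnBeforeCompile(const char* s) { ProcessCompileEvent(BeforeCompile, s); }
    void OnAfterCompile(const char* s) { ProcessCompileEvent(AfterCompile, s); }
    void OnCompileError(const char* s) { ProcessCompileEvent(CompileError, s); }

    int main() { OnAfterCompile("test.js"); }
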
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index 4b5b7b7b90..640355a7e6 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -186,29 +186,6 @@ class BreakLocation {
};
-// Cache of all script objects in the heap. When a script is added a weak handle
-// to it is created and that weak handle is stored in the cache. The weak handle
-// callback takes care of removing the script from the cache. The key used in
-// the cache is the script id.
-class ScriptCache {
- public:
- explicit ScriptCache(Isolate* isolate);
- ~ScriptCache();
-
- // Add script to the cache.
- void Add(Handle<Script> script);
-
- // Return the scripts in the cache.
- Handle<FixedArray> GetScripts() {
- return WeakValueHashTable::GetWeakValues(table_);
- }
-
- private:
- Isolate* isolate_;
- Handle<WeakValueHashTable> table_;
-};
-
-
// Linked list holding debug info objects. The debug info objects are kept as
// weak handles to avoid a debug info object to keep a function alive.
class DebugInfoListNode {
@@ -497,7 +474,7 @@ class Debug {
void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; }
bool live_edit_enabled() const {
- return FLAG_enable_liveedit && live_edit_enabled_ ;
+ return FLAG_enable_liveedit && live_edit_enabled_;
}
inline bool is_active() const { return is_active_; }
@@ -573,8 +550,7 @@ class Debug {
Handle<Object> exec_state,
Handle<Object> event_data,
v8::Debug::ClientData* client_data);
- void ProcessCompileEventInDebugScope(v8::DebugEvent event,
- Handle<Script> script);
+ void ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script);
void ProcessDebugEvent(v8::DebugEvent event,
Handle<JSObject> event_data,
bool auto_continue);
@@ -622,7 +598,6 @@ class Debug {
bool break_on_exception_;
bool break_on_uncaught_exception_;
- ScriptCache* script_cache_; // Cache of all scripts in the heap.
DebugInfoListNode* debug_info_list_; // List of active debug info objects.
// Storage location for jump when exiting debug break calls.
diff --git a/deps/v8/src/debug/liveedit.cc b/deps/v8/src/debug/liveedit.cc
index 8312dd3b6d..8a936ac177 100644
--- a/deps/v8/src/debug/liveedit.cc
+++ b/deps/v8/src/debug/liveedit.cc
@@ -11,6 +11,7 @@
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/global-handles.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/parser.h"
#include "src/scopeinfo.h"
@@ -994,10 +995,12 @@ class LiteralFixer {
// collect all functions and fix their literal arrays.
Handle<FixedArray> function_instances =
CollectJSFunctions(shared_info, isolate);
+ Handle<TypeFeedbackVector> vector(shared_info->feedback_vector());
+
for (int i = 0; i < function_instances->length(); i++) {
Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
- Handle<FixedArray> new_literals =
- isolate->factory()->NewFixedArray(new_literal_count);
+ Handle<LiteralsArray> new_literals =
+ LiteralsArray::New(isolate, vector, new_literal_count, TENURED);
fun->set_literals(*new_literals);
}
diff --git a/deps/v8/src/debug/mirrors.js b/deps/v8/src/debug/mirrors.js
index f47a2d058e..11f9e485c1 100644
--- a/deps/v8/src/debug/mirrors.js
+++ b/deps/v8/src/debug/mirrors.js
@@ -13,11 +13,15 @@ var GlobalArray = global.Array;
var IsNaN = global.isNaN;
var JSONStringify = global.JSON.stringify;
var MathMin = global.Math.min;
+var promiseStatusSymbol = utils.ImportNow("promise_status_symbol");
+var promiseValueSymbol = utils.ImportNow("promise_value_symbol");
+var SymbolToString;
var ToBoolean;
var ToString;
utils.Import(function(from) {
FunctionSourceString = from.FunctionSourceString;
+ SymbolToString = from.SymbolToString;
ToBoolean = from.ToBoolean;
ToString = from.ToString;
});
@@ -103,16 +107,9 @@ function ClearMirrorCache(value) {
}
-// Wrapper to check whether an object is a Promise. The call may not work
-// if promises are not enabled.
-// TODO(yangguo): remove try-catch once promises are enabled by default.
function ObjectIsPromise(value) {
- try {
- return IS_SPEC_OBJECT(value) &&
- !IS_UNDEFINED(%DebugGetProperty(value, builtins.$promiseStatus));
- } catch (e) {
- return false;
- }
+ return IS_SPEC_OBJECT(value) &&
+ !IS_UNDEFINED(%DebugGetProperty(value, promiseStatusSymbol));
}
@@ -692,7 +689,7 @@ SymbolMirror.prototype.description = function() {
SymbolMirror.prototype.toText = function() {
- return %_CallFunction(this.value_, builtins.$symbolToString);
+ return %_CallFunction(this.value_, SymbolToString);
}
@@ -859,7 +856,7 @@ ObjectMirror.prototype.internalProperties = function() {
ObjectMirror.prototype.property = function(name) {
- var details = %DebugGetPropertyDetails(this.value_, builtins.$toName(name));
+ var details = %DebugGetPropertyDetails(this.value_, TO_NAME(name));
if (details) {
return new PropertyMirror(this, name, details);
}
@@ -1326,7 +1323,7 @@ inherits(PromiseMirror, ObjectMirror);
function PromiseGetStatus_(value) {
- var status = %DebugGetProperty(value, builtins.$promiseStatus);
+ var status = %DebugGetProperty(value, promiseStatusSymbol);
if (status == 0) return "pending";
if (status == 1) return "resolved";
return "rejected";
@@ -1334,7 +1331,7 @@ function PromiseGetStatus_(value) {
function PromiseGetValue_(value) {
- return %DebugGetProperty(value, builtins.$promiseValue);
+ return %DebugGetProperty(value, promiseValueSymbol);
}
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 0efc4de369..fdca98e90a 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/deoptimizer.h"
#include "src/accessors.h"
#include "src/codegen.h"
-#include "src/cpu-profiler.h"
-#include "src/deoptimizer.h"
#include "src/disasm.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "src/macro-assembler.h"
#include "src/prettyprinter.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/v8.h"
namespace v8 {
@@ -1455,7 +1455,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
" translating %s => StubFailureTrampolineStub, height=%d\n",
- CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false),
+ CodeStub::MajorName(static_cast<CodeStub::Major>(major_key)),
height_in_bytes);
}
@@ -1852,7 +1852,7 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
}
CopyBytes(chunk->area_start(), desc.buffer,
static_cast<size_t>(desc.instr_size));
- CpuFeatures::FlushICache(chunk->area_start(), desc.instr_size);
+ Assembler::FlushICache(isolate, chunk->area_start(), desc.instr_size);
data->deopt_entry_code_entries_[type] = entry_count;
}
@@ -2252,7 +2252,7 @@ Handle<FixedArray> MaterializedObjectStore::EnsureStackEntries(int length) {
for (int i = array->length(); i < length; i++) {
new_array->set(i, isolate()->heap()->undefined_value());
}
- isolate()->heap()->public_set_materialized_objects(*new_array);
+ isolate()->heap()->SetRootMaterializedObjects(*new_array);
return new_array;
}
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index 411b09fcaa..b16f090e9d 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -214,7 +214,7 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
CodeStub::Major major_key = CodeStub::GetMajorKey(code);
DCHECK(major_key == CodeStub::MajorKeyFromKey(key));
out.AddFormatted(" %s, %s, ", Code::Kind2String(kind),
- CodeStub::MajorName(major_key, false));
+ CodeStub::MajorName(major_key));
out.AddFormatted("minor: %d", minor_key);
} else {
out.AddFormatted(" %s", Code::Kind2String(kind));
diff --git a/deps/v8/src/dtoa.h b/deps/v8/src/dtoa.h
index dd88688d08..ca6277ee56 100644
--- a/deps/v8/src/dtoa.h
+++ b/deps/v8/src/dtoa.h
@@ -5,6 +5,8 @@
#ifndef V8_DTOA_H_
#define V8_DTOA_H_
+#include "src/vector.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/effects.h b/deps/v8/src/effects.h
index e18baeafd6..8d539f64bd 100644
--- a/deps/v8/src/effects.h
+++ b/deps/v8/src/effects.h
@@ -35,11 +35,11 @@ struct Effect {
// The unknown effect.
static Effect Unknown(Zone* zone) {
- return Effect(Bounds::Unbounded(zone), POSSIBLE);
+ return Effect(Bounds::Unbounded(), POSSIBLE);
}
static Effect Forget(Zone* zone) {
- return Effect(Bounds::Unbounded(zone), DEFINITE);
+ return Effect(Bounds::Unbounded(), DEFINITE);
}
// Sequential composition, as in 'e1; e2'.
@@ -87,7 +87,7 @@ class EffectsMixin: public Base {
Bounds LookupBounds(Var var) {
Effect effect = Lookup(var);
return effect.modality == Effect::DEFINITE
- ? effect.bounds : Bounds::Unbounded(Base::zone());
+ ? effect.bounds : Bounds::Unbounded();
}
// Sequential composition.
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index 0254a4fb59..1397bd7c19 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -158,9 +158,8 @@ inline bool IsHoleyElementsKind(ElementsKind kind) {
inline bool IsFastPackedElementsKind(ElementsKind kind) {
- return kind == FAST_SMI_ELEMENTS ||
- kind == FAST_DOUBLE_ELEMENTS ||
- kind == FAST_ELEMENTS;
+ return kind == FAST_SMI_ELEMENTS || kind == FAST_DOUBLE_ELEMENTS ||
+ kind == FAST_ELEMENTS;
}
@@ -212,6 +211,15 @@ bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind,
ElementsKind to_kind);
+inline ElementsKind GetMoreGeneralElementsKind(ElementsKind from_kind,
+ ElementsKind to_kind) {
+ if (IsMoreGeneralElementsKindTransition(from_kind, to_kind)) {
+ return to_kind;
+ }
+ return from_kind;
+}
+
+
inline bool IsTransitionableFastElementsKind(ElementsKind from_kind) {
return IsFastElementsKind(from_kind) &&
from_kind != TERMINAL_FAST_ELEMENTS_KIND;
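GetMoreGeneralElementsKind folds the transition predicate into a value computation, so call sites can accumulate the most general kind over a sequence of inputs (as ElementsAccessor::Concat does later in this diff). A toy sketch of that folding, assuming a simplified three-kind lattice rather than V8's real ElementsKind ordering:

// Illustrative only: a toy lattice with the same shape as the V8 helper.
enum Kind { kSmi, kDouble, kObject };

bool IsMoreGeneral(Kind from, Kind to) { return to > from; }  // toy ordering

Kind MoreGeneral(Kind from, Kind to) {
  return IsMoreGeneral(from, to) ? to : from;
}

// Fold a list of kinds into the most general one, as Concat does with
// the kinds of its arguments.
Kind FoldKinds(const Kind* kinds, int n) {
  Kind acc = kSmi;
  for (int i = 0; i < n; ++i) acc = MoreGeneral(acc, kinds[i]);
  return acc;
}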
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 3e80d5570b..4a8308128b 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/elements.h"
#include "src/arguments.h"
#include "src/conversions.h"
-#include "src/elements.h"
+#include "src/factory.h"
#include "src/messages.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
#include "src/utils.h"
// Each concrete ElementsAccessor can handle exactly one ElementsKind,
@@ -51,6 +51,8 @@ namespace {
static const int kPackedSizeNotKnown = -1;
+enum Where { AT_START, AT_END };
+
// First argument in list is the accessor class, the second argument is the
// accessor ElementsKind, and the third is the backing store class. Use the
@@ -97,31 +99,17 @@ ELEMENTS_LIST(ELEMENTS_TRAITS)
#undef ELEMENTS_TRAITS
-static bool HasIndex(Handle<FixedArray> array, Handle<Object> index_handle) {
- DisallowHeapAllocation no_gc;
- Object* index = *index_handle;
- int len0 = array->length();
- for (int i = 0; i < len0; i++) {
- Object* element = array->get(i);
- if (index->KeyEquals(element)) return true;
- }
- return false;
-}
-
-
MUST_USE_RESULT
-static MaybeHandle<Object> ThrowArrayLengthRangeError(Isolate* isolate) {
+MaybeHandle<Object> ThrowArrayLengthRangeError(Isolate* isolate) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidArrayLength),
Object);
}
-static void CopyObjectToObjectElements(FixedArrayBase* from_base,
- ElementsKind from_kind,
- uint32_t from_start,
- FixedArrayBase* to_base,
- ElementsKind to_kind, uint32_t to_start,
- int raw_copy_size) {
+void CopyObjectToObjectElements(FixedArrayBase* from_base,
+ ElementsKind from_kind, uint32_t from_start,
+ FixedArrayBase* to_base, ElementsKind to_kind,
+ uint32_t to_start, int raw_copy_size) {
DCHECK(to_base->map() !=
from_base->GetIsolate()->heap()->fixed_cow_array_map());
DisallowHeapAllocation no_allocation;
@@ -222,9 +210,7 @@ static void CopyDictionaryToObjectElements(
static void CopyDoubleToObjectElements(FixedArrayBase* from_base,
uint32_t from_start,
FixedArrayBase* to_base,
- ElementsKind to_kind, uint32_t to_start,
- int raw_copy_size) {
- DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
+ uint32_t to_start, int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
DisallowHeapAllocation no_allocation;
@@ -255,12 +241,15 @@ static void CopyDoubleToObjectElements(FixedArrayBase* from_base,
Isolate* isolate = from_base->GetIsolate();
Handle<FixedDoubleArray> from(FixedDoubleArray::cast(from_base), isolate);
Handle<FixedArray> to(FixedArray::cast(to_base), isolate);
- for (int i = 0; i < copy_size; ++i) {
+
+ // Use an outer loop so we do not create a HandleScope per element; on the
+ // other hand, one scope over the whole copy could overflow, so the copy
+ // proceeds in batches of 100 elements.
+ int offset = 0;
+ while (offset < copy_size) {
HandleScope scope(isolate);
- if (IsFastSmiElementsKind(to_kind)) {
- UNIMPLEMENTED();
- } else {
- DCHECK(IsFastObjectElementsKind(to_kind));
+ offset += 100;
+ for (int i = offset - 100; i < offset && i < copy_size; ++i) {
Handle<Object> value = FixedDoubleArray::get(from, i + from_start);
to->set(i + to_start, *value, UPDATE_WRITE_BARRIER);
}
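The batched copy above is a general pattern: one HandleScope per fixed-size chunk bounds the number of live handles without paying for a scope per element. A minimal standalone sketch of the pattern, assuming a stand-in Scope type in place of V8's HandleScope (plain C++, illustrative only):

#include <vector>

struct Scope {};  // stand-in for HandleScope: frees per-batch temporaries

// Copies copy_size elements in batches of 100; `to` must already be sized.
void CopyInBatches(const std::vector<double>& from, std::vector<double>& to,
                   int copy_size) {
  int offset = 0;
  while (offset < copy_size) {
    Scope scope;  // one scope per batch, destroyed before the next batch
    offset += 100;
    for (int i = offset - 100; i < offset && i < copy_size; ++i) {
      to[i] = from[i];
    }
  }
}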
@@ -517,8 +506,51 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsAccessorSubclass::ValidateImpl(holder);
}
+ virtual bool IsPacked(Handle<JSObject> holder,
+ Handle<FixedArrayBase> backing_store, uint32_t start,
+ uint32_t end) final {
+ return ElementsAccessorSubclass::IsPackedImpl(holder, backing_store, start,
+ end);
+ }
+
+ static bool IsPackedImpl(Handle<JSObject> holder,
+ Handle<FixedArrayBase> backing_store, uint32_t start,
+ uint32_t end) {
+ if (IsFastPackedElementsKind(kind())) return true;
+ for (uint32_t i = start; i < end; i++) {
+ if (!ElementsAccessorSubclass::HasElementImpl(holder, i, backing_store)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ static void TryTransitionResultArrayToPacked(Handle<JSArray> array) {
+ if (!IsHoleyElementsKind(kind())) return;
+ int length = Smi::cast(array->length())->value();
+ Handle<FixedArrayBase> backing_store(array->elements());
+ if (!ElementsAccessorSubclass::IsPackedImpl(array, backing_store, 0,
+ length)) {
+ return;
+ }
+ ElementsKind packed_kind = GetPackedElementsKind(kind());
+ Handle<Map> new_map =
+ JSObject::GetElementsTransitionMap(array, packed_kind);
+ JSObject::MigrateToMap(array, new_map);
+ if (FLAG_trace_elements_transitions) {
+ JSObject::PrintElementsTransition(stdout, array, kind(), backing_store,
+ packed_kind, backing_store);
+ }
+ }
+
virtual bool HasElement(Handle<JSObject> holder, uint32_t index,
Handle<FixedArrayBase> backing_store) final {
+ return ElementsAccessorSubclass::HasElementImpl(holder, index,
+ backing_store);
+ }
+
+ static bool HasElementImpl(Handle<JSObject> holder, uint32_t index,
+ Handle<FixedArrayBase> backing_store) {
return ElementsAccessorSubclass::GetEntryForIndexImpl(
*holder, *backing_store, index) != kMaxUInt32;
}
@@ -539,9 +571,15 @@ class ElementsAccessorBase : public ElementsAccessor {
ElementsAccessorSubclass::SetImpl(backing_store, entry, value);
}
- static void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
- Object* value) {
- BackingStore::cast(backing_store)->SetValue(entry, value);
+ static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
+ Object* value) {
+ UNREACHABLE();
+ }
+
+
+ static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
+ Object* value, WriteBarrierMode mode) {
+ UNREACHABLE();
}
virtual void Reconfigure(Handle<JSObject> object,
@@ -573,47 +611,170 @@ class ElementsAccessorBase : public ElementsAccessor {
}
virtual uint32_t Push(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store, Object** objects,
- uint32_t push_size, int direction) {
- return ElementsAccessorSubclass::PushImpl(receiver, backing_store, objects,
- push_size, direction);
+ Handle<FixedArrayBase> backing_store, Arguments* args,
+ uint32_t push_size) final {
+ return ElementsAccessorSubclass::PushImpl(receiver, backing_store, args,
+ push_size);
}
static uint32_t PushImpl(Handle<JSArray> receiver,
- Handle<FixedArrayBase> elms_obj, Object** objects,
- uint32_t push_size, int direction) {
+ Handle<FixedArrayBase> elms_obj, Arguments* args,
+                            uint32_t push_size) {
+ UNREACHABLE();
+ return 0;
+ }
+
+ virtual uint32_t Unshift(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ Arguments* args, uint32_t unshift_size) final {
+ return ElementsAccessorSubclass::UnshiftImpl(receiver, backing_store, args,
+ unshift_size);
+ }
+
+ static uint32_t UnshiftImpl(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> elms_obj, Arguments* args,
+ uint32_t unshift_size) {
UNREACHABLE();
return 0;
}
+ virtual Handle<JSArray> Slice(Handle<JSObject> receiver,
+ Handle<FixedArrayBase> backing_store,
+ uint32_t start, uint32_t end) final {
+ return ElementsAccessorSubclass::SliceImpl(receiver, backing_store, start,
+ end);
+ }
+
+ static Handle<JSArray> SliceImpl(Handle<JSObject> receiver,
+ Handle<FixedArrayBase> backing_store,
+ uint32_t start, uint32_t end) {
+ UNREACHABLE();
+ return Handle<JSArray>();
+ }
+
+ virtual Handle<JSArray> Splice(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ uint32_t start, uint32_t delete_count,
+ Arguments* args, uint32_t add_count) final {
+ return ElementsAccessorSubclass::SpliceImpl(receiver, backing_store, start,
+ delete_count, args, add_count);
+ }
+
+ static Handle<JSArray> SpliceImpl(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ uint32_t start, uint32_t delete_count,
+ Arguments* args, uint32_t add_count) {
+ UNREACHABLE();
+ return Handle<JSArray>();
+ }
+
+ virtual Handle<Object> Pop(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store) final {
+ return ElementsAccessorSubclass::PopImpl(receiver, backing_store);
+ }
+
+ static Handle<Object> PopImpl(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store) {
+ UNREACHABLE();
+ return Handle<Object>();
+ }
+
+ virtual Handle<Object> Shift(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store) final {
+ return ElementsAccessorSubclass::ShiftImpl(receiver, backing_store);
+ }
+
+ static Handle<Object> ShiftImpl(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store) {
+ UNREACHABLE();
+ return Handle<Object>();
+ }
+
virtual void SetLength(Handle<JSArray> array, uint32_t length) final {
ElementsAccessorSubclass::SetLengthImpl(array, length,
handle(array->elements()));
}
static void SetLengthImpl(Handle<JSArray> array, uint32_t length,
- Handle<FixedArrayBase> backing_store);
+ Handle<FixedArrayBase> backing_store) {
+ DCHECK(!array->SetLengthWouldNormalize(length));
+ DCHECK(IsFastElementsKind(array->GetElementsKind()));
+ uint32_t old_length = 0;
+ CHECK(array->length()->ToArrayIndex(&old_length));
+
+ if (old_length < length) {
+ ElementsKind kind = array->GetElementsKind();
+ if (!IsFastHoleyElementsKind(kind)) {
+ kind = GetHoleyElementsKind(kind);
+ JSObject::TransitionElementsKind(array, kind);
+ }
+ }
+
+ // Check whether the backing store should be shrunk.
+ uint32_t capacity = backing_store->length();
+ if (length == 0) {
+ array->initialize_elements();
+ } else if (length <= capacity) {
+ if (array->HasFastSmiOrObjectElements()) {
+ backing_store = JSObject::EnsureWritableFastElements(array);
+ }
+ if (2 * length <= capacity) {
+ // If more than half the elements won't be used, trim the array.
+ array->GetHeap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
+ *backing_store, capacity - length);
+ } else {
+ // Otherwise, fill the unused tail with holes.
+ for (uint32_t i = length; i < old_length; i++) {
+ BackingStore::cast(*backing_store)->set_the_hole(i);
+ }
+ }
+ } else {
+ // Check whether the backing store should be expanded.
+ capacity = Max(length, JSObject::NewElementsCapacity(capacity));
+ ElementsAccessorSubclass::GrowCapacityAndConvertImpl(array, capacity);
+ }
+
+ array->set_length(Smi::FromInt(length));
+ JSObject::ValidateElements(array);
+ }
static Handle<FixedArrayBase> ConvertElementsWithCapacity(
Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
ElementsKind from_kind, uint32_t capacity) {
+ return ConvertElementsWithCapacity(
+ object, old_elements, from_kind, capacity, 0, 0,
+ ElementsAccessor::kCopyToEndAndInitializeToHole);
+ }
+
+ static Handle<FixedArrayBase> ConvertElementsWithCapacity(
+ Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
+ ElementsKind from_kind, uint32_t capacity, int copy_size) {
+ return ConvertElementsWithCapacity(object, old_elements, from_kind,
+ capacity, 0, 0, copy_size);
+ }
+
+ static Handle<FixedArrayBase> ConvertElementsWithCapacity(
+ Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
+ ElementsKind from_kind, uint32_t capacity, uint32_t src_index,
+ uint32_t dst_index, int copy_size) {
Isolate* isolate = object->GetIsolate();
- Handle<FixedArrayBase> elements;
+ Handle<FixedArrayBase> new_elements;
if (IsFastDoubleElementsKind(kind())) {
- elements = isolate->factory()->NewFixedDoubleArray(capacity);
+ new_elements = isolate->factory()->NewFixedDoubleArray(capacity);
} else {
- elements = isolate->factory()->NewUninitializedFixedArray(capacity);
+ new_elements = isolate->factory()->NewUninitializedFixedArray(capacity);
}
- int packed = kPackedSizeNotKnown;
+ int packed_size = kPackedSizeNotKnown;
if (IsFastPackedElementsKind(from_kind) && object->IsJSArray()) {
- packed = Smi::cast(JSArray::cast(*object)->length())->value();
+ packed_size = Smi::cast(JSArray::cast(*object)->length())->value();
}
ElementsAccessorSubclass::CopyElementsImpl(
- *old_elements, 0, *elements, from_kind, 0, packed,
- ElementsAccessor::kCopyToEndAndInitializeToHole);
- return elements;
+ *old_elements, src_index, *new_elements, from_kind, dst_index,
+ packed_size, copy_size);
+
+ return new_elements;
}
static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
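The inlined SetLengthImpl shrinks in two tiers: if the new length is at most half the capacity, the backing store is right-trimmed; otherwise only the unused tail is filled with holes. Growth goes through GrowCapacityAndConvertImpl. A rough model of that policy on a std::vector, where the hole marker and names are illustrative and the growth formula mirrors the one visible in the old PushImpl later in this file:

#include <algorithm>
#include <cstddef>
#include <vector>

// Toy model of the shrink/grow policy in SetLengthImpl; -1 marks "the hole".
void SetLength(std::vector<int>& elems, std::size_t new_length) {
  std::size_t capacity = elems.capacity();
  if (new_length == 0) {
    std::vector<int>().swap(elems);            // drop the backing store
  } else if (new_length <= capacity) {
    if (2 * new_length <= capacity) {
      elems.resize(new_length);
      elems.shrink_to_fit();                   // right-trim the store
    } else {
      for (std::size_t i = new_length; i < elems.size(); ++i) {
        elems[i] = -1;                         // hole out the unused tail
      }
      elems.resize(new_length);
    }
  } else {
    // Grow: new capacity is max(length, old + old/2 + 16), then convert.
    elems.reserve(std::max(new_length, capacity + capacity / 2 + 16));
    elems.resize(new_length);
  }
}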
@@ -707,78 +868,26 @@ class ElementsAccessorBase : public ElementsAccessor {
from, from_start, *to, from_kind, to_start, packed_size, copy_size);
}
- virtual Handle<FixedArray> AddElementsToFixedArray(
- Handle<JSObject> receiver, Handle<FixedArray> to,
- FixedArray::KeyFilter filter) final {
+ virtual void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ FixedArray::KeyFilter filter) final {
Handle<FixedArrayBase> from(receiver->elements());
-
- int len0 = to->length();
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
- for (int i = 0; i < len0; i++) {
- DCHECK(!to->get(i)->IsTheHole());
- }
- }
-#endif
-
- // Optimize if 'other' is empty.
- // We cannot optimize if 'this' is empty, as other may have holes.
- uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(*receiver, *from);
- if (len1 == 0) return to;
-
- Isolate* isolate = from->GetIsolate();
-
- // Compute how many elements are not in other.
- uint32_t extra = 0;
- for (uint32_t y = 0; y < len1; y++) {
- if (ElementsAccessorSubclass::HasEntryImpl(*from, y)) {
- Handle<Object> value = ElementsAccessorSubclass::GetImpl(from, y);
-
- DCHECK(!value->IsTheHole());
- DCHECK(!value->IsAccessorPair());
- DCHECK(!value->IsExecutableAccessorInfo());
- if (filter == FixedArray::NON_SYMBOL_KEYS && value->IsSymbol()) {
- continue;
- }
- if (!HasIndex(to, value)) {
- extra++;
- }
- }
- }
-
- if (extra == 0) return to;
-
- // Allocate the result
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(len0 + extra);
-
- // Fill in the content
- {
- DisallowHeapAllocation no_gc;
- WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
- for (int i = 0; i < len0; i++) {
- Object* e = to->get(i);
- DCHECK(e->IsString() || e->IsNumber());
- result->set(i, e, mode);
- }
- }
- // Fill in the extra values.
- uint32_t entry = 0;
- for (uint32_t y = 0; y < len1; y++) {
- if (ElementsAccessorSubclass::HasEntryImpl(*from, y)) {
- Handle<Object> value = ElementsAccessorSubclass::GetImpl(from, y);
- DCHECK(!value->IsAccessorPair());
- DCHECK(!value->IsExecutableAccessorInfo());
- if (filter == FixedArray::NON_SYMBOL_KEYS && value->IsSymbol()) {
- continue;
- }
- if (!value->IsTheHole() && !HasIndex(to, value)) {
- result->set(len0 + entry, *value);
- entry++;
- }
+ uint32_t add_length =
+ ElementsAccessorSubclass::GetCapacityImpl(*receiver, *from);
+ if (add_length == 0) return;
+ accumulator->PrepareForComparisons(add_length);
+ int prev_key_count = accumulator->GetLength();
+ for (uint32_t i = 0; i < add_length; i++) {
+ if (!ElementsAccessorSubclass::HasEntryImpl(*from, i)) continue;
+ Handle<Object> value = ElementsAccessorSubclass::GetImpl(from, i);
+ DCHECK(!value->IsTheHole());
+ DCHECK(!value->IsAccessorPair());
+ DCHECK(!value->IsExecutableAccessorInfo());
+ if (filter == FixedArray::NON_SYMBOL_KEYS && value->IsSymbol()) {
+ continue;
}
+ accumulator->AddKey(value, prev_key_count);
}
- DCHECK(extra == entry);
- return result;
}
static uint32_t GetCapacityImpl(JSObject* holder,
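AddElementsToKeyAccumulator replaces the removed fixed-array merge, and with it the per-key HasIndex scan, by an accumulator that dedupes keys as they arrive. The actual KeyAccumulator strategy is not shown in this diff; a set-backed stand-in captures the shape:

#include <set>
#include <vector>

// Illustrative stand-in for KeyAccumulator: a set lookup replaces the
// removed O(n*m) linear HasIndex scan over the destination array.
struct KeyAccumulator {
  std::vector<int> keys;
  std::set<int> seen;
  void AddKey(int key) {
    if (seen.insert(key).second) keys.push_back(key);  // first sighting only
  }
};

void AddElementsToAccumulator(const std::vector<int>& from,
                              KeyAccumulator* accumulator) {
  for (int value : from) accumulator->AddKey(value);
}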
@@ -929,7 +1038,8 @@ class DictionaryElementsAccessor
return handle(GetRaw(*store, entry), isolate);
}
- static void SetImpl(FixedArrayBase* store, uint32_t entry, Object* value) {
+ static inline void SetImpl(FixedArrayBase* store, uint32_t entry,
+ Object* value) {
SeededNumberDictionary* dictionary = SeededNumberDictionary::cast(store);
dictionary->ValueAtPut(entry, value);
}
@@ -1157,52 +1267,219 @@ class FastElementsAccessor
#endif
}
+ static Handle<Object> PopImpl(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store) {
+ return FastElementsAccessorSubclass::RemoveElement(receiver, backing_store,
+ AT_END);
+ }
+
+ static Handle<Object> ShiftImpl(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store) {
+ return FastElementsAccessorSubclass::RemoveElement(receiver, backing_store,
+ AT_START);
+ }
+
static uint32_t PushImpl(Handle<JSArray> receiver,
Handle<FixedArrayBase> backing_store,
- Object** objects, uint32_t push_size,
- int direction) {
- uint32_t len = Smi::cast(receiver->length())->value();
- if (push_size == 0) {
- return len;
+ Arguments* args, uint32_t push_size) {
+ return FastElementsAccessorSubclass::AddArguments(receiver, backing_store,
+ args, push_size, AT_END);
+ }
+
+ static uint32_t UnshiftImpl(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ Arguments* args, uint32_t unshift_size) {
+ return FastElementsAccessorSubclass::AddArguments(
+ receiver, backing_store, args, unshift_size, AT_START);
+ }
+
+ static void MoveElements(Heap* heap, Handle<FixedArrayBase> backing_store,
+ int dst_index, int src_index, int len,
+ int hole_start, int hole_end) {
+ UNREACHABLE();
+ }
+
+ static Handle<JSArray> SliceImpl(Handle<JSObject> receiver,
+ Handle<FixedArrayBase> backing_store,
+ uint32_t start, uint32_t end) {
+ DCHECK(start < end);
+ Isolate* isolate = receiver->GetIsolate();
+ int result_len = end - start;
+ Handle<JSArray> result_array = isolate->factory()->NewJSArray(
+ KindTraits::Kind, result_len, result_len);
+ DisallowHeapAllocation no_gc;
+ FastElementsAccessorSubclass::CopyElementsImpl(
+ *backing_store, start, result_array->elements(), KindTraits::Kind, 0,
+ kPackedSizeNotKnown, result_len);
+ FastElementsAccessorSubclass::TryTransitionResultArrayToPacked(
+ result_array);
+ return result_array;
+ }
+
+ static Handle<JSArray> SpliceImpl(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ uint32_t start, uint32_t delete_count,
+ Arguments* args, uint32_t add_count) {
+ Isolate* isolate = receiver->GetIsolate();
+ Heap* heap = isolate->heap();
+ uint32_t length = Smi::cast(receiver->length())->value();
+ uint32_t new_length = length - delete_count + add_count;
+
+ if (new_length == 0) {
+ receiver->set_elements(heap->empty_fixed_array());
+ receiver->set_length(Smi::FromInt(0));
+ return isolate->factory()->NewJSArrayWithElements(
+ backing_store, KindTraits::Kind, delete_count);
}
+
+ // Construct the result array which holds the deleted elements.
+ Handle<JSArray> deleted_elements = isolate->factory()->NewJSArray(
+ KindTraits::Kind, delete_count, delete_count);
+ if (delete_count > 0) {
+ DisallowHeapAllocation no_gc;
+ FastElementsAccessorSubclass::CopyElementsImpl(
+ *backing_store, start, deleted_elements->elements(), KindTraits::Kind,
+ 0, kPackedSizeNotKnown, delete_count);
+ }
+
+ // Delete and move elements to make space for add_count new elements.
+ if (add_count < delete_count) {
+ FastElementsAccessorSubclass::SpliceShrinkStep(backing_store, heap, start,
+ delete_count, add_count,
+ length, new_length);
+ } else if (add_count > delete_count) {
+ backing_store = FastElementsAccessorSubclass::SpliceGrowStep(
+ receiver, backing_store, isolate, heap, start, delete_count,
+ add_count, length, new_length);
+ }
+
+ // Copy over the arguments.
+ FastElementsAccessorSubclass::CopyArguments(args, backing_store, add_count,
+ 3, start);
+
+ receiver->set_length(Smi::FromInt(new_length));
+ FastElementsAccessorSubclass::TryTransitionResultArrayToPacked(
+ deleted_elements);
+ return deleted_elements;
+ }
+
+ private:
+ static void SpliceShrinkStep(Handle<FixedArrayBase> backing_store, Heap* heap,
+ uint32_t start, uint32_t delete_count,
+ uint32_t add_count, uint32_t len,
+ uint32_t new_length) {
+ const int move_left_count = len - delete_count - start;
+ const int move_left_dst_index = start + add_count;
+ FastElementsAccessorSubclass::MoveElements(
+ heap, backing_store, move_left_dst_index, start + delete_count,
+ move_left_count, new_length, len);
+ }
+
+
+ static Handle<FixedArrayBase> SpliceGrowStep(
+ Handle<JSArray> receiver, Handle<FixedArrayBase> backing_store,
+ Isolate* isolate, Heap* heap, uint32_t start, uint32_t delete_count,
+ uint32_t add_count, uint32_t length, uint32_t new_length) {
+ // Check we do not overflow the new_length.
+ DCHECK((add_count - delete_count) <= (Smi::kMaxValue - length));
+ // Check if backing_store is big enough.
+ if (new_length <= static_cast<uint32_t>(backing_store->length())) {
+ FastElementsAccessorSubclass::MoveElements(
+ heap, backing_store, start + add_count, start + delete_count,
+ (length - delete_count - start), 0, 0);
+ return backing_store;
+ }
+ // New backing storage is needed.
+ int capacity = JSObject::NewElementsCapacity(new_length);
+ // Partially copy all elements up to start.
+ Handle<FixedArrayBase> new_elms =
+ FastElementsAccessorSubclass::ConvertElementsWithCapacity(
+ receiver, backing_store, KindTraits::Kind, capacity, start);
+ // Copy the trailing elements after start + delete_count
+ FastElementsAccessorSubclass::CopyElementsImpl(
+ *backing_store, start + delete_count, *new_elms, KindTraits::Kind,
+ start + add_count, kPackedSizeNotKnown,
+ ElementsAccessor::kCopyToEndAndInitializeToHole);
+ receiver->set_elements(*new_elms);
+ return new_elms;
+ }
+
+ static Handle<Object> RemoveElement(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ Where remove_position) {
+ uint32_t length =
+ static_cast<uint32_t>(Smi::cast(receiver->length())->value());
+ Isolate* isolate = receiver->GetIsolate();
+ DCHECK(length > 0);
+ int new_length = length - 1;
+ int remove_index = remove_position == AT_START ? 0 : new_length;
+ Handle<Object> result =
+ FastElementsAccessorSubclass::GetImpl(backing_store, remove_index);
+ if (remove_position == AT_START) {
+ Heap* heap = isolate->heap();
+ FastElementsAccessorSubclass::MoveElements(heap, backing_store, 0, 1,
+ new_length, 0, 0);
+ }
+ FastElementsAccessorSubclass::SetLengthImpl(receiver, new_length,
+ backing_store);
+
+ if (IsHoleyElementsKind(KindTraits::Kind) && result->IsTheHole()) {
+ return receiver->GetIsolate()->factory()->undefined_value();
+ }
+ return result;
+ }
+
+ static uint32_t AddArguments(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ Arguments* args, uint32_t add_size,
+ Where remove_position) {
+ uint32_t length = Smi::cast(receiver->length())->value();
+ DCHECK(add_size > 0);
uint32_t elms_len = backing_store->length();
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- DCHECK(push_size <= static_cast<uint32_t>(Smi::kMaxValue - len));
- uint32_t new_length = len + push_size;
- Handle<FixedArrayBase> new_elms;
+ // Check we do not overflow the new_length.
+ DCHECK(add_size <= static_cast<uint32_t>(Smi::kMaxValue - length));
+ uint32_t new_length = length + add_size;
if (new_length > elms_len) {
// New backing storage is needed.
- uint32_t capacity = new_length + (new_length >> 1) + 16;
- new_elms = FastElementsAccessorSubclass::ConvertElementsWithCapacity(
- receiver, backing_store, KindTraits::Kind, capacity);
- } else {
- // push_size is > 0 and new_length <= elms_len, so backing_store cannot be
- // the
- // empty_fixed_array.
- new_elms = backing_store;
+ uint32_t capacity = JSObject::NewElementsCapacity(new_length);
+ // If we add arguments to the start we have to shift the existing objects.
+ int copy_dst_index = remove_position == AT_START ? add_size : 0;
+ // Copy over all objects to a new backing_store.
+ backing_store = FastElementsAccessorSubclass::ConvertElementsWithCapacity(
+ receiver, backing_store, KindTraits::Kind, capacity, 0,
+ copy_dst_index, ElementsAccessor::kCopyToEndAndInitializeToHole);
+ receiver->set_elements(*backing_store);
+ } else if (remove_position == AT_START) {
+ // If the backing store has enough capacity and we add elements to the
+ // start we have to shift the existing objects.
+ Isolate* isolate = receiver->GetIsolate();
+ FastElementsAccessorSubclass::MoveElements(isolate->heap(), backing_store,
+ add_size, 0, length, 0, 0);
}
- // Add the provided values.
- DisallowHeapAllocation no_gc;
- DCHECK(direction == ElementsAccessor::kDirectionForward ||
- direction == ElementsAccessor::kDirectionReverse);
- STATIC_ASSERT(ElementsAccessor::kDirectionForward == 1);
- STATIC_ASSERT(ElementsAccessor::kDirectionReverse == -1);
- for (uint32_t index = 0; index < push_size; index++) {
- int offset = direction * index;
- Object* object = objects[offset];
- FastElementsAccessorSubclass::SetImpl(*new_elms, index + len, object);
- }
- if (!new_elms.is_identical_to(backing_store)) {
- receiver->set_elements(*new_elms);
- }
- DCHECK(*new_elms == receiver->elements());
+ int insertion_index = remove_position == AT_START ? 0 : length;
+ // Copy the arguments to the start.
+ FastElementsAccessorSubclass::CopyArguments(args, backing_store, add_size,
+ 1, insertion_index);
// Set the length.
receiver->set_length(Smi::FromInt(new_length));
return new_length;
}
+
+ static void CopyArguments(Arguments* args, Handle<FixedArrayBase> dst_store,
+ uint32_t copy_size, uint32_t src_index,
+ uint32_t dst_index) {
+ // Add the provided values.
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase* raw_backing_store = *dst_store;
+ WriteBarrierMode mode = raw_backing_store->GetWriteBarrierMode(no_gc);
+ for (uint32_t i = 0; i < copy_size; i++) {
+ Object* argument = (*args)[i + src_index];
+ FastElementsAccessorSubclass::SetImpl(raw_backing_store, i + dst_index,
+ argument, mode);
+ }
+ }
};
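SpliceImpl performs three steps in place: copy out the deleted range, shrink or grow-and-move the remainder, then copy the new arguments in. The observable contract is the usual splice, which a container-level sketch makes concrete (std::vector instead of a FixedArrayBase; illustrative only):

#include <cstddef>
#include <vector>

// Returns the deleted elements, like Array.prototype.splice.
std::vector<int> Splice(std::vector<int>& v, std::size_t start,
                        std::size_t delete_count,
                        const std::vector<int>& to_add) {
  std::vector<int> deleted(v.begin() + start,
                           v.begin() + start + delete_count);
  v.erase(v.begin() + start, v.begin() + start + delete_count);  // shrink step
  v.insert(v.begin() + start, to_add.begin(), to_add.end());     // grow step
  return deleted;
}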
@@ -1215,12 +1492,35 @@ class FastSmiOrObjectElementsAccessor
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits>(name) {}
+ static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
+ Object* value) {
+ FixedArray::cast(backing_store)->set(entry, value);
+ }
+
+ static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
+ Object* value, WriteBarrierMode mode) {
+ FixedArray::cast(backing_store)->set(entry, value, mode);
+ }
+
static Object* GetRaw(FixedArray* backing_store, uint32_t entry) {
uint32_t index = FastElementsAccessorSubclass::GetIndexForEntryImpl(
backing_store, entry);
return backing_store->get(index);
}
+ static void MoveElements(Heap* heap, Handle<FixedArrayBase> backing_store,
+ int dst_index, int src_index, int len,
+ int hole_start, int hole_end) {
+ Handle<FixedArray> dst_elms = Handle<FixedArray>::cast(backing_store);
+ if (len != 0) {
+ DisallowHeapAllocation no_gc;
+ heap->MoveElements(*dst_elms, dst_index, src_index, len);
+ }
+ if (hole_start != hole_end) {
+ dst_elms->FillWithHoles(hole_start, hole_end);
+ }
+ }
+
// NOTE: this method violates the handlified function signature convention:
// raw pointer parameters in the function that allocates.
// See ElementsAccessor::CopyElements() for details.
@@ -1243,8 +1543,8 @@ class FastSmiOrObjectElementsAccessor
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS: {
AllowHeapAllocation allow_allocation;
- CopyDoubleToObjectElements(
- from, from_start, to, to_kind, to_start, copy_size);
+ DCHECK(IsFastObjectElementsKind(to_kind));
+ CopyDoubleToObjectElements(from, from_start, to, to_start, copy_size);
break;
}
case DICTIONARY_ELEMENTS:
@@ -1321,6 +1621,30 @@ class FastDoubleElementsAccessor
: FastElementsAccessor<FastElementsAccessorSubclass,
KindTraits>(name) {}
+ static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
+ Object* value) {
+ FixedDoubleArray::cast(backing_store)->set(entry, value->Number());
+ }
+
+ static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
+ Object* value, WriteBarrierMode mode) {
+ FixedDoubleArray::cast(backing_store)->set(entry, value->Number());
+ }
+
+ static void MoveElements(Heap* heap, Handle<FixedArrayBase> backing_store,
+ int dst_index, int src_index, int len,
+ int hole_start, int hole_end) {
+ Handle<FixedDoubleArray> dst_elms =
+ Handle<FixedDoubleArray>::cast(backing_store);
+ if (len != 0) {
+ MemMove(dst_elms->data_start() + dst_index,
+ dst_elms->data_start() + src_index, len * kDoubleSize);
+ }
+ if (hole_start != hole_end) {
+ dst_elms->FillWithHoles(hole_start, hole_end);
+ }
+ }
+
static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
FixedArrayBase* to, ElementsKind from_kind,
uint32_t to_start, int packed_size,
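The two MoveElements overrides differ where it matters: Smi/object arrays route moves through Heap::MoveElements so write barriers observe them, while unboxed double arrays are raw memory and can use MemMove. A standalone sketch of the double case, with a NaN payload standing in for FillWithHoles (illustrative only):

#include <cmath>
#include <cstring>

// Move len doubles within one buffer, then fill a vacated range with a
// hole marker (here: quiet NaN).
void MoveDoubles(double* data, int dst_index, int src_index, int len,
                 int hole_start, int hole_end) {
  if (len != 0) {
    std::memmove(data + dst_index, data + src_index, len * sizeof(double));
  }
  for (int i = hole_start; i < hole_end; ++i) {
    data[i] = std::nan("");  // stand-in for FillWithHoles
  }
}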
@@ -1397,6 +1721,16 @@ class TypedElementsAccessor
typedef typename ElementsKindTraits<Kind>::BackingStore BackingStore;
typedef TypedElementsAccessor<Kind> AccessorClass;
+ static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
+ Object* value) {
+ BackingStore::cast(backing_store)->SetValue(entry, value);
+ }
+
+ static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
+ Object* value, WriteBarrierMode mode) {
+ BackingStore::cast(backing_store)->SetValue(entry, value);
+ }
+
static Handle<Object> GetImpl(Handle<FixedArrayBase> backing_store,
uint32_t entry) {
uint32_t index = GetIndexForEntryImpl(*backing_store, entry);
@@ -1497,7 +1831,8 @@ class SloppyArgumentsElementsAccessor
UNREACHABLE();
}
- static void SetImpl(FixedArrayBase* store, uint32_t entry, Object* value) {
+ static inline void SetImpl(FixedArrayBase* store, uint32_t entry,
+ Object* value) {
FixedArray* parameter_map = FixedArray::cast(store);
uint32_t length = parameter_map->length() - 2;
if (entry < length) {
@@ -1778,50 +2113,6 @@ class FastSloppyArgumentsElementsAccessor
};
-template <typename ElementsAccessorSubclass, typename ElementsKindTraits>
-void ElementsAccessorBase<ElementsAccessorSubclass, ElementsKindTraits>::
- SetLengthImpl(Handle<JSArray> array, uint32_t length,
- Handle<FixedArrayBase> backing_store) {
- DCHECK(!array->SetLengthWouldNormalize(length));
- DCHECK(IsFastElementsKind(array->GetElementsKind()));
- uint32_t old_length = 0;
- CHECK(array->length()->ToArrayIndex(&old_length));
-
- if (old_length < length) {
- ElementsKind kind = array->GetElementsKind();
- if (!IsFastHoleyElementsKind(kind)) {
- kind = GetHoleyElementsKind(kind);
- JSObject::TransitionElementsKind(array, kind);
- }
- }
-
- // Check whether the backing store should be shrunk.
- uint32_t capacity = backing_store->length();
- if (length == 0) {
- array->initialize_elements();
- } else if (length <= capacity) {
- if (array->HasFastSmiOrObjectElements()) {
- backing_store = JSObject::EnsureWritableFastElements(array);
- }
- if (2 * length <= capacity) {
- // If more than half the elements won't be used, trim the array.
- array->GetHeap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(
- *backing_store, capacity - length);
- } else {
- // Otherwise, fill the unused tail with holes.
- for (uint32_t i = length; i < old_length; i++) {
- BackingStore::cast(*backing_store)->set_the_hole(i);
- }
- }
- } else {
- // Check whether the backing store should be expanded.
- capacity = Max(length, JSObject::NewElementsCapacity(capacity));
- ElementsAccessorSubclass::GrowCapacityAndConvertImpl(array, capacity);
- }
-
- array->set_length(Smi::FromInt(length));
- JSObject::ValidateElements(array);
-}
} // namespace
@@ -1978,6 +2269,67 @@ void ElementsAccessor::TearDown() {
}
+Handle<JSArray> ElementsAccessor::Concat(Isolate* isolate, Arguments* args,
+ uint32_t concat_size) {
+ int result_len = 0;
+ ElementsKind elements_kind = GetInitialFastElementsKind();
+ bool has_double = false;
+ {
+ DisallowHeapAllocation no_gc;
+ // Iterate through all the arguments performing checks
+ // and calculating total length.
+ bool is_holey = false;
+ for (uint32_t i = 0; i < concat_size; i++) {
+ Object* arg = (*args)[i];
+ int len = Smi::cast(JSArray::cast(arg)->length())->value();
+
+ // We shouldn't overflow when adding another len.
+ const int kHalfOfMaxInt = 1 << (kBitsPerInt - 2);
+ STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
+ USE(kHalfOfMaxInt);
+ result_len += len;
+ DCHECK(0 <= result_len);
+ DCHECK(result_len <= FixedDoubleArray::kMaxLength);
+
+ ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
+ has_double = has_double || IsFastDoubleElementsKind(arg_kind);
+ is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
+ elements_kind = GetMoreGeneralElementsKind(elements_kind, arg_kind);
+ }
+ if (is_holey) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ }
+ }
+
+  // If a double array is concatenated into a fast elements array, the fast
+ // elements array needs to be initialized to contain proper holes, since
+ // boxing doubles may cause incremental marking.
+ ArrayStorageAllocationMode mode =
+ has_double && IsFastObjectElementsKind(elements_kind)
+ ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+ : DONT_INITIALIZE_ARRAY_ELEMENTS;
+ Handle<JSArray> result_array = isolate->factory()->NewJSArray(
+ elements_kind, result_len, result_len, Strength::WEAK, mode);
+ if (result_len == 0) return result_array;
+ int j = 0;
+ Handle<FixedArrayBase> storage(result_array->elements(), isolate);
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
+ for (uint32_t i = 0; i < concat_size; i++) {
+ // It is crucial to keep |array| in a raw pointer form to avoid
+ // performance degradation.
+ JSArray* array = JSArray::cast((*args)[i]);
+ int len = Smi::cast(array->length())->value();
+ if (len > 0) {
+ ElementsKind from_kind = array->GetElementsKind();
+ accessor->CopyElements(array, 0, from_kind, storage, j, len);
+ j += len;
+ }
+ }
+
+ DCHECK(j == result_len);
+ return result_array;
+}
+
ElementsAccessor** ElementsAccessor::elements_accessors_ = NULL;
} // namespace internal
} // namespace v8
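Concat is two passes: the first sums the argument lengths and folds their kinds into the most general result kind, the second copies each source into the preallocated result. Stripped of kind handling, the shape is (a sketch on std::vector, not V8 code):

#include <cstddef>
#include <vector>

std::vector<int> Concat(const std::vector<std::vector<int>>& args) {
  std::size_t result_len = 0;
  for (const auto& a : args) result_len += a.size();  // pass 1: total length
  std::vector<int> result;
  result.reserve(result_len);                         // preallocate once
  for (const auto& a : args) {                        // pass 2: copy each arg
    result.insert(result.end(), a.begin(), a.end());
  }
  return result;
}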
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 0131f0baf0..fcc90024ba 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -38,6 +38,11 @@ class ElementsAccessor {
return HasElement(holder, index, handle(holder->elements()));
}
+ // Returns true if the backing store is compact in the given range
+ virtual bool IsPacked(Handle<JSObject> holder,
+ Handle<FixedArrayBase> backing_store, uint32_t start,
+ uint32_t end) = 0;
+
virtual Handle<Object> Get(Handle<FixedArrayBase> backing_store,
uint32_t entry) = 0;
@@ -60,9 +65,6 @@ class ElementsAccessor {
// destination array with the hole.
static const int kCopyToEndAndInitializeToHole = -2;
- static const int kDirectionForward = 1;
- static const int kDirectionReverse = -1;
-
// Copy elements from one backing store to another. Typically, callers specify
// the source JSObject or JSArray in source_holder. If the holder's backing
// store is available, it can be passed in source and source_holder is
@@ -98,9 +100,9 @@ class ElementsAccessor {
virtual void GrowCapacityAndConvert(Handle<JSObject> object,
uint32_t capacity) = 0;
- virtual Handle<FixedArray> AddElementsToFixedArray(
- Handle<JSObject> receiver, Handle<FixedArray> to,
- FixedArray::KeyFilter filter) = 0;
+ virtual void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
+ KeyAccumulator* accumulator,
+ FixedArray::KeyFilter filter) = 0;
// Returns a shared ElementsAccessor for the specified ElementsKind.
static ElementsAccessor* ForKind(ElementsKind elements_kind) {
@@ -125,11 +127,31 @@ class ElementsAccessor {
Handle<Object> value, PropertyAttributes attributes,
uint32_t new_capacity) = 0;
- // TODO(cbruni): Consider passing Arguments* instead of Object** depending on
- // the requirements of future callers.
+ static Handle<JSArray> Concat(Isolate* isolate, Arguments* args,
+ uint32_t concat_size);
+
virtual uint32_t Push(Handle<JSArray> receiver,
- Handle<FixedArrayBase> backing_store, Object** objects,
- uint32_t start, int direction) = 0;
+ Handle<FixedArrayBase> backing_store, Arguments* args,
+ uint32_t push_size) = 0;
+
+ virtual uint32_t Unshift(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ Arguments* args, uint32_t unshift_size) = 0;
+
+ virtual Handle<JSArray> Slice(Handle<JSObject> receiver,
+ Handle<FixedArrayBase> backing_store,
+ uint32_t start, uint32_t end) = 0;
+
+ virtual Handle<JSArray> Splice(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store,
+ uint32_t start, uint32_t delete_count,
+ Arguments* args, uint32_t add_count) = 0;
+
+ virtual Handle<Object> Pop(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store) = 0;
+
+ virtual Handle<Object> Shift(Handle<JSArray> receiver,
+ Handle<FixedArrayBase> backing_store) = 0;
protected:
friend class LookupIterator;
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index c2033777f2..526390bd18 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -7,6 +7,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/vm-state-inl.h"
@@ -50,33 +51,14 @@ static void PrintDeserializedCodeInfo(Handle<JSFunction> function) {
}
-MUST_USE_RESULT static MaybeHandle<Object> Invoke(
- bool is_construct,
- Handle<JSFunction> function,
- Handle<Object> receiver,
- int argc,
- Handle<Object> args[]) {
- Isolate* isolate = function->GetIsolate();
+namespace {
- // api callbacks can be called directly.
- if (!is_construct && function->shared()->IsApiFunction()) {
- SaveContext save(isolate);
- isolate->set_context(function->context());
- if (receiver->IsGlobalObject()) {
- receiver = handle(Handle<GlobalObject>::cast(receiver)->global_proxy());
- }
- DCHECK(function->context()->global_object()->IsGlobalObject());
- auto value = Builtins::InvokeApiFunction(function, receiver, argc, args);
- bool has_exception = value.is_null();
- DCHECK(has_exception == isolate->has_pending_exception());
- if (has_exception) {
- isolate->ReportPendingMessages();
- return MaybeHandle<Object>();
- } else {
- isolate->clear_pending_message();
- }
- return value;
- }
+MUST_USE_RESULT MaybeHandle<Object> Invoke(Isolate* isolate, bool is_construct,
+ Handle<Object> target,
+ Handle<Object> receiver, int argc,
+ Handle<Object> args[],
+ Handle<Object> new_target) {
+ DCHECK(!receiver->IsGlobalObject());
// Entering JavaScript.
VMState<JS> state(isolate);
@@ -90,27 +72,14 @@ MUST_USE_RESULT static MaybeHandle<Object> Invoke(
// Placeholder for return value.
Object* value = NULL;
- typedef Object* (*JSEntryFunction)(byte* entry,
- Object* function,
- Object* receiver,
- int argc,
+ typedef Object* (*JSEntryFunction)(Object* new_target, Object* target,
+ Object* receiver, int argc,
Object*** args);
Handle<Code> code = is_construct
? isolate->factory()->js_construct_entry_code()
: isolate->factory()->js_entry_code();
- // Convert calls on global objects to be calls on the global
- // receiver instead to avoid having a 'this' pointer which refers
- // directly to a global object.
- if (receiver->IsGlobalObject()) {
- receiver = handle(Handle<GlobalObject>::cast(receiver)->global_proxy());
- }
-
- // Make sure that the global object of the context we're about to
- // make the current one is indeed a global object.
- DCHECK(function->context()->global_object()->IsGlobalObject());
-
{
// Save and restore context around invocation and block the
// allocation of handles without explicit handle scopes.
@@ -119,13 +88,14 @@ MUST_USE_RESULT static MaybeHandle<Object> Invoke(
JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry());
// Call the function through the right JS entry stub.
- byte* function_entry = function->code()->entry();
- JSFunction* func = *function;
+ Object* orig_func = *new_target;
+ Object* func = *target;
Object* recv = *receiver;
Object*** argv = reinterpret_cast<Object***>(args);
- if (FLAG_profile_deserialization) PrintDeserializedCodeInfo(function);
- value =
- CALL_GENERATED_CODE(stub_entry, function_entry, func, recv, argc, argv);
+ if (FLAG_profile_deserialization && target->IsJSFunction()) {
+ PrintDeserializedCodeInfo(Handle<JSFunction>::cast(target));
+ }
+ value = CALL_GENERATED_CODE(stub_entry, orig_func, func, recv, argc, argv);
}
#ifdef VERIFY_HEAP
@@ -151,39 +121,67 @@ MUST_USE_RESULT static MaybeHandle<Object> Invoke(
return Handle<Object>(value, isolate);
}
+} // namespace
+
-MaybeHandle<Object> Execution::Call(Isolate* isolate,
- Handle<Object> callable,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- bool convert_receiver) {
- if (!callable->IsJSFunction()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, callable, TryGetFunctionDelegate(isolate, callable), Object);
+// static
+MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
+ Handle<Object> receiver, int argc,
+ Handle<Object> argv[]) {
+ // Convert calls on global objects to be calls on the global
+ // receiver instead to avoid having a 'this' pointer which refers
+ // directly to a global object.
+ if (receiver->IsGlobalObject()) {
+ receiver =
+ handle(Handle<GlobalObject>::cast(receiver)->global_proxy(), isolate);
}
- Handle<JSFunction> func = Handle<JSFunction>::cast(callable);
-
- // In sloppy mode, convert receiver.
- if (convert_receiver && !receiver->IsJSReceiver() &&
- !func->shared()->native() && is_sloppy(func->shared()->language_mode())) {
- if (receiver->IsUndefined() || receiver->IsNull()) {
- receiver = handle(func->global_proxy());
- DCHECK(!receiver->IsJSBuiltinsObject());
+
+ // api callbacks can be called directly.
+ if (callable->IsJSFunction() &&
+ Handle<JSFunction>::cast(callable)->shared()->IsApiFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
+ SaveContext save(isolate);
+ isolate->set_context(function->context());
+ // Do proper receiver conversion for non-strict mode api functions.
+ if (!receiver->IsJSReceiver() &&
+ is_sloppy(function->shared()->language_mode())) {
+ if (receiver->IsUndefined() || receiver->IsNull()) {
+ receiver = handle(function->global_proxy(), isolate);
+ } else {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, receiver, Execution::ToObject(isolate, receiver), Object);
+ }
+ }
+ DCHECK(function->context()->global_object()->IsGlobalObject());
+ auto value = Builtins::InvokeApiFunction(function, receiver, argc, argv);
+ bool has_exception = value.is_null();
+ DCHECK(has_exception == isolate->has_pending_exception());
+ if (has_exception) {
+ isolate->ReportPendingMessages();
+ return MaybeHandle<Object>();
} else {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, receiver, ToObject(isolate, receiver), Object);
+ isolate->clear_pending_message();
}
+ return value;
}
+ return Invoke(isolate, false, callable, receiver, argc, argv,
+ isolate->factory()->undefined_value());
+}
- return Invoke(false, func, receiver, argc, argv);
+
+// static
+MaybeHandle<Object> Execution::New(Handle<JSFunction> constructor, int argc,
+ Handle<Object> argv[]) {
+ return New(constructor->GetIsolate(), constructor, constructor, argc, argv);
}
-MaybeHandle<Object> Execution::New(Handle<JSFunction> func,
- int argc,
+// static
+MaybeHandle<Object> Execution::New(Isolate* isolate, Handle<Object> constructor,
+ Handle<Object> new_target, int argc,
Handle<Object> argv[]) {
- return Invoke(true, func, handle(func->global_proxy()), argc, argv);
+ return Invoke(isolate, true, constructor,
+ isolate->factory()->undefined_value(), argc, argv, new_target);
}
@@ -204,7 +202,7 @@ MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func,
catcher.SetVerbose(false);
catcher.SetCaptureMessage(false);
- maybe_result = Invoke(false, func, receiver, argc, args);
+ maybe_result = Call(isolate, func, receiver, argc, args);
if (maybe_result.is_null()) {
DCHECK(catcher.HasCaught());
@@ -231,116 +229,6 @@ MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func,
}
-Handle<Object> Execution::GetFunctionDelegate(Isolate* isolate,
- Handle<Object> object) {
- DCHECK(!object->IsJSFunction());
- Factory* factory = isolate->factory();
-
- // If you return a function from here, it will be called when an
- // attempt is made to call the given object as a function.
-
- // If object is a function proxy, get its handler. Iterate if necessary.
- Object* fun = *object;
- while (fun->IsJSFunctionProxy()) {
- fun = JSFunctionProxy::cast(fun)->call_trap();
- }
- if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
-
- // Objects created through the API can have an instance-call handler
- // that should be used when calling the object as a function.
- if (fun->IsHeapObject() &&
- HeapObject::cast(fun)->map()->has_instance_call_handler()) {
- return Handle<JSFunction>(
- isolate->native_context()->call_as_function_delegate());
- }
-
- return factory->undefined_value();
-}
-
-
-MaybeHandle<Object> Execution::TryGetFunctionDelegate(Isolate* isolate,
- Handle<Object> object) {
- DCHECK(!object->IsJSFunction());
-
- // If object is a function proxy, get its handler. Iterate if necessary.
- Object* fun = *object;
- while (fun->IsJSFunctionProxy()) {
- fun = JSFunctionProxy::cast(fun)->call_trap();
- }
- if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
-
- // Objects created through the API can have an instance-call handler
- // that should be used when calling the object as a function.
- if (fun->IsHeapObject() &&
- HeapObject::cast(fun)->map()->has_instance_call_handler()) {
- return Handle<JSFunction>(
- isolate->native_context()->call_as_function_delegate());
- }
-
- // If the Object doesn't have an instance-call handler we should
- // throw a non-callable exception.
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kCalledNonCallable, object),
- Object);
-}
-
-
-Handle<Object> Execution::GetConstructorDelegate(Isolate* isolate,
- Handle<Object> object) {
- DCHECK(!object->IsJSFunction());
-
- // If you return a function from here, it will be called when an
- // attempt is made to call the given object as a constructor.
-
- // If object is a function proxies, get its handler. Iterate if necessary.
- Object* fun = *object;
- while (fun->IsJSFunctionProxy()) {
- fun = JSFunctionProxy::cast(fun)->call_trap();
- }
- if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
-
- // Objects created through the API can have an instance-call handler
- // that should be used when calling the object as a function.
- if (fun->IsHeapObject() &&
- HeapObject::cast(fun)->map()->has_instance_call_handler()) {
- return Handle<JSFunction>(
- isolate->native_context()->call_as_constructor_delegate());
- }
-
- return isolate->factory()->undefined_value();
-}
-
-
-MaybeHandle<Object> Execution::TryGetConstructorDelegate(
- Isolate* isolate, Handle<Object> object) {
- DCHECK(!object->IsJSFunction());
-
- // If you return a function from here, it will be called when an
- // attempt is made to call the given object as a constructor.
-
- // If object is a function proxies, get its handler. Iterate if necessary.
- Object* fun = *object;
- while (fun->IsJSFunctionProxy()) {
- fun = JSFunctionProxy::cast(fun)->call_trap();
- }
- if (fun->IsJSFunction()) return Handle<Object>(fun, isolate);
-
- // Objects created through the API can have an instance-call handler
- // that should be used when calling the object as a function.
- if (fun->IsHeapObject() &&
- HeapObject::cast(fun)->map()->has_instance_call_handler()) {
- return Handle<JSFunction>(
- isolate->native_context()->call_as_constructor_delegate());
- }
-
- // If the Object doesn't have an instance-call handler we should
- // throw a non-callable exception.
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kCalledNonCallable, object),
- Object);
-}
-
-
void StackGuard::SetStackLimit(uintptr_t limit) {
ExecutionAccess access(isolate_);
// If the current limits are special (e.g. due to a pending interrupt) then
@@ -426,6 +314,9 @@ void StackGuard::RequestInterrupt(InterruptFlag flag) {
// Not intercepted. Set as active interrupt flag.
thread_local_.interrupt_flags_ |= flag;
set_interrupt_limits(access);
+
+ // If this isolate is waiting in a futex, notify it to wake up.
+ isolate_->futex_wait_list_node()->NotifyWake();
}
@@ -541,36 +432,12 @@ void StackGuard::InitThread(const ExecutionAccess& lock) {
} while (false)
-MaybeHandle<Object> Execution::ToNumber(
- Isolate* isolate, Handle<Object> obj) {
- RETURN_NATIVE_CALL(to_number, { obj });
-}
-
-
-MaybeHandle<Object> Execution::ToString(
- Isolate* isolate, Handle<Object> obj) {
- RETURN_NATIVE_CALL(to_string, { obj });
-}
-
-
MaybeHandle<Object> Execution::ToDetailString(
Isolate* isolate, Handle<Object> obj) {
RETURN_NATIVE_CALL(to_detail_string, { obj });
}
-MaybeHandle<Object> Execution::ToInteger(
- Isolate* isolate, Handle<Object> obj) {
- RETURN_NATIVE_CALL(to_integer, { obj });
-}
-
-
-MaybeHandle<Object> Execution::ToLength(
- Isolate* isolate, Handle<Object> obj) {
- RETURN_NATIVE_CALL(to_length, { obj });
-}
-
-
MaybeHandle<Object> Execution::NewDate(Isolate* isolate, double time) {
Handle<Object> time_obj = isolate->factory()->NewNumber(time);
RETURN_NATIVE_CALL(create_date, { time_obj });
@@ -580,13 +447,6 @@ MaybeHandle<Object> Execution::NewDate(Isolate* isolate, double time) {
#undef RETURN_NATIVE_CALL
-MaybeHandle<Object> Execution::ToInt32(Isolate* isolate, Handle<Object> obj) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, Execution::ToNumber(isolate, obj),
- Object);
- return isolate->factory()->NewNumberFromInt(DoubleToInt32(obj->Number()));
-}
-
-
MaybeHandle<Object> Execution::ToObject(Isolate* isolate, Handle<Object> obj) {
Handle<JSReceiver> receiver;
if (JSReceiver::ToObject(isolate, obj).ToHandle(&receiver)) {
@@ -597,13 +457,6 @@ MaybeHandle<Object> Execution::ToObject(Isolate* isolate, Handle<Object> obj) {
}
-MaybeHandle<Object> Execution::ToUint32(Isolate* isolate, Handle<Object> obj) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, Execution::ToNumber(isolate, obj),
- Object);
- return isolate->factory()->NewNumberFromUint(DoubleToUint32(obj->Number()));
-}
-
-
MaybeHandle<JSRegExp> Execution::NewJSRegExp(Handle<String> pattern,
Handle<String> flags) {
Isolate* isolate = pattern->GetIsolate();
@@ -636,7 +489,7 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
}
-void StackGuard::CheckAndHandleGCInterrupt() {
+void StackGuard::HandleGCInterrupt() {
if (CheckAndClearInterrupt(GC_REQUEST)) {
isolate_->heap()->HandleGCRequest();
}
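With the delegate machinery gone, Execution::Call now normalizes the receiver itself: global objects become their global proxy, and sloppy-mode API functions coerce undefined/null receivers to the global proxy, with other primitives going through ToObject. A toy model of that fix-up, assuming an illustrative Value type (real code dispatches on heap object maps):

// Illustrative only; not the V8 representation.
struct Value {
  bool is_global = false;
  bool is_undefined_or_null = false;
};

Value FixUpReceiver(Value receiver, const Value& global_proxy,
                    bool sloppy_api_function) {
  if (receiver.is_global) return global_proxy;  // global -> its proxy
  if (sloppy_api_function && receiver.is_undefined_or_null) {
    return global_proxy;                        // sloppy default receiver
  }
  return receiver;  // otherwise unchanged here; a remaining primitive
                    // receiver would be converted via ToObject()
}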
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index d783e5c28b..84f106a496 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -19,32 +19,25 @@ class JSRegExp;
class Execution final : public AllStatic {
public:
// Call a function, the caller supplies a receiver and an array
- // of arguments. Arguments are Object* type. After function returns,
- // pointers in 'args' might be invalid.
+ // of arguments.
//
- // *pending_exception tells whether the invoke resulted in
- // a pending exception.
+ // When the function called is not in strict mode, receiver is
+ // converted to an object.
//
- // When convert_receiver is set, and the receiver is not an object,
- // and the function called is not in strict mode, receiver is converted to
- // an object.
- //
- MUST_USE_RESULT static MaybeHandle<Object> Call(
- Isolate* isolate,
- Handle<Object> callable,
- Handle<Object> receiver,
- int argc,
- Handle<Object> argv[],
- bool convert_receiver = false);
+ MUST_USE_RESULT static MaybeHandle<Object> Call(Isolate* isolate,
+ Handle<Object> callable,
+ Handle<Object> receiver,
+ int argc,
+ Handle<Object> argv[]);
// Construct object from function, the caller supplies an array of
- // arguments. Arguments are Object* type. After function returns,
- // pointers in 'args' might be invalid.
- //
- // *pending_exception tells whether the invoke resulted in
- // a pending exception.
- //
- MUST_USE_RESULT static MaybeHandle<Object> New(Handle<JSFunction> func,
+ // arguments.
+ MUST_USE_RESULT static MaybeHandle<Object> New(Handle<JSFunction> constructor,
+ int argc,
+ Handle<Object> argv[]);
+ MUST_USE_RESULT static MaybeHandle<Object> New(Isolate* isolate,
+ Handle<Object> constructor,
+ Handle<Object> new_target,
int argc,
Handle<Object> argv[]);
@@ -59,31 +52,6 @@ class Execution final : public AllStatic {
Handle<Object> argv[],
MaybeHandle<Object>* exception_out = NULL);
- // ECMA-262 9.3
- MUST_USE_RESULT static MaybeHandle<Object> ToNumber(
- Isolate* isolate, Handle<Object> obj);
-
- // ECMA-262 9.4
- MUST_USE_RESULT static MaybeHandle<Object> ToInteger(
- Isolate* isolate, Handle<Object> obj);
-
- // ECMA-262 9.5
- MUST_USE_RESULT static MaybeHandle<Object> ToInt32(
- Isolate* isolate, Handle<Object> obj);
-
- // ECMA-262 9.6
- MUST_USE_RESULT static MaybeHandle<Object> ToUint32(
- Isolate* isolate, Handle<Object> obj);
-
-
- // ES6, draft 10-14-14, section 7.1.15
- MUST_USE_RESULT static MaybeHandle<Object> ToLength(
- Isolate* isolate, Handle<Object> obj);
-
- // ECMA-262 9.8
- MUST_USE_RESULT static MaybeHandle<Object> ToString(
- Isolate* isolate, Handle<Object> obj);
-
// ECMA-262 9.8
MUST_USE_RESULT static MaybeHandle<Object> ToDetailString(
Isolate* isolate, Handle<Object> obj);
@@ -100,26 +68,10 @@ class Execution final : public AllStatic {
MUST_USE_RESULT static MaybeHandle<JSRegExp> NewJSRegExp(
Handle<String> pattern, Handle<String> flags);
- static Handle<Object> GetFunctionFor();
static Handle<String> GetStackTraceLine(Handle<Object> recv,
Handle<JSFunction> fun,
Handle<Object> pos,
Handle<Object> is_global);
-
- // Get a function delegate (or undefined) for the given non-function
- // object. Used for support calling objects as functions.
- static Handle<Object> GetFunctionDelegate(Isolate* isolate,
- Handle<Object> object);
- MUST_USE_RESULT static MaybeHandle<Object> TryGetFunctionDelegate(
- Isolate* isolate,
- Handle<Object> object);
-
- // Get a function delegate (or undefined) for the given non-function
- // object. Used for support calling objects as constructors.
- static Handle<Object> GetConstructorDelegate(Isolate* isolate,
- Handle<Object> object);
- static MaybeHandle<Object> TryGetConstructorDelegate(Isolate* isolate,
- Handle<Object> object);
};
@@ -200,10 +152,7 @@ class StackGuard final {
// If the stack guard is triggered, but it is not an actual
// stack overflow, then handle the interruption accordingly.
Object* HandleInterrupts();
-
- bool InterruptRequested() { return GetCurrentStackPosition() < climit(); }
-
- void CheckAndHandleGCInterrupt();
+ void HandleGCInterrupt();
private:
StackGuard();
diff --git a/deps/v8/src/expression-classifier.h b/deps/v8/src/expression-classifier.h
index 17a377890a..fb45f41fa1 100644
--- a/deps/v8/src/expression-classifier.h
+++ b/deps/v8/src/expression-classifier.h
@@ -28,28 +28,37 @@ class ExpressionClassifier {
enum TargetProduction {
ExpressionProduction = 1 << 0,
- BindingPatternProduction = 1 << 1,
- AssignmentPatternProduction = 1 << 2,
- DistinctFormalParametersProduction = 1 << 3,
- StrictModeFormalParametersProduction = 1 << 4,
- StrongModeFormalParametersProduction = 1 << 5,
- ArrowFormalParametersProduction = 1 << 6,
-
+ FormalParameterInitializerProduction = 1 << 1,
+ BindingPatternProduction = 1 << 2,
+ AssignmentPatternProduction = 1 << 3,
+ DistinctFormalParametersProduction = 1 << 4,
+ StrictModeFormalParametersProduction = 1 << 5,
+ StrongModeFormalParametersProduction = 1 << 6,
+ ArrowFormalParametersProduction = 1 << 7,
+
+ ExpressionProductions =
+ (ExpressionProduction | FormalParameterInitializerProduction),
PatternProductions =
(BindingPatternProduction | AssignmentPatternProduction),
FormalParametersProductions = (DistinctFormalParametersProduction |
StrictModeFormalParametersProduction |
StrongModeFormalParametersProduction),
- StandardProductions = ExpressionProduction | PatternProductions,
+ StandardProductions = ExpressionProductions | PatternProductions,
AllProductions = (StandardProductions | FormalParametersProductions |
ArrowFormalParametersProduction)
};
+ enum FunctionProperties { NonSimpleParameter = 1 << 0 };
+
ExpressionClassifier()
- : invalid_productions_(0), duplicate_finder_(nullptr) {}
+ : invalid_productions_(0),
+ function_properties_(0),
+ duplicate_finder_(nullptr) {}
explicit ExpressionClassifier(DuplicateFinder* duplicate_finder)
- : invalid_productions_(0), duplicate_finder_(duplicate_finder) {}
+ : invalid_productions_(0),
+ function_properties_(0),
+ duplicate_finder_(duplicate_finder) {}
bool is_valid(unsigned productions) const {
return (invalid_productions_ & productions) == 0;
@@ -59,6 +68,10 @@ class ExpressionClassifier {
bool is_valid_expression() const { return is_valid(ExpressionProduction); }
+ bool is_valid_formal_parameter_initializer() const {
+ return is_valid(FormalParameterInitializerProduction);
+ }
+
bool is_valid_binding_pattern() const {
return is_valid(BindingPatternProduction);
}
@@ -89,6 +102,10 @@ class ExpressionClassifier {
const Error& expression_error() const { return expression_error_; }
+ const Error& formal_parameter_initializer_error() const {
+ return formal_parameter_initializer_error_;
+ }
+
const Error& binding_pattern_error() const { return binding_pattern_error_; }
const Error& assignment_pattern_error() const {
@@ -111,6 +128,14 @@ class ExpressionClassifier {
return strong_mode_formal_parameter_error_;
}
+ bool is_simple_parameter_list() const {
+ return !(function_properties_ & NonSimpleParameter);
+ }
+
+ void RecordNonSimpleParameter() {
+ function_properties_ |= NonSimpleParameter;
+ }
+
void RecordExpressionError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
@@ -121,6 +146,16 @@ class ExpressionClassifier {
expression_error_.arg = arg;
}
+ void RecordFormalParameterInitializerError(const Scanner::Location& loc,
+ MessageTemplate::Template message,
+ const char* arg = nullptr) {
+ if (!is_valid_formal_parameter_initializer()) return;
+ invalid_productions_ |= FormalParameterInitializerProduction;
+ formal_parameter_initializer_error_.location = loc;
+ formal_parameter_initializer_error_.message = message;
+ formal_parameter_initializer_error_.arg = arg;
+ }
+
void RecordBindingPatternError(const Scanner::Location& loc,
MessageTemplate::Template message,
const char* arg = nullptr) {
@@ -198,6 +233,9 @@ class ExpressionClassifier {
invalid_productions_ |= errors;
if (errors & ExpressionProduction)
expression_error_ = inner.expression_error_;
+ if (errors & FormalParameterInitializerProduction)
+ formal_parameter_initializer_error_ =
+ inner.formal_parameter_initializer_error_;
if (errors & BindingPatternProduction)
binding_pattern_error_ = inner.binding_pattern_error_;
if (errors & AssignmentPatternProduction)
@@ -216,16 +254,23 @@ class ExpressionClassifier {
// As an exception to the above, the result remains a valid arrow formal
// parameter list if the inner expression is a valid binding pattern.
if (productions & ArrowFormalParametersProduction &&
- is_valid_arrow_formal_parameters() &&
- !inner.is_valid_binding_pattern()) {
- invalid_productions_ |= ArrowFormalParametersProduction;
- arrow_formal_parameters_error_ = inner.binding_pattern_error_;
+ is_valid_arrow_formal_parameters()) {
+ // Also copy function properties if expecting an arrow function
+ // parameter.
+ function_properties_ |= inner.function_properties_;
+
+ if (!inner.is_valid_binding_pattern()) {
+ invalid_productions_ |= ArrowFormalParametersProduction;
+ arrow_formal_parameters_error_ = inner.binding_pattern_error_;
+ }
}
}
private:
unsigned invalid_productions_;
+ unsigned function_properties_;
Error expression_error_;
+ Error formal_parameter_initializer_error_;
Error binding_pattern_error_;
Error assignment_pattern_error_;
Error arrow_formal_parameters_error_;
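
Taken together, the ExpressionClassifier changes above follow one pattern: each grammar production owns a bit in invalid_productions_, only the first error per production is recorded, and Accumulate() ORs a nested classifier's failures into its parent. A minimal standalone sketch of that pattern, using a hypothetical MiniClassifier with plain C-string errors rather than V8's classes:

#include <cstdio>

// Sketch of the bitmask error-accumulation pattern used by
// ExpressionClassifier: one bit per production, first error wins.
class MiniClassifier {
 public:
  enum Production : unsigned {
    kExpression = 1 << 0,
    kBindingPattern = 1 << 1,
  };

  bool is_valid(unsigned productions) const {
    return (invalid_ & productions) == 0;
  }

  void RecordBindingPatternError(const char* message) {
    if (!is_valid(kBindingPattern)) return;  // keep the first error only
    invalid_ |= kBindingPattern;
    binding_pattern_error_ = message;
  }

  // Merge a nested classifier's failures into this one, but only for
  // productions the caller still cares about and that are not already bad.
  void Accumulate(const MiniClassifier& inner, unsigned productions) {
    unsigned errors = inner.invalid_ & productions & ~invalid_;
    invalid_ |= errors;
    if (errors & kBindingPattern)
      binding_pattern_error_ = inner.binding_pattern_error_;
  }

 private:
  unsigned invalid_ = 0;
  const char* binding_pattern_error_ = nullptr;
};

int main() {
  MiniClassifier outer, inner;
  inner.RecordBindingPatternError("invalid destructuring target");
  outer.Accumulate(inner, MiniClassifier::kBindingPattern);
  std::printf("binding pattern valid: %d\n",
              outer.is_valid(MiniClassifier::kBindingPattern));
}
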
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 3eaa70e515..9241e9f4de 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -98,7 +98,7 @@ void ExternalizeStringExtension::Externalize(
result = string->MakeExternal(resource);
if (result) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- isolate->heap()->external_string_table()->AddString(*string);
+ isolate->heap()->RegisterExternalString(*string);
}
if (!result) delete resource;
} else {
@@ -109,7 +109,7 @@ void ExternalizeStringExtension::Externalize(
result = string->MakeExternal(resource);
if (result) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
- isolate->heap()->external_string_table()->AddString(*string);
+ isolate->heap()->RegisterExternalString(*string);
}
if (!result) delete resource;
}
diff --git a/deps/v8/src/extensions/free-buffer-extension.cc b/deps/v8/src/extensions/free-buffer-extension.cc
index b642b3df30..5bd56312a5 100644
--- a/deps/v8/src/extensions/free-buffer-extension.cc
+++ b/deps/v8/src/extensions/free-buffer-extension.cc
@@ -5,7 +5,7 @@
#include "src/extensions/free-buffer-extension.h"
#include "src/base/platform/platform.h"
-#include "src/v8.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 495167695a..76dcd433af 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -4,7 +4,9 @@
#include "src/extensions/statistics-extension.h"
-#include "src/v8.h"
+#include "src/counters.h"
+#include "src/heap/heap-inl.h"
+#include "src/isolate.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/trigger-failure-extension.cc b/deps/v8/src/extensions/trigger-failure-extension.cc
index 672c1a7064..9aa8c2dcfe 100644
--- a/deps/v8/src/extensions/trigger-failure-extension.cc
+++ b/deps/v8/src/extensions/trigger-failure-extension.cc
@@ -3,7 +3,9 @@
// found in the LICENSE file.
#include "src/extensions/trigger-failure-extension.h"
-#include "src/v8.h"
+
+#include "src/base/logging.h"
+#include "src/checks.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index db11ebb390..8923d071b1 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -8,12 +8,51 @@
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/conversions.h"
+#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
+// Calls the FUNCTION_CALL function and retries it up to three times
+// to guarantee that any allocations performed during the call will
+// succeed if there's enough memory.
+//
+// Warning: Do not use the identifiers __object__, __maybe_object__,
+// __allocation__ or __scope__ in a call to this macro.
+
+#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE) \
+ if (__allocation__.To(&__object__)) { \
+ DCHECK(__object__ != (ISOLATE)->heap()->exception()); \
+ return Handle<TYPE>(TYPE::cast(__object__), ISOLATE); \
+ }
+
+#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
+ do { \
+ AllocationResult __allocation__ = FUNCTION_CALL; \
+ Object* __object__ = NULL; \
+ RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE) \
+ /* Two GCs before panicking; in new space this almost always succeeds. */ \
+ for (int __i__ = 0; __i__ < 2; __i__++) { \
+ (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(), \
+ "allocation failure"); \
+ __allocation__ = FUNCTION_CALL; \
+ RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE) \
+ } \
+ (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \
+ (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \
+ { \
+ AlwaysAllocateScope __scope__(ISOLATE); \
+ __allocation__ = FUNCTION_CALL; \
+ } \
+ RETURN_OBJECT_UNLESS_RETRY(ISOLATE, TYPE) \
+ /* TODO(1181417): Fix this. */ \
+ v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
+ return Handle<TYPE>(); \
+ } while (false)
+
+
template<typename T>
Handle<T> Factory::New(Handle<Map> map, AllocationSpace space) {
CALL_HEAP_FUNCTION(
@@ -62,6 +101,19 @@ Handle<PrototypeInfo> Factory::NewPrototypeInfo() {
}
+Handle<SloppyBlockWithEvalContextExtension>
+Factory::NewSloppyBlockWithEvalContextExtension(
+ Handle<ScopeInfo> scope_info, Handle<JSObject> extension) {
+ DCHECK(scope_info->is_declaration_scope());
+ Handle<SloppyBlockWithEvalContextExtension> result =
+ Handle<SloppyBlockWithEvalContextExtension>::cast(
+ NewStruct(SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE));
+ result->set_scope_info(*scope_info);
+ result->set_extension(*extension);
+ return result;
+}
+
+
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
const char* type_of, byte kind) {
@@ -526,30 +578,10 @@ MaybeHandle<String> Factory::NewConsString(Handle<String> left,
NewRawTwoByteString(length).ToHandleChecked(), left, right);
}
- return (is_one_byte || is_one_byte_data_in_two_byte_string)
- ? NewOneByteConsString(length, left, right)
- : NewTwoByteConsString(length, left, right);
-}
-
-
-MaybeHandle<String> Factory::NewOneByteConsString(int length,
- Handle<String> left,
- Handle<String> right) {
- return NewRawConsString(cons_one_byte_string_map(), length, left, right);
-}
-
-
-MaybeHandle<String> Factory::NewTwoByteConsString(int length,
- Handle<String> left,
- Handle<String> right) {
- return NewRawConsString(cons_string_map(), length, left, right);
-}
-
-
-MaybeHandle<String> Factory::NewRawConsString(Handle<Map> map, int length,
- Handle<String> left,
- Handle<String> right) {
- Handle<ConsString> result = New<ConsString>(map, NEW_SPACE);
+ Handle<ConsString> result =
+ (is_one_byte || is_one_byte_data_in_two_byte_string)
+ ? New<ConsString>(cons_one_byte_string_map(), NEW_SPACE)
+ : New<ConsString>(cons_string_map(), NEW_SPACE);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
@@ -837,17 +869,18 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_source(*source);
script->set_name(heap->undefined_value());
script->set_id(isolate()->heap()->NextScriptId());
- script->set_line_offset(Smi::FromInt(0));
- script->set_column_offset(Smi::FromInt(0));
+ script->set_line_offset(0);
+ script->set_column_offset(0);
script->set_context_data(heap->undefined_value());
- script->set_type(Smi::FromInt(Script::TYPE_NORMAL));
+ script->set_type(Script::TYPE_NORMAL);
script->set_wrapper(heap->undefined_value());
script->set_line_ends(heap->undefined_value());
script->set_eval_from_shared(heap->undefined_value());
- script->set_eval_from_instructions_offset(Smi::FromInt(0));
+ script->set_eval_from_instructions_offset(0);
script->set_shared_function_infos(Smi::FromInt(0));
- script->set_flags(Smi::FromInt(0));
+ script->set_flags(0);
+ heap->set_script_list(*WeakFixedArray::Add(script_list(), script));
return script;
}
@@ -873,12 +906,13 @@ Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
}
-Handle<BytecodeArray> Factory::NewBytecodeArray(int length,
- const byte* raw_bytecodes,
- int frame_size) {
+Handle<BytecodeArray> Factory::NewBytecodeArray(
+ int length, const byte* raw_bytecodes, int frame_size, int parameter_count,
+ Handle<FixedArray> constant_pool) {
DCHECK(0 <= length);
CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateBytecodeArray(
- length, raw_bytecodes, frame_size),
+ length, raw_bytecodes, frame_size,
+ parameter_count, *constant_pool),
BytecodeArray);
}
@@ -1121,6 +1155,22 @@ Handle<Object> Factory::NewError(Handle<JSFunction> constructor,
}
+#define DEFINE_ERROR(NAME, name) \
+ Handle<Object> Factory::New##NAME(MessageTemplate::Template template_index, \
+ Handle<Object> arg0, Handle<Object> arg1, \
+ Handle<Object> arg2) { \
+ return NewError(isolate()->name##_function(), template_index, arg0, arg1, \
+ arg2); \
+ }
+DEFINE_ERROR(Error, error)
+DEFINE_ERROR(EvalError, eval_error)
+DEFINE_ERROR(RangeError, range_error)
+DEFINE_ERROR(ReferenceError, reference_error)
+DEFINE_ERROR(SyntaxError, syntax_error)
+DEFINE_ERROR(TypeError, type_error)
+#undef DEFINE_ERROR
+
+
void Factory::InitializeFunction(Handle<JSFunction> function,
Handle<SharedFunctionInfo> info,
Handle<Context> context) {
@@ -1282,17 +1332,26 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
context->native_context(), BailoutId::None());
if (cached.code != nullptr) {
// Caching of optimized code enabled and optimized code found.
- if (cached.literals != nullptr) result->set_literals(cached.literals);
DCHECK(!cached.code->marked_for_deoptimization());
DCHECK(result->shared()->is_compiled());
result->ReplaceCode(cached.code);
}
- if (cached.literals == nullptr && !info->bound()) {
+ if (cached.literals != nullptr) {
+ result->set_literals(cached.literals);
+
+ } else if (!info->bound()) {
int number_of_literals = info->num_literals();
- // TODO(mstarzinger): Consider sharing the newly created literals array.
- Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
+ Handle<LiteralsArray> literals =
+ LiteralsArray::New(isolate(), handle(info->feedback_vector()),
+ number_of_literals, pretenure);
result->set_literals(*literals);
+ // Cache context-specific literals.
+ if (FLAG_cache_optimized_code) {
+ Handle<Context> native_context(context->native_context());
+ SharedFunctionInfo::AddToOptimizedCodeMap(
+ info, native_context, undefined_value(), literals, BailoutId::None());
+ }
}
return result;
@@ -1594,15 +1653,15 @@ Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
}
-Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared) {
+Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
+ PretenureFlag pretenure) {
Handle<JSFunction> array_buffer_fun(
shared == SharedFlag::kShared
? isolate()->native_context()->shared_array_buffer_fun()
: isolate()->native_context()->array_buffer_fun());
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObject(*array_buffer_fun),
- JSArrayBuffer);
+ CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateJSObject(
+ *array_buffer_fun, pretenure),
+ JSArrayBuffer);
}
@@ -1619,7 +1678,7 @@ Handle<JSDataView> Factory::NewJSDataView() {
Handle<JSMap> Factory::NewJSMap() {
Handle<Map> map(isolate()->native_context()->js_map_map());
Handle<JSMap> js_map = Handle<JSMap>::cast(NewJSObjectFromMap(map));
- Runtime::JSMapInitialize(isolate(), js_map);
+ JSMap::Initialize(js_map, isolate());
return js_map;
}
@@ -1627,7 +1686,7 @@ Handle<JSMap> Factory::NewJSMap() {
Handle<JSSet> Factory::NewJSSet() {
Handle<Map> map(isolate()->native_context()->js_set_map());
Handle<JSSet> js_set = Handle<JSSet>::cast(NewJSObjectFromMap(map));
- Runtime::JSSetInitialize(isolate(), js_set);
+ JSSet::Initialize(js_set, isolate());
return js_set;
}
@@ -1648,6 +1707,16 @@ Handle<JSSetIterator> Factory::NewJSSetIterator() {
}
+Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
+ Handle<Object> done) {
+ Handle<JSIteratorResult> result = Handle<JSIteratorResult>::cast(
+ NewJSObjectFromMap(isolate()->iterator_result_map()));
+ result->set_value(*value);
+ result->set_done(*done);
+ return result;
+}
+
+
namespace {
ElementsKind GetExternalArrayElementsKind(ExternalArrayType type) {
@@ -1742,18 +1811,19 @@ JSFunction* GetTypedArrayFun(ElementsKind elements_kind, Isolate* isolate) {
void SetupArrayBufferView(i::Isolate* isolate,
i::Handle<i::JSArrayBufferView> obj,
i::Handle<i::JSArrayBuffer> buffer,
- size_t byte_offset, size_t byte_length) {
+ size_t byte_offset, size_t byte_length,
+ PretenureFlag pretenure = NOT_TENURED) {
DCHECK(byte_offset + byte_length <=
static_cast<size_t>(buffer->byte_length()->Number()));
obj->set_buffer(*buffer);
i::Handle<i::Object> byte_offset_object =
- isolate->factory()->NewNumberFromSize(byte_offset);
+ isolate->factory()->NewNumberFromSize(byte_offset, pretenure);
obj->set_byte_offset(*byte_offset_object);
i::Handle<i::Object> byte_length_object =
- isolate->factory()->NewNumberFromSize(byte_length);
+ isolate->factory()->NewNumberFromSize(byte_length, pretenure);
obj->set_byte_length(*byte_length_object);
}
@@ -1761,31 +1831,32 @@ void SetupArrayBufferView(i::Isolate* isolate,
} // namespace
-Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type) {
+Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
+ PretenureFlag pretenure) {
Handle<JSFunction> typed_array_fun_handle(GetTypedArrayFun(type, isolate()));
- CALL_HEAP_FUNCTION(
- isolate(),
- isolate()->heap()->AllocateJSObject(*typed_array_fun_handle),
- JSTypedArray);
+ CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateJSObject(
+ *typed_array_fun_handle, pretenure),
+ JSTypedArray);
}
-Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind) {
+Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
+ PretenureFlag pretenure) {
Handle<JSFunction> typed_array_fun_handle(
GetTypedArrayFun(elements_kind, isolate()));
- CALL_HEAP_FUNCTION(
- isolate(), isolate()->heap()->AllocateJSObject(*typed_array_fun_handle),
- JSTypedArray);
+ CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateJSObject(
+ *typed_array_fun_handle, pretenure),
+ JSTypedArray);
}
Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSArrayBuffer> buffer,
- size_t byte_offset,
- size_t length) {
- Handle<JSTypedArray> obj = NewJSTypedArray(type);
+ size_t byte_offset, size_t length,
+ PretenureFlag pretenure) {
+ Handle<JSTypedArray> obj = NewJSTypedArray(type, pretenure);
size_t element_size = GetExternalArrayElementSize(type);
ElementsKind elements_kind = GetExternalArrayElementsKind(type);
@@ -1795,14 +1866,15 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
CHECK(length <= (std::numeric_limits<size_t>::max() / element_size));
CHECK(length <= static_cast<size_t>(Smi::kMaxValue));
size_t byte_length = length * element_size;
- SetupArrayBufferView(isolate(), obj, buffer, byte_offset, byte_length);
+ SetupArrayBufferView(isolate(), obj, buffer, byte_offset, byte_length,
+ pretenure);
- Handle<Object> length_object = NewNumberFromSize(length);
+ Handle<Object> length_object = NewNumberFromSize(length, pretenure);
obj->set_length(*length_object);
Handle<FixedTypedArrayBase> elements = NewFixedTypedArrayWithExternalPointer(
static_cast<int>(length), type,
- static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset, pretenure);
Handle<Map> map = JSObject::GetElementsTransitionMap(obj, elements_kind);
JSObject::SetMapAndElements(obj, map, elements);
return obj;
@@ -1810,8 +1882,9 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
- size_t number_of_elements) {
- Handle<JSTypedArray> obj = NewJSTypedArray(elements_kind);
+ size_t number_of_elements,
+ PretenureFlag pretenure) {
+ Handle<JSTypedArray> obj = NewJSTypedArray(elements_kind, pretenure);
size_t element_size = GetFixedTypedArraysElementSize(elements_kind);
ExternalArrayType array_type = GetArrayTypeFromElementsKind(elements_kind);
@@ -1823,18 +1896,19 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ElementsKind elements_kind,
obj->set_byte_offset(Smi::FromInt(0));
i::Handle<i::Object> byte_length_object =
- isolate()->factory()->NewNumberFromSize(byte_length);
+ NewNumberFromSize(byte_length, pretenure);
obj->set_byte_length(*byte_length_object);
- Handle<Object> length_object = NewNumberFromSize(number_of_elements);
+ Handle<Object> length_object =
+ NewNumberFromSize(number_of_elements, pretenure);
obj->set_length(*length_object);
- Handle<JSArrayBuffer> buffer = isolate()->factory()->NewJSArrayBuffer();
- Runtime::SetupArrayBuffer(isolate(), buffer, true, NULL, byte_length,
- SharedFlag::kNotShared);
+ Handle<JSArrayBuffer> buffer =
+ NewJSArrayBuffer(SharedFlag::kNotShared, pretenure);
+ JSArrayBuffer::Setup(buffer, isolate(), true, NULL, byte_length,
+ SharedFlag::kNotShared);
obj->set_buffer(*buffer);
- Handle<FixedTypedArrayBase> elements =
- isolate()->factory()->NewFixedTypedArray(
- static_cast<int>(number_of_elements), array_type, true);
+ Handle<FixedTypedArrayBase> elements = NewFixedTypedArray(
+ static_cast<int>(number_of_elements), array_type, true, pretenure);
obj->set_elements(*elements);
return obj;
}
@@ -1867,7 +1941,7 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
Handle<JSProxy> Factory::NewJSFunctionProxy(Handle<Object> handler,
- Handle<Object> call_trap,
+ Handle<JSReceiver> call_trap,
Handle<Object> construct_trap,
Handle<Object> prototype) {
// Allocate map.
@@ -1875,6 +1949,8 @@ Handle<JSProxy> Factory::NewJSFunctionProxy(Handle<Object> handler,
// maps. Will probably depend on the identity of the handler object, too.
Handle<Map> map = NewMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
Map::SetPrototype(map, prototype);
+ map->set_is_callable();
+ map->set_is_constructor(construct_trap->IsCallable());
// Allocate the proxy object.
Handle<JSFunctionProxy> result = New<JSFunctionProxy>(map, NEW_SPACE);
@@ -1936,7 +2012,8 @@ void Factory::ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type,
// Functions require some minimal initialization.
if (type == JS_FUNCTION_TYPE) {
- map->set_function_with_prototype(true);
+ map->set_is_constructor(true);
+ map->set_is_callable();
Handle<JSFunction> js_function = Handle<JSFunction>::cast(proxy);
InitializeFunction(js_function, shared.ToHandleChecked(), context);
} else {
@@ -2001,9 +2078,9 @@ void Factory::BecomeJSFunction(Handle<JSProxy> proxy) {
template Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(
- const ZoneFeedbackVectorSpec* spec);
-template Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(
const FeedbackVectorSpec* spec);
+template Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(
+ const StaticFeedbackVectorSpec* spec);
template <typename Spec>
Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(const Spec* spec) {
@@ -2071,7 +2148,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
- FeedbackVectorSpec empty_spec(0);
+ StaticFeedbackVectorSpec empty_spec;
Handle<TypeFeedbackVector> feedback_vector =
NewTypeFeedbackVector(&empty_spec);
share->set_feedback_vector(*feedback_vector, SKIP_WRITE_BARRIER);
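
The CALL_HEAP_FUNCTION macro moved into factory.cc above encodes a fixed retry policy: attempt the allocation, run up to two targeted GCs with a retry after each, then one full last-resort GC before treating the failure as fatal. Stripped of V8's handle and AllocationResult machinery, the control flow looks roughly like this (a sketch with caller-supplied callbacks, not V8's API):

#include <cstdlib>
#include <functional>
#include <optional>

// Rough shape of the CALL_HEAP_FUNCTION retry policy, with handles and
// AllocationResult replaced by std::optional and plain callbacks.
template <typename T>
T AllocateWithRetry(const std::function<std::optional<T>()>& allocate,
                    const std::function<void()>& collect_garbage,
                    const std::function<void()>& collect_all_garbage) {
  if (auto result = allocate()) return *result;
  // Two targeted GCs before panicking; new-space allocations almost
  // always succeed after this.
  for (int i = 0; i < 2; ++i) {
    collect_garbage();
    if (auto result = allocate()) return *result;
  }
  // Last resort: collect everything once, then make a final attempt.
  collect_all_garbage();
  if (auto result = allocate()) return *result;
  std::abort();  // mirrors Heap::FatalProcessOutOfMemory
}
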
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 2c3d687786..b7602e023b 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -11,7 +11,7 @@
namespace v8 {
namespace internal {
-class FeedbackVectorSpec;
+class StaticFeedbackVectorSpec;
// Interface for handle based allocation.
class Factory final {
@@ -54,6 +54,11 @@ class Factory final {
// Create a new PrototypeInfo struct.
Handle<PrototypeInfo> NewPrototypeInfo();
+ // Create a new SloppyBlockWithEvalContextExtension struct.
+ Handle<SloppyBlockWithEvalContextExtension>
+ NewSloppyBlockWithEvalContextExtension(Handle<ScopeInfo> scope_info,
+ Handle<JSObject> extension);
+
// Create a pre-tenured empty AccessorPair.
Handle<AccessorPair> NewAccessorPair();
@@ -151,24 +156,21 @@ class Factory final {
// Allocates an internalized string in old space based on the character
// stream.
- MUST_USE_RESULT Handle<String> NewInternalizedStringFromUtf8(
- Vector<const char> str,
- int chars,
- uint32_t hash_field);
+ Handle<String> NewInternalizedStringFromUtf8(Vector<const char> str,
+ int chars, uint32_t hash_field);
- MUST_USE_RESULT Handle<String> NewOneByteInternalizedString(
- Vector<const uint8_t> str, uint32_t hash_field);
+ Handle<String> NewOneByteInternalizedString(Vector<const uint8_t> str,
+ uint32_t hash_field);
- MUST_USE_RESULT Handle<String> NewOneByteInternalizedSubString(
+ Handle<String> NewOneByteInternalizedSubString(
Handle<SeqOneByteString> string, int offset, int length,
uint32_t hash_field);
- MUST_USE_RESULT Handle<String> NewTwoByteInternalizedString(
- Vector<const uc16> str,
- uint32_t hash_field);
+ Handle<String> NewTwoByteInternalizedString(Vector<const uc16> str,
+ uint32_t hash_field);
- MUST_USE_RESULT Handle<String> NewInternalizedStringImpl(
- Handle<String> string, int chars, uint32_t hash_field);
+ Handle<String> NewInternalizedStringImpl(Handle<String> string, int chars,
+ uint32_t hash_field);
// Compute the matching internalized string map for a string if possible.
// Empty handle is returned if string is in new space or not flattened.
@@ -192,14 +194,6 @@ class Factory final {
// Create a new cons string object which consists of a pair of strings.
MUST_USE_RESULT MaybeHandle<String> NewConsString(Handle<String> left,
Handle<String> right);
- MUST_USE_RESULT MaybeHandle<String> NewOneByteConsString(
- int length, Handle<String> left, Handle<String> right);
- MUST_USE_RESULT MaybeHandle<String> NewTwoByteConsString(
- int length, Handle<String> left, Handle<String> right);
- MUST_USE_RESULT MaybeHandle<String> NewRawConsString(Handle<Map> map,
- int length,
- Handle<String> left,
- Handle<String> right);
// Create a new string object which holds a proper substring of a string.
Handle<String> NewProperSubString(Handle<String> str,
@@ -283,7 +277,8 @@ class Factory final {
PretenureFlag pretenure = NOT_TENURED);
Handle<BytecodeArray> NewBytecodeArray(int length, const byte* raw_bytecodes,
- int frame_size);
+ int frame_size, int parameter_count,
+ Handle<FixedArray> constant_pool);
Handle<FixedTypedArrayBase> NewFixedTypedArrayWithExternalPointer(
int length, ExternalArrayType array_type, void* external_pointer,
@@ -443,20 +438,25 @@ class Factory final {
Handle<JSGeneratorObject> NewJSGeneratorObject(Handle<JSFunction> function);
Handle<JSArrayBuffer> NewJSArrayBuffer(
- SharedFlag shared = SharedFlag::kNotShared);
+ SharedFlag shared = SharedFlag::kNotShared,
+ PretenureFlag pretenure = NOT_TENURED);
- Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type);
+ Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type,
+ PretenureFlag pretenure = NOT_TENURED);
- Handle<JSTypedArray> NewJSTypedArray(ElementsKind elements_kind);
+ Handle<JSTypedArray> NewJSTypedArray(ElementsKind elements_kind,
+ PretenureFlag pretenure = NOT_TENURED);
// Creates a new JSTypedArray with the specified buffer.
Handle<JSTypedArray> NewJSTypedArray(ExternalArrayType type,
Handle<JSArrayBuffer> buffer,
- size_t byte_offset, size_t length);
+ size_t byte_offset, size_t length,
+ PretenureFlag pretenure = NOT_TENURED);
// Creates a new on-heap JSTypedArray.
Handle<JSTypedArray> NewJSTypedArray(ElementsKind elements_kind,
- size_t number_of_elements);
+ size_t number_of_elements,
+ PretenureFlag pretenure = NOT_TENURED);
Handle<JSDataView> NewJSDataView();
Handle<JSDataView> NewJSDataView(Handle<JSArrayBuffer> buffer,
@@ -469,12 +469,17 @@ class Factory final {
Handle<JSMapIterator> NewJSMapIterator();
Handle<JSSetIterator> NewJSSetIterator();
+ // Creates a new JSIteratorResult object with the arguments {value} and
+ // {done}. Implemented according to ES6 section 7.4.7 CreateIterResultObject.
+ Handle<JSIteratorResult> NewJSIteratorResult(Handle<Object> value,
+ Handle<Object> done);
+
// Allocates a Harmony proxy.
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
// Allocates a Harmony function proxy.
Handle<JSProxy> NewJSFunctionProxy(Handle<Object> handler,
- Handle<Object> call_trap,
+ Handle<JSReceiver> call_trap,
Handle<Object> construct_trap,
Handle<Object> prototype);
@@ -551,21 +556,17 @@ class Factory final {
Handle<Object> arg1 = Handle<Object>(),
Handle<Object> arg2 = Handle<Object>());
-#define DEFINE_ERROR(NAME, name) \
- Handle<Object> New##NAME(MessageTemplate::Template template_index, \
- Handle<Object> arg0 = Handle<Object>(), \
- Handle<Object> arg1 = Handle<Object>(), \
- Handle<Object> arg2 = Handle<Object>()) { \
- return NewError(isolate()->name##_function(), template_index, arg0, arg1, \
- arg2); \
- }
-
- DEFINE_ERROR(Error, error)
- DEFINE_ERROR(EvalError, eval_error)
- DEFINE_ERROR(RangeError, range_error)
- DEFINE_ERROR(ReferenceError, reference_error)
- DEFINE_ERROR(SyntaxError, syntax_error)
- DEFINE_ERROR(TypeError, type_error)
+#define DECLARE_ERROR(NAME) \
+ Handle<Object> New##NAME(MessageTemplate::Template template_index, \
+ Handle<Object> arg0 = Handle<Object>(), \
+ Handle<Object> arg1 = Handle<Object>(), \
+ Handle<Object> arg2 = Handle<Object>());
+ DECLARE_ERROR(Error)
+ DECLARE_ERROR(EvalError)
+ DECLARE_ERROR(RangeError)
+ DECLARE_ERROR(ReferenceError)
+ DECLARE_ERROR(SyntaxError)
+ DECLARE_ERROR(TypeError)
-#undef DEFINE_ERROR
+#undef DECLARE_ERROR
Handle<String> NumberToString(Handle<Object> number,
@@ -609,7 +610,7 @@ class Factory final {
PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
-#define SYMBOL_ACCESSOR(name, varname, description) \
+#define SYMBOL_ACCESSOR(name, description) \
inline Handle<Symbol> name() { \
return Handle<Symbol>(bit_cast<Symbol**>( \
&isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
@@ -617,18 +618,6 @@ class Factory final {
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
- inline void set_string_table(Handle<StringTable> table) {
- isolate()->heap()->set_string_table(*table);
- }
-
- inline void set_weak_stack_trace_list(Handle<WeakFixedArray> list) {
- isolate()->heap()->set_weak_stack_trace_list(*list);
- }
-
- Handle<String> hidden_string() {
- return Handle<String>(&isolate()->heap()->hidden_string_);
- }
-
// Allocates a new SharedFunctionInfo object.
Handle<SharedFunctionInfo> NewSharedFunctionInfo(
Handle<String> name, int number_of_literals, FunctionKind kind,
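
The factory.h changes above thread an optional PretenureFlag through the JSArrayBuffer and JSTypedArray constructors, defaulting to NOT_TENURED so existing call sites keep their behavior. A call site that wants old-space allocation would now look something like this (a hypothetical snippet inside v8::internal, assuming FLOAT64_ELEMENTS for an on-heap typed array):

// Hypothetical call site: request old-space (tenured) allocation explicitly;
// omitting the last argument keeps the NOT_TENURED default.
Handle<JSTypedArray> array =
    isolate->factory()->NewJSTypedArray(FLOAT64_ELEMENTS, 16, TENURED);
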
diff --git a/deps/v8/src/fast-dtoa.h b/deps/v8/src/fast-dtoa.h
index d96c296f15..38e8a82499 100644
--- a/deps/v8/src/fast-dtoa.h
+++ b/deps/v8/src/fast-dtoa.h
@@ -5,6 +5,8 @@
#ifndef V8_FAST_DTOA_H_
#define V8_FAST_DTOA_H_
+#include "src/vector.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/fixed-dtoa.h b/deps/v8/src/fixed-dtoa.h
index b6495c11ef..0a6cb50818 100644
--- a/deps/v8/src/fixed-dtoa.h
+++ b/deps/v8/src/fixed-dtoa.h
@@ -5,6 +5,8 @@
#ifndef V8_FIXED_DTOA_H_
#define V8_FIXED_DTOA_H_
+#include "src/vector.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 2b12e65789..e9e1363939 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -167,6 +167,9 @@ struct MaybeBoolFlag {
//
#define FLAG FLAG_FULL
+DEFINE_BOOL(experimental_extras, false,
+ "enable code compiled in via v8_experimental_extra_library_files")
+
// Flags for language modes and experimental language features.
DEFINE_BOOL(use_strict, false, "enforce strict mode")
DEFINE_BOOL(use_strong, false, "enforce strong mode")
@@ -189,32 +192,31 @@ DEFINE_BOOL(legacy_const, true, "legacy semantics for const in sloppy mode")
V(harmony_modules, "harmony modules") \
V(harmony_regexps, "harmony regular expression extensions") \
V(harmony_proxies, "harmony proxies") \
- V(harmony_sloppy_function, "harmony sloppy function block scoping") \
- V(harmony_sloppy_let, "harmony let in sloppy mode") \
V(harmony_unicode_regexps, "harmony unicode regexps") \
+ V(harmony_tolength, "harmony ToLength") \
V(harmony_reflect, "harmony Reflect API") \
+ V(harmony_sloppy_function, "harmony sloppy function block scoping") \
V(harmony_destructuring, "harmony destructuring") \
V(harmony_default_parameters, "harmony default parameters") \
V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_atomics, "harmony atomics") \
V(harmony_simd, "harmony simd")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) \
- V(harmony_array_includes, "harmony Array.prototype.includes") \
- V(harmony_tostring, "harmony toString") \
- V(harmony_concat_spreadable, "harmony isConcatSpreadable") \
- V(harmony_rest_parameters, "harmony rest parameters") \
- V(harmony_sloppy, "harmony features in sloppy mode")
+#define HARMONY_STAGED(V) \
+ V(harmony_tostring, "harmony toString") \
+ V(harmony_concat_spreadable, "harmony isConcatSpreadable") \
+ V(harmony_sloppy, "harmony features in sloppy mode") \
+ V(harmony_sloppy_let, "harmony let in sloppy mode")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
- V(harmony_arrow_functions, "harmony arrow functions") \
- V(harmony_new_target, "harmony new.target") \
- V(harmony_object_observe, "harmony Object.observe") \
- V(harmony_spreadcalls, "harmony spread-calls") \
- V(harmony_spread_arrays, "harmony spread in array literals") \
- V(harmony_object, "harmony Object methods")
+#define HARMONY_SHIPPING(V) \
+ V(harmony_arrow_functions, "harmony arrow functions") \
+ V(harmony_array_includes, "harmony Array.prototype.includes") \
+ V(harmony_new_target, "harmony new.target") \
+ V(harmony_object_observe, "harmony Object.observe") \
+ V(harmony_rest_parameters, "harmony rest parameters") \
+ V(harmony_spread_calls, "harmony spread-calls") \
+ V(harmony_spread_arrays, "harmony spread in array literals")
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -242,14 +244,15 @@ HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
// Feature dependencies.
DEFINE_IMPLICATION(harmony_sloppy_let, harmony_sloppy)
+DEFINE_IMPLICATION(harmony_sloppy_function, harmony_sloppy)
+// Destructuring shares too much parsing architecture with default parameters
+// to be enabled on its own.
+DEFINE_IMPLICATION(harmony_destructuring, harmony_default_parameters)
// Flags for experimental implementation features.
DEFINE_BOOL(compiled_keyed_generic_loads, false,
"use optimizing compiler to generate keyed generic load stubs")
-// TODO(hpayer): We will remove this flag as soon as we have pretenuring
-// support for specific allocation sites.
-DEFINE_BOOL(pretenuring_call_new, false, "pretenure call new")
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
DEFINE_BOOL(trace_pretenuring, false,
@@ -281,6 +284,7 @@ DEFINE_BOOL(string_slices, true, "use string slices")
// Flags for Ignition.
DEFINE_BOOL(ignition, false, "use ignition interpreter")
+DEFINE_IMPLICATION(ignition, vector_stores)
DEFINE_STRING(ignition_filter, "~~", "filter for ignition interpreter")
DEFINE_BOOL(print_bytecode, false,
"print bytecode generated by ignition interpreter")
@@ -399,9 +403,10 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
DEFINE_BOOL(turbo, false, "enable TurboFan compiler")
DEFINE_BOOL(turbo_shipping, true, "enable TurboFan compiler on subset")
DEFINE_BOOL(turbo_greedy_regalloc, false, "use the greedy register allocator")
-DEFINE_BOOL(turbo_preprocess_ranges, false,
+DEFINE_BOOL(turbo_preprocess_ranges, true,
"run pre-register allocation heuristics")
-DEFINE_IMPLICATION(turbo_greedy_regalloc, turbo_preprocess_ranges)
+DEFINE_BOOL(turbo_loop_stackcheck, true, "enable stack checks in loops")
+
DEFINE_IMPLICATION(turbo, turbo_asm_deoptimization)
DEFINE_STRING(turbo_filter, "~~", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
@@ -426,8 +431,8 @@ DEFINE_BOOL(turbo_allocate, false, "enable inline allocations in TurboFan")
DEFINE_BOOL(turbo_source_positions, false,
"track source code positions when building TurboFan IR")
DEFINE_IMPLICATION(trace_turbo, turbo_source_positions)
-DEFINE_BOOL(context_specialization, false,
- "enable context specialization in TurboFan")
+DEFINE_BOOL(function_context_specialization, false,
+ "enable function context specialization in TurboFan")
DEFINE_BOOL(turbo_inlining, false, "enable inlining in TurboFan")
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
DEFINE_BOOL(loop_assignment_analysis, true, "perform loop assignment analysis")
@@ -437,7 +442,6 @@ DEFINE_BOOL(turbo_verify_allocation, DEBUG_BOOL,
DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
DEFINE_BOOL(turbo_osr, true, "enable OSR in TurboFan")
-DEFINE_BOOL(turbo_try_catch, true, "enable try-catch support in TurboFan")
DEFINE_BOOL(turbo_try_finally, false, "enable try-finally support in TurboFan")
DEFINE_BOOL(turbo_stress_loop_peeling, false,
"stress loop peeling optimization")
@@ -656,6 +660,7 @@ DEFINE_INT(min_progress_during_object_groups_marking, 128,
DEFINE_INT(max_object_groups_marking_rounds, 3,
"at most try this many times to over approximate the weak closure")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
+DEFINE_BOOL(parallel_compaction, false, "use parallel compaction")
DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_BOOL(track_gc_object_stats, false,
@@ -671,6 +676,7 @@ DEFINE_IMPLICATION(trace_detached_contexts, track_detached_contexts)
#ifdef VERIFY_HEAP
DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
#endif
+DEFINE_BOOL(move_object_start, false, "enable moving of object starts")
// counters.cc
DEFINE_INT(histogram_interval, 600000,
@@ -689,7 +695,7 @@ DEFINE_BOOL(use_idle_notification, true,
DEFINE_BOOL(use_ic, true, "use inline caching")
DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
DEFINE_BOOL(vector_stores, false, "use vectors for store ics")
-DEFINE_BOOL(global_var_shortcuts, false, "use ic-less global loads and stores")
+DEFINE_BOOL(global_var_shortcuts, true, "use ic-less global loads and stores")
// macro-assembler-ia32.cc
DEFINE_BOOL(native_code_counters, false,
@@ -793,6 +799,8 @@ DEFINE_STRING(testing_serialization_file, "/tmp/serdes",
#endif
// mksnapshot.cc
+DEFINE_STRING(startup_src, NULL,
+ "Write V8 startup as C++ src. (mksnapshot only)")
DEFINE_STRING(startup_blob, NULL,
"Write V8 startup blob file. (mksnapshot only)")
@@ -804,6 +812,7 @@ DEFINE_BOOL(predictable, false, "enable predictable mode")
DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(predictable, concurrent_osr)
DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
+DEFINE_NEG_IMPLICATION(predictable, parallel_compaction)
// mark-compact.cc
DEFINE_BOOL(force_marking_deque_overflows, false,
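
The implication edits above (for instance, --ignition now turning on --vector_stores, and --predictable switching off the new --parallel_compaction) work because flag-definitions.h is re-included under different FLAG_MODE_* macros, as the flags.cc diff below shows. Under FLAG_MODE_DEFINE_IMPLICATIONS, each implication expands to a conditional assignment; schematically (simplified, not the literal V8 macro output):

// DEFINE_IMPLICATION(ignition, vector_stores) becomes:
if (FLAG_ignition) FLAG_vector_stores = true;
// DEFINE_NEG_IMPLICATION(predictable, parallel_compaction) becomes:
if (FLAG_predictable) FLAG_parallel_compaction = false;
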
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 6f28ebb037..f67defd5a9 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -2,27 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/flags.h"
+
#include <cctype>
#include <cstdlib>
#include <sstream>
-#include "src/v8.h"
-
+#include "src/allocation.h"
#include "src/assembler.h"
#include "src/base/functional.h"
#include "src/base/platform/platform.h"
+#include "src/list-inl.h"
#include "src/ostreams.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
// Define all of our flags.
#define FLAG_MODE_DEFINE
-#include "src/flag-definitions.h" // NOLINT
+#include "src/flag-definitions.h" // NOLINT(build/include)
// Define all of our flags default values.
#define FLAG_MODE_DEFINE_DEFAULTS
-#include "src/flag-definitions.h" // NOLINT
+#include "src/flag-definitions.h" // NOLINT(build/include)
namespace {
@@ -161,7 +164,7 @@ struct Flag {
Flag flags[] = {
#define FLAG_MODE_META
-#include "src/flag-definitions.h"
+#include "src/flag-definitions.h" // NOLINT(build/include)
};
const size_t num_flags = sizeof(flags) / sizeof(*flags);
@@ -567,7 +570,7 @@ void ComputeFlagListHash() {
// static
void FlagList::EnforceFlagImplications() {
#define FLAG_MODE_DEFINE_IMPLICATIONS
-#include "src/flag-definitions.h"
+#include "src/flag-definitions.h" // NOLINT(build/include)
#undef FLAG_MODE_DEFINE_IMPLICATIONS
ComputeFlagListHash();
}
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index e69bcd9033..cccd4d191a 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -7,6 +7,7 @@
#include "src/frames.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
#include "src/v8memory.h"
#if V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 8561e557e8..7e55833b45 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -19,7 +19,6 @@
namespace v8 {
namespace internal {
-
ReturnAddressLocationResolver
StackFrame::return_address_location_resolver_ = NULL;
@@ -185,7 +184,7 @@ bool StackTraceFrameIterator::IsValidFrame() {
Object* script = frame()->function()->shared()->script();
// Don't show functions from native scripts to user.
return (script->IsScript() &&
- Script::TYPE_NATIVE != Script::cast(script)->type()->value());
+ Script::TYPE_NATIVE != Script::cast(script)->type());
}
@@ -407,42 +406,58 @@ void StackFrame::SetReturnAddressLocationResolver(
StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
State* state) {
DCHECK(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
+
+ if (!iterator->can_access_heap_objects_) {
+ // TODO(titzer): "can_access_heap_objects" is kind of bogus. It really
+ // means that we are being called from the profiler, which can interrupt
+ // the VM with a signal at any arbitrary instruction, with essentially
+ // anything on the stack. So basically none of these checks are 100%
+ // reliable.
+ if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ // An adapter frame has a special SMI constant for the context and
+ // is not distinguished through the marker.
+ return ARGUMENTS_ADAPTOR;
+ }
+ Object* marker =
+ Memory::Object_at(state->fp + StandardFrameConstants::kMarkerOffset);
+ if (marker->IsSmi()) {
+ return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+ } else {
+ return JAVA_SCRIPT;
+ }
}
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) {
- // If we're using a "safe" stack iterator, we treat optimized
- // frames as normal JavaScript frames to avoid having to look
- // into the heap to determine the state. This is safe as long
- // as nobody tries to GC...
- if (!iterator->can_access_heap_objects_) return JAVA_SCRIPT;
- Code* code_obj =
- GetContainingCode(iterator->isolate(), *(state->pc_address));
+
+ // Look up the code object to figure out the type of the stack frame.
+ Code* code_obj = GetContainingCode(iterator->isolate(), *(state->pc_address));
+
+ Object* marker =
+ Memory::Object_at(state->fp + StandardFrameConstants::kMarkerOffset);
+ if (code_obj != nullptr) {
switch (code_obj->kind()) {
case Code::FUNCTION:
return JAVA_SCRIPT;
-
- case Code::HANDLER:
-#ifdef DEBUG
- if (!code_obj->is_hydrogen_stub()) {
- // There's currently no support for non-hydrogen stub handlers. If
- // you need this, you'll have to implement it yourself.
- UNREACHABLE();
- }
-#endif
case Code::OPTIMIZED_FUNCTION:
return OPTIMIZED;
-
+ case Code::HANDLER:
+ if (!marker->IsSmi()) {
+ // Only hydrogen code stub handlers can have a non-SMI marker.
+ DCHECK(code_obj->is_hydrogen_stub());
+ return OPTIMIZED;
+ }
+ break; // Marker encodes the frame type.
default:
- UNREACHABLE();
- return JAVA_SCRIPT;
+ break; // Marker encodes the frame type.
}
}
+
+ if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ // An adapter frame has a special SMI constant for the context and
+ // is not distinguished through the marker.
+ return ARGUMENTS_ADAPTOR;
+ }
+
+ // Didn't find a code object, or the code kind wasn't specific enough.
+ // The marker should encode the frame type.
return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
@@ -727,6 +742,13 @@ bool JavaScriptFrame::IsConstructor() const {
}
+bool JavaScriptFrame::HasInlinedFrames() {
+ List<JSFunction*> functions(1);
+ GetFunctions(&functions);
+ return functions.length() > 1;
+}
+
+
Object* JavaScriptFrame::GetOriginalConstructor() const {
Address fp = caller_fp();
if (has_adapted_arguments()) {
@@ -879,6 +901,15 @@ void JavaScriptFrame::RestoreOperandStack(FixedArray* store) {
}
+FrameSummary::FrameSummary(Object* receiver, JSFunction* function, Code* code,
+ int offset, bool is_constructor)
+ : receiver_(receiver, function->GetIsolate()),
+ function_(function),
+ code_(code),
+ offset_(offset),
+ is_constructor_(is_constructor) {}
+
+
void FrameSummary::Print() {
PrintF("receiver: ");
receiver_->ShortPrint();
@@ -1429,6 +1460,11 @@ Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
Address inner_pointer) {
Heap* heap = isolate_->heap();
+ if (!heap->code_space()->Contains(inner_pointer) &&
+ !heap->lo_space()->Contains(inner_pointer)) {
+ return nullptr;
+ }
+
// Check if the inner pointer points into a large object chunk.
LargePage* large_page = heap->lo_space()->FindPage(inner_pointer);
if (large_page != NULL) {
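
A reading aid for the ComputeType rewrite above: when the code object gives no definite answer, classification falls back to the marker slot, where the word at fp + kMarkerOffset is either a Smi encoding the frame type or, for ordinary JavaScript frames, the function itself (the two slots overlap). Condensed from the hunk above:

// Marker fallback, condensed from ComputeType: a Smi marker encodes the
// StackFrame::Type directly; a non-Smi there is really the JSFunction.
Object* marker =
    Memory::Object_at(state->fp + StandardFrameConstants::kMarkerOffset);
StackFrame::Type type =
    marker->IsSmi() ? static_cast<StackFrame::Type>(Smi::cast(marker)->value())
                    : StackFrame::JAVA_SCRIPT;
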
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 72250e37a1..d6bfd7aab8 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -175,6 +175,15 @@ class ConstructFrameConstants : public AllStatic {
};
+class InterpreterFrameConstants : public AllStatic {
+ public:
+ // Register file pointer relative.
+ static const int kLastParamFromRegisterPointer =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
+ static const int kFunctionFromRegisterPointer = kPointerSize;
+};
+
+
// Abstract base class for all stack frames.
class StackFrame BASE_EMBEDDED {
public:
@@ -515,16 +524,9 @@ class StandardFrame: public StackFrame {
class FrameSummary BASE_EMBEDDED {
public:
- FrameSummary(Object* receiver,
- JSFunction* function,
- Code* code,
- int offset,
- bool is_constructor)
- : receiver_(receiver, function->GetIsolate()),
- function_(function),
- code_(code),
- offset_(offset),
- is_constructor_(is_constructor) { }
+ FrameSummary(Object* receiver, JSFunction* function, Code* code, int offset,
+ bool is_constructor);
+
Handle<Object> receiver() { return receiver_; }
Handle<JSFunction> function() { return function_; }
Handle<Code> code() { return code_; }
@@ -574,6 +576,10 @@ class JavaScriptFrame: public StandardFrame {
// Check if this frame is a constructor frame invoked through 'new'.
bool IsConstructor() const;
+ // Determines whether this frame includes inlined activations. To get details
+ // about the inlined frames, use {GetFunctions} and {Summarize}.
+ bool HasInlinedFrames();
+
// Returns the original constructor function that was used in the constructor
// call to this frame. Note that this is only valid on constructor frames.
Object* GetOriginalConstructor() const;
diff --git a/deps/v8/src/full-codegen/OWNERS b/deps/v8/src/full-codegen/OWNERS
index 4464e8fd28..19e4ed6b6e 100644
--- a/deps/v8/src/full-codegen/OWNERS
+++ b/deps/v8/src/full-codegen/OWNERS
@@ -1,8 +1,10 @@
set noparent
bmeurer@chromium.org
+ishell@chromium.org
jarin@chromium.org
jkummerow@chromium.org
mstarzinger@chromium.org
+mvstanton@chromium.org
verwaest@chromium.org
yangguo@chromium.org
diff --git a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index 56f7e62ded..d3d53334d4 100644
--- a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -105,7 +105,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
+ SetFunctionPosition(literal());
Comment cmnt(masm_, "[ function compiled by full code generator");
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -120,8 +120,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis() && info->scope()->has_this_declaration()) {
+ if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ldr(r2, MemOperand(sp, receiver_offset));
@@ -156,7 +155,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
__ cmp(r9, Operand(r2));
__ b(hs, &ok);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
}
__ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
@@ -182,7 +181,7 @@ void FullCodeGenerator::Generate() {
}
}
- bool function_in_register = true;
+ bool function_in_register_r1 = true;
// Possibly allocate a local context.
if (info->scope()->num_heap_slots() > 0) {
@@ -203,7 +202,7 @@ void FullCodeGenerator::Generate() {
__ push(r1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- function_in_register = false;
+ function_in_register_r1 = false;
// Context is returned in r0. It replaces the context passed to us.
// It's saved in the stack and kept live in cp.
__ mov(cp, r0);
@@ -236,14 +235,19 @@ void FullCodeGenerator::Generate() {
}
}
+ PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
+ // The function register is trashed if we bail out here, but since that
+ // can only happen when we allocate a context, the value of
+ // |function_in_register_r1| is still correct.
+
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
Variable* this_function_var = scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
- if (!function_in_register) {
+ if (!function_in_register_r1) {
__ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers register again, keep is marked as such.
+ // The write barrier clobbers the register again; keep it marked as such.
}
SetVar(this_function_var, r1, r0, r2);
}
@@ -259,6 +263,7 @@ void FullCodeGenerator::Generate() {
__ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
__ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
Label non_construct_frame, done;
+ function_in_register_r1 = false;
__ b(ne, &non_construct_frame);
__ ldr(r0,
@@ -272,64 +277,36 @@ void FullCodeGenerator::Generate() {
SetVar(new_target_var, r0, r2, r3);
}
- // Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
- if (rest_param) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ add(r3, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r2, Operand(Smi::FromInt(num_parameters)));
- __ mov(r1, Operand(Smi::FromInt(rest_index)));
- __ mov(r0, Operand(Smi::FromInt(language_mode())));
- __ Push(r3, r2, r1, r0);
-
- RestParamAccessStub stub(isolate());
- __ CallStub(&stub);
-
- SetVar(rest_param, r0, r1, r2);
- }
-
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
+ DCHECK(r1.is(ArgumentsAccessNewDescriptor::function()));
+ if (!function_in_register_r1) {
// Load this again, if it's used by the local context below.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(r3, r1);
+ __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
-
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r1, Operand(Smi::FromInt(num_parameters)));
- __ Push(r3, r2, r1);
+ __ mov(ArgumentsAccessNewDescriptor::parameter_count(),
+ Operand(Smi::FromInt(num_parameters)));
+ __ add(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
// Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !has_simple_parameters()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
- }
+ // function, parameter pointer, parameter count.
+ // The stub will rewrite parameter pointer and parameter count if the
+ // previous stack frame was an arguments adapter frame.
+ bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
+ ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
+ is_unmapped, literal()->has_duplicate_parameters());
ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
SetVar(arguments, r0, r1, r2);
}
-
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
@@ -367,7 +344,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
- VisitStatements(function()->body());
+ VisitStatements(literal()->body());
DCHECK(loop_depth() == 0);
}
}
@@ -494,7 +471,7 @@ void FullCodeGenerator::EmitReturnSequence() {
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
- SetReturnPosition(function());
+ SetReturnPosition(literal());
// TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
PredictableCodeSizeScope predictable(masm_, -1);
int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT);
@@ -790,7 +767,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
// Only prepare for bailouts before splits if we're in a test
// context. Otherwise, we let the Visit function deal with the
// preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
+ if (!context()->IsTest()) return;
Label skip;
if (should_normalize) __ b(&skip);
@@ -1147,9 +1124,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- __ Move(r1, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(r1);
__ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- int vector_index = FeedbackVector()->GetIndex(slot);
+ int vector_index = SmiFromSlot(slot)->value();
__ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(vector_index)));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
@@ -1259,28 +1236,37 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ mov(r2, Operand(info));
__ CallStub(&stub);
} else {
- __ mov(r0, Operand(info));
- __ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex
- : Heap::kFalseValueRootIndex);
- __ Push(cp, r0, r1);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ Push(info);
+ __ CallRuntime(
+ pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
}
context()->Plug(r0);
}
-void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset,
- FeedbackVectorICSlot slot) {
- if (NeedsHomeObject(initializer)) {
- __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- __ mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ ldr(StoreDescriptor::ValueRegister(),
- MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
- }
+void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
+ FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ ldr(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+}
+
+
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(
+ Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ Move(StoreDescriptor::ReceiverRegister(), r0);
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ ldr(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
}
@@ -1454,67 +1440,26 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- DCHECK(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else if (var->is_this()) {
- CHECK(info_->has_literal() &&
- (info_->literal()->kind() & kSubclassConstructor) != 0);
- // TODO(dslomov): implement 'this' hole check elimination.
- skip_init_check = false;
+ if (NeedsHoleCheckForLoad(proxy)) {
+ // Let and const need a read barrier.
+ GetVar(r0, var);
+ __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ Label done;
+ __ b(ne, &done);
+ __ mov(r0, Operand(var->name()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&done);
} else {
- // Check that we always have valid source position.
- DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
- DCHECK(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST_LEGACY &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- GetVar(r0, var);
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- Label done;
- __ b(ne, &done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&done);
- } else {
-        // Uninitialized const bindings outside of harmony mode are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
- }
- context()->Plug(r0);
- break;
+ // Uninitialized legacy const bindings are unholed.
+ DCHECK(var->mode() == CONST_LEGACY);
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
}
+ context()->Plug(r0);
+ break;
}
context()->Plug(var);
break;
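The NeedsHoleCheckForLoad path above implements the temporal-dead-zone read barrier that replaces the old skip_init_check analysis. A hedged sketch of the semantics; VariableMode, Value, and the exception type are stand-ins for V8's heap sentinels and ReferenceError:

#include <stdexcept>
#include <string>

enum class VariableMode { LET, CONST, CONST_LEGACY };
struct Value { bool is_hole; };

Value LoadWithHoleCheck(Value slot, VariableMode mode, const std::string& name) {
  if (!slot.is_hole) return slot;  // Initialized binding: fast path.
  if (mode == VariableMode::LET || mode == VariableMode::CONST) {
    // Uninitialized let/const: temporal dead zone, throw a reference error.
    throw std::runtime_error(name + " is not defined");
  }
  // Legacy const: an uninitialized binding simply reads as undefined.
  return Value{false};  // stands in for LoadRoot(kUndefinedValueRootIndex)
}

int main() {
  (void)LoadWithHoleCheck(Value{false}, VariableMode::LET, "x");  // ok
  try {
    LoadWithHoleCheck(Value{true}, VariableMode::CONST, "y");
  } catch (const std::runtime_error&) {
    // TDZ violation, matching the generated Runtime::kThrowReferenceError.
  }
}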
@@ -1553,8 +1498,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// r0 = RegExp literal clone
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
__ ldr(r5, FieldMemOperand(r4, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r5, ip);
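LiteralsArray::OffsetOfLiteralAt replaces the raw FixedArray arithmetic because literal 0 is assumed to no longer sit at array element 0. A sketch with made-up constants; the real header size and the number of reserved leading slots (e.g. for a feedback vector) live in V8's object definitions:

#include <cassert>

constexpr int kPointerSize = 4;           // 32-bit ARM
constexpr int kFixedArrayHeaderSize = 8;  // assumed: map + length
constexpr int kReservedSlots = 1;         // assumed leading non-literal slots

constexpr int OffsetOfElementAt(int i) {
  return kFixedArrayHeaderSize + i * kPointerSize;
}
constexpr int OffsetOfLiteralAt(int i) {
  return OffsetOfElementAt(i + kReservedSlots);
}

int main() {
  // Literal 0 no longer coincides with array element 0, so the old
  // FixedArray::kHeaderSize + index * kPointerSize arithmetic is stale.
  assert(OffsetOfLiteralAt(0) != OffsetOfElementAt(0));
}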
@@ -1591,12 +1535,19 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
+void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
+ Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ push(r1);
} else {
VisitForStackValue(expression);
+ if (NeedsHomeObject(expression)) {
+ DCHECK(property->kind() == ObjectLiteral::Property::GETTER ||
+ property->kind() == ObjectLiteral::Property::SETTER);
+ int offset = property->kind() == ObjectLiteral::Property::GETTER ? 2 : 3;
+ EmitSetHomeObject(expression, offset, property->GetSlot());
+ }
}
}
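The offsets 2 and 3 passed to EmitSetHomeObject above fall out of the expression-stack layout at the accessor-table flush: the object literal sits two slots under a freshly pushed getter and three under the setter. A quick simulation with std::vector standing in for the stack:

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> stack;  // back() is the top of the expression stack
  auto peek = [&](size_t slots_down) {
    return stack[stack.size() - 1 - slots_down];
  };

  stack.push_back("literal");    // the object whose accessors are defined
  stack.push_back("key");
  stack.push_back("getter");     // EmitAccessor just pushed the getter
  assert(peek(2) == "literal");  // so its home object is at offset 2

  stack.push_back("setter");     // EmitAccessor just pushed the setter
  assert(peek(3) == "literal");  // and its home object is at offset 3
}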
@@ -1626,10 +1577,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
AccessorTable accessor_table(zone());
int property_index = 0;
- // store_slot_index points to the vector IC slot for the next store IC used.
- // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
- // and must be updated if the number of store ICs emitted here changes.
- int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1657,7 +1604,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
} else {
CallStoreIC(key->LiteralFeedbackId());
@@ -1665,14 +1612,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
- __ Move(StoreDescriptor::ReceiverRegister(), r0);
- __ mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ ldr(StoreDescriptor::ValueRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
- }
- CallStoreIC();
+ EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
}
} else {
VisitForEffect(value);
@@ -1685,8 +1625,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
__ mov(r0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes
__ push(r0);
__ CallRuntime(Runtime::kSetProperty, 4);
@@ -1705,12 +1646,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = value;
+ accessor_table.lookup(key)->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = value;
+ accessor_table.lookup(key)->second->setter = property;
}
break;
}
@@ -1725,13 +1666,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(r0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(
- it->second->getter, 2,
- expr->SlotForHomeObject(it->second->getter, &store_slot_index));
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(
- it->second->setter, 3,
- expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ mov(r0, Operand(Smi::FromInt(NONE)));
__ push(r0);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1766,8 +1701,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1813,10 +1749,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(r0);
}
-
- // Verify that compilation exactly consumed the number of store ic slots that
- // the ObjectLiteral node had to offer.
- DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
@@ -1908,7 +1840,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(r0);
if (subexpr->IsSpread()) {
VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
__ CallRuntime(Runtime::kAppendElement, 2);
@@ -2332,41 +2265,28 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label gc_required;
- Label allocated;
-
- const int instance_size = 5 * kPointerSize;
- DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
- instance_size);
+ Label allocate, done_allocate;
- __ Allocate(instance_size, r0, r2, r3, &gc_required, TAG_OBJECT);
- __ jmp(&allocated);
+ __ Allocate(JSIteratorResult::kSize, r0, r2, r3, &allocate, TAG_OBJECT);
+ __ b(&done_allocate);
- __ bind(&gc_required);
- __ Push(Smi::FromInt(instance_size));
+ __ bind(&allocate);
+ __ Push(Smi::FromInt(JSIteratorResult::kSize));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ ldr(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ bind(&allocated);
+ __ bind(&done_allocate);
__ ldr(r1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
__ ldr(r1, ContextOperand(r1, Context::ITERATOR_RESULT_MAP_INDEX));
__ pop(r2);
- __ mov(r3, Operand(isolate()->factory()->ToBoolean(done)));
- __ mov(r4, Operand(isolate()->factory()->empty_fixed_array()));
+ __ LoadRoot(r3,
+ done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
__ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2,
- FieldMemOperand(r0, JSGeneratorObject::kResultValuePropertyOffset));
- __ str(r3,
- FieldMemOperand(r0, JSGeneratorObject::kResultDonePropertyOffset));
-
- // Only the value field needs a write barrier, as the other values are in the
- // root set.
- __ RecordWriteField(r0, JSGeneratorObject::kResultValuePropertyOffset,
- r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+ __ str(r2, FieldMemOperand(r0, JSIteratorResult::kValueOffset));
+ __ str(r3, FieldMemOperand(r0, JSIteratorResult::kDoneOffset));
}
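The rewritten EmitCreateIteratorResult fills a fixed five-slot JSIteratorResult: map, properties, elements, value, done. A layout sketch; TaggedPtr and the field names are stand-ins, but the slot count matches the kSize the Allocate call assumes (see the STATIC_ASSERT in EmitCreateIterResultObject further down):

#include <cstdint>

using TaggedPtr = uintptr_t;  // one pointer-sized tagged slot

struct JSIteratorResultLayout {
  TaggedPtr map;         // HeapObject::kMapOffset
  TaggedPtr properties;  // JSObject::kPropertiesOffset (empty fixed array)
  TaggedPtr elements;    // JSObject::kElementsOffset (empty fixed array)
  TaggedPtr value;       // JSIteratorResult::kValueOffset
  TaggedPtr done;        // JSIteratorResult::kDoneOffset (true/false root)
};

static_assert(sizeof(JSIteratorResultLayout) == 5 * sizeof(TaggedPtr),
              "five pointer-sized slots, as the Allocate call assumes");

int main() {}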
@@ -2506,8 +2426,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
- int* used_store_slots) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Constructor is in r0.
DCHECK(lit != NULL);
__ push(r0);
@@ -2541,8 +2460,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2,
- lit->SlotForHomeObject(value, used_store_slots));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -3264,18 +3184,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- }
-
- __ Move(r2, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(r2);
__ mov(r3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(r0);
}
@@ -3308,16 +3224,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- UNREACHABLE();
- /* TODO(dslomov): support pretenuring.
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- */
- }
-
- __ Move(r2, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(r2);
__ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
@@ -3325,6 +3232,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(r0);
}
@@ -3350,60 +3259,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ NonNegativeSmiTst(r0);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(r0, if_false);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, if_true);
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- __ b(ne, if_false);
- __ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(lt, if_false);
- __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3448,95 +3303,6 @@ void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false, skip_lookup;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(r0);
-
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
- __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, &skip_lookup);
-
- // Check for fast case object. Generate false result for slow case object.
- __ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r2, ip);
- __ b(eq, if_false);
-
- // Look for valueOf name in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(r3, r1);
- __ cmp(r3, Operand::Zero());
- __ b(eq, &done);
-
- __ LoadInstanceDescriptors(r1, r4);
- // r4: descriptor array.
- // r3: valid entries in the descriptor array.
- __ mov(ip, Operand(DescriptorArray::kDescriptorSize));
- __ mul(r3, r3, ip);
- // Calculate location of the first key name.
- __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
- // Calculate the end of the descriptor array.
- __ mov(r2, r4);
- __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
-
- // Loop through all the keys in the descriptor array. If one of these is the
- // string "valueOf" the result is false.
- // The use of ip to store the valueOf string assumes that it is not otherwise
- // used in the loop below.
- __ mov(ip, Operand(isolate()->factory()->value_of_string()));
- __ jmp(&entry);
- __ bind(&loop);
- __ ldr(r3, MemOperand(r4, 0));
- __ cmp(r3, ip);
- __ b(eq, if_false);
- __ add(r4, r4, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ cmp(r4, Operand(r2));
- __ b(ne, &loop);
-
- __ bind(&done);
-
- // Set the bit in the map to indicate that there is no local valueOf field.
- __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
-
- __ bind(&skip_lookup);
-
- // If a valueOf property is not found on the object check that its
- // prototype is the un-modified String prototype. If not result is false.
- __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
- __ JumpIfSmi(r2, if_false);
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ cmp(r2, r3);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3999,6 +3765,23 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into r0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Convert the object to an integer.
+ Label done_convert;
+ __ JumpIfSmi(r0, &done_convert);
+ __ Push(r0);
+ __ CallRuntime(Runtime::kToInteger, 1);
+ __ bind(&done_convert);
+ context()->Plug(r0);
+}
+
+
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(args->length(), 1);
@@ -4011,6 +3794,39 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into r0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ ToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(r0);
+}
+
+
+void FullCodeGenerator::EmitToName(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into r0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ Label convert, done_convert;
+ __ JumpIfSmi(r0, &convert);
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ CompareObjectType(r0, r1, r1, LAST_NAME_TYPE);
+ __ b(ls, &done_convert);
+ __ bind(&convert);
+ __ Push(r0);
+ __ CallRuntime(Runtime::kToName, 1);
+ __ bind(&done_convert);
+ context()->Plug(r0);
+}
+
+
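EmitToName's fast path relies on the instance-type ordering pinned by the STATIC_ASSERT: anything at or below LAST_NAME_TYPE is already a Name (string or symbol), while smis and everything else take the runtime call. A sketch with stand-in type codes that keep only that assumed ordering:

#include <cassert>

enum InstanceType {
  STRING_TYPE = 0,
  SYMBOL_TYPE = 1,
  LAST_NAME_TYPE = SYMBOL_TYPE,
  JS_OBJECT_TYPE = 2
};

constexpr bool IsAlreadyName(InstanceType t) { return t <= LAST_NAME_TYPE; }

int main() {
  assert(IsAlreadyName(STRING_TYPE));
  assert(IsAlreadyName(SYMBOL_TYPE));
  assert(!IsAlreadyName(JS_OBJECT_TYPE));  // falls through to Runtime::kToName
}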
void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -4147,6 +3963,26 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCall(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_LE(2, args->length());
+ // Push target, receiver and arguments onto the stack.
+ for (Expression* const arg : *args) {
+ VisitForStackValue(arg);
+ }
+ // Move target to r1.
+ int const argc = args->length() - 2;
+ __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
+ // Call the target.
+ __ mov(r0, Operand(argc));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ context()->DropAndPlug(1, r0);
+}
+
+
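In the new EmitCall intrinsic, the target ends up (argc + 1) pointer slots below the stack top because the receiver and the argc arguments are pushed after it. A short simulation of that indexing:

#include <cassert>
#include <string>
#include <vector>

int main() {
  int const argc = 3;  // arbitrary example
  std::vector<std::string> stack;
  stack.push_back("target");
  stack.push_back("receiver");
  for (int i = 0; i < argc; ++i) stack.push_back("arg" + std::to_string(i));

  // MemOperand(sp, (argc + 1) * kPointerSize) in the generated code:
  assert(stack[stack.size() - 1 - (argc + 1)] == "target");
}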
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4172,7 +4008,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ bind(&runtime);
__ push(r0);
- __ CallRuntime(Runtime::kCall, args->length());
+ __ CallRuntime(Runtime::kCallFunction, args->length());
__ bind(&done);
context()->Plug(r0);
@@ -4183,22 +4019,18 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- // new.target
+ // Evaluate new.target and super constructor.
VisitForStackValue(args->at(0));
-
- // .this_function
VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kGetPrototype, 1);
- __ Push(result_register());
- // Load original constructor into r4.
- __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
+ // Load original constructor into r3.
+ __ ldr(r3, MemOperand(sp, 1 * kPointerSize));
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ ldr(r4, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adaptor_frame);
// default constructor has no arguments, so no adaptor frame means no args.
__ mov(r0, Operand::Zero());
@@ -4218,8 +4050,8 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ bind(&loop);
// Pre-decrement r2 with kPointerSize on each iteration.
// Pre-decrement in order to skip receiver.
- __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
- __ Push(r3);
+ __ ldr(r4, MemOperand(r2, kPointerSize, NegPreIndex));
+ __ Push(r4);
__ sub(r1, r1, Operand(1));
__ cmp(r1, Operand::Zero());
__ b(ne, &loop);
@@ -4227,14 +4059,12 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ bind(&args_set_up);
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
- CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- __ Drop(1);
+ // Restore context register.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(result_register());
+ context()->DropAndPlug(1, r0);
}
@@ -4530,18 +4360,45 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ Label runtime, done;
+
+ __ Allocate(JSIteratorResult::kSize, r0, r2, r3, &runtime, TAG_OBJECT);
+ __ ldr(r1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
+ __ ldr(r1, ContextOperand(r1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ pop(r3);
+ __ pop(r2);
+ __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+ __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+ __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+ __ str(r2, FieldMemOperand(r0, JSIteratorResult::kValueOffset));
+ __ str(r3, FieldMemOperand(r0, JSIteratorResult::kDoneOffset));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ __ b(&done);
+
+ __ bind(&runtime);
+ __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+
+ __ bind(&done);
+ context()->Plug(r0);
+}
+
+
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
- // Push the builtins object as the receiver.
- Register receiver = LoadDescriptor::ReceiverRegister();
- __ ldr(receiver, GlobalObjectOperand());
- __ ldr(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
- __ push(receiver);
+ // Push undefined as the receiver.
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ push(r0);
- // Load the function from the receiver.
- __ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF);
+ __ ldr(r0, GlobalObjectOperand());
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
+ __ ldr(r0, ContextOperand(r0, expr->context_index()));
}
@@ -5029,23 +4886,22 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(r0, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
- __ b(eq, if_true);
- __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
+ __ and_(r1, r1,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ cmp(r1, Operand(1 << Map::kIsCallable));
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(r0, if_false);
__ CompareRoot(r0, Heap::kNullValueRootIndex);
__ b(eq, if_true);
- // Check for JS objects => true.
- __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, if_false);
- __ CompareInstanceType(r0, r1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(gt, if_false);
- // Check for undetectable objects => false.
+ // Check for callable or undetectable objects => false.
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ __ tst(r1, Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(eq, if_true, if_false, fall_through);
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
@@ -5086,20 +4942,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kHasProperty, 2);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
+ __ CompareRoot(r0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ VisitForAccumulatorValue(expr->right());
+ __ pop(r1);
+ InstanceOfStub stub(isolate());
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- // The stub returns 0 for true.
- __ tst(r0, r0);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ CompareRoot(r0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
}
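The rewritten typeof checks in the EmitLiteralCompareTypeof hunk above reduce to two map bit-field flags: "function" demands callable and not undetectable, while "object" rejects anything callable or undetectable. A sketch of the bit tests; the bit positions are assumptions, only the masking logic mirrors the generated code:

#include <cassert>

constexpr unsigned kIsCallable = 1u << 4;      // assumed bit position
constexpr unsigned kIsUndetectable = 1u << 5;  // assumed bit position

// typeof v == "function": callable and not undetectable.
constexpr bool TypeofIsFunction(unsigned bit_field) {
  return (bit_field & (kIsCallable | kIsUndetectable)) == kIsCallable;
}

// The "object" arm inverts the same mask: neither bit may be set.
constexpr bool PassesObjectBitCheck(unsigned bit_field) {
  return (bit_field & (kIsCallable | kIsUndetectable)) == 0;
}

int main() {
  assert(TypeofIsFunction(kIsCallable));
  assert(!TypeofIsFunction(kIsCallable | kIsUndetectable));
  assert(!TypeofIsFunction(0));
  assert(PassesObjectBitCheck(0));
  assert(!PassesObjectBitCheck(kIsCallable));  // callables are not "object"
}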
diff --git a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index 96e8d957d5..b53e8ee6cd 100644
--- a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -104,7 +104,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
+ SetFunctionPosition(literal());
Comment cmnt(masm_, "[ Function compiled by full code generator");
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -119,8 +119,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis() && info->scope()->has_this_declaration()) {
+ if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kXRegSize;
__ Peek(x10, receiver_offset);
@@ -160,7 +159,7 @@ void FullCodeGenerator::Generate() {
__ Sub(x10, jssp, locals_count * kPointerSize);
__ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
__ B(hs, &ok);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ Bind(&ok);
}
__ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
@@ -238,6 +237,11 @@ void FullCodeGenerator::Generate() {
}
}
+ PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
+  // The function register is trashed in case we bail out here. But since
+  // that can happen only when we allocate a context, the value of
+  // |function_in_register_x1| is correct.
+
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
Variable* this_function_var = scope()->this_function_var();
@@ -245,7 +249,7 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ This function");
if (!function_in_register_x1) {
__ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers register again, keep is marked as such.
+ // The write barrier clobbers register again, keep it marked as such.
}
SetVar(this_function_var, x1, x0, x2);
}
@@ -264,6 +268,7 @@ void FullCodeGenerator::Generate() {
__ Bind(&check_frame_marker);
__ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset));
__ Cmp(x1, Smi::FromInt(StackFrame::CONSTRUCT));
+ function_in_register_x1 = false;
Label non_construct_frame, done;
@@ -280,56 +285,30 @@ void FullCodeGenerator::Generate() {
SetVar(new_target_var, x0, x2, x3);
}
- // Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
- if (rest_param) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ Add(x3, fp, StandardFrameConstants::kCallerSPOffset + offset);
- __ Mov(x2, Smi::FromInt(num_parameters));
- __ Mov(x1, Smi::FromInt(rest_index));
- __ Mov(x0, Smi::FromInt(language_mode()));
- __ Push(x3, x2, x1, x0);
-
- RestParamAccessStub stub(isolate());
- __ CallStub(&stub);
-
- SetVar(rest_param, x0, x1, x2);
- }
-
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
+ DCHECK(x1.is(ArgumentsAccessNewDescriptor::function()));
if (!function_in_register_x1) {
// Load this again, if it's used by the local context below.
- __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ Mov(x3, x1);
+ __ Ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset + offset);
- __ Mov(x1, Smi::FromInt(num_parameters));
- __ Push(x3, x2, x1);
+ __ Mov(ArgumentsAccessNewDescriptor::parameter_count(),
+ Smi::FromInt(num_parameters));
+ __ Add(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
+ StandardFrameConstants::kCallerSPOffset + offset);
// Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !has_simple_parameters()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
- }
+ // function, parameter pointer, parameter count.
+  // The stub will rewrite the parameter pointer and parameter count if the
+  // previous stack frame was an arguments adaptor frame.
+ bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
+ ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
+ is_unmapped, literal()->has_duplicate_parameters());
ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
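ArgumentsAccessStub::ComputeType folds the removed three-way if/else into the two booleans computed here. A sketch that mirrors that removed logic; the enum values are the stub's names, but the helper body itself is an assumption:

enum class ArgumentsType { NEW_STRICT, NEW_SLOPPY_SLOW, NEW_SLOPPY_FAST };

ArgumentsType ComputeType(bool is_unmapped, bool has_duplicate_parameters) {
  if (is_unmapped) return ArgumentsType::NEW_STRICT;  // strict or non-simple
  return has_duplicate_parameters ? ArgumentsType::NEW_SLOPPY_SLOW
                                  : ArgumentsType::NEW_SLOPPY_FAST;
}

int main() {
  // Strictness wins over duplicate parameters, as in the removed if/else.
  return ComputeType(true, true) == ArgumentsType::NEW_STRICT ? 0 : 1;
}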
@@ -373,7 +352,7 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
- VisitStatements(function()->body());
+ VisitStatements(literal()->body());
DCHECK(loop_depth() == 0);
}
}
@@ -485,7 +464,7 @@ void FullCodeGenerator::EmitReturnSequence() {
EmitProfilingCounterReset();
__ Bind(&ok);
- SetReturnPosition(function());
+ SetReturnPosition(literal());
const Register& current_sp = __ StackPointer();
// Nothing ensures 16 bytes alignment here.
DCHECK(!current_sp.Is(csp));
@@ -789,7 +768,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
// Only prepare for bailouts before splits if we're in a test
// context. Otherwise, we let the Visit function deal with the
// preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
+ if (!context()->IsTest()) return;
// TODO(all): Investigate to see if there is something to work on here.
Label skip;
@@ -1144,9 +1123,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// We got a fixed array in register x0. Iterate through that.
__ Bind(&fixed_array);
- __ LoadObject(x1, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(x1);
__ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- int vector_index = FeedbackVector()->GetIndex(slot);
+ int vector_index = SmiFromSlot(slot)->value();
__ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(vector_index)));
__ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
@@ -1254,27 +1233,35 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ Mov(x2, Operand(info));
__ CallStub(&stub);
} else {
- __ Mov(x11, Operand(info));
- __ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex
- : Heap::kFalseValueRootIndex);
- __ Push(cp, x11, x10);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ Push(info);
+ __ CallRuntime(
+ pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
}
context()->Plug(x0);
}
-void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset,
- FeedbackVectorICSlot slot) {
- if (NeedsHomeObject(initializer)) {
- __ Peek(StoreDescriptor::ReceiverRegister(), 0);
- __ Mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
- }
+void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
+ FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ Peek(StoreDescriptor::ReceiverRegister(), 0);
+ __ Mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+}
+
+
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(
+ Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ Move(StoreDescriptor::ReceiverRegister(), x0);
+ __ Mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
}
@@ -1441,67 +1428,26 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
Comment cmnt(masm_, var->IsContextSlot()
? "Context variable"
: "Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- DCHECK(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else if (var->is_this()) {
- CHECK(info_->has_literal() &&
- (info_->literal()->kind() & kSubclassConstructor) != 0);
- // TODO(dslomov): implement 'this' hole check elimination.
- skip_init_check = false;
+ if (NeedsHoleCheckForLoad(proxy)) {
+ // Let and const need a read barrier.
+ GetVar(x0, var);
+ Label done;
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, &done);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ Mov(x0, Operand(var->name()));
+ __ Push(x0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ Bind(&done);
} else {
- // Check that we always have valid source position.
- DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
- DCHECK(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST_LEGACY &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- GetVar(x0, var);
- Label done;
- __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, &done);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ Mov(x0, Operand(var->name()));
- __ Push(x0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ Bind(&done);
- } else {
-        // Uninitialized const bindings outside of harmony mode are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- __ Bind(&done);
- }
- context()->Plug(x0);
- break;
+ // Uninitialized legacy const bindings are unholed.
+ DCHECK(var->mode() == CONST_LEGACY);
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ Bind(&done);
}
+ context()->Plug(x0);
+ break;
}
context()->Plug(var);
break;
@@ -1541,8 +1487,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// x0 = RegExp literal clone
__ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ldr(x4, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
__ Ldr(x5, FieldMemOperand(x4, literal_offset));
__ JumpIfNotRoot(x5, Heap::kUndefinedValueRootIndex, &materialized);
@@ -1577,12 +1522,19 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
+void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
+ Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ LoadRoot(x10, Heap::kNullValueRootIndex);
__ Push(x10);
} else {
VisitForStackValue(expression);
+ if (NeedsHomeObject(expression)) {
+ DCHECK(property->kind() == ObjectLiteral::Property::GETTER ||
+ property->kind() == ObjectLiteral::Property::SETTER);
+ int offset = property->kind() == ObjectLiteral::Property::GETTER ? 2 : 3;
+ EmitSetHomeObject(expression, offset, property->GetSlot());
+ }
}
}
@@ -1612,10 +1564,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
AccessorTable accessor_table(zone());
int property_index = 0;
- // store_slot_index points to the vector IC slot for the next store IC used.
- // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
- // and must be updated if the number of store ICs emitted here changes.
- int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1643,7 +1591,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ Peek(StoreDescriptor::ReceiverRegister(), 0);
if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
} else {
CallStoreIC(key->LiteralFeedbackId());
@@ -1651,14 +1599,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
- __ Mov(StoreDescriptor::ReceiverRegister(), x0);
- __ Mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ Peek(StoreDescriptor::ValueRegister(), 0);
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
- }
- CallStoreIC();
+ EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
}
} else {
VisitForEffect(value);
@@ -1670,8 +1611,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
__ Mov(x0, Smi::FromInt(SLOPPY)); // Language mode
__ Push(x0);
__ CallRuntime(Runtime::kSetProperty, 4);
@@ -1689,12 +1631,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = value;
+ accessor_table.lookup(key)->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = value;
+ accessor_table.lookup(key)->second->setter = property;
}
break;
}
@@ -1709,13 +1651,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(x10);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(
- it->second->getter, 2,
- expr->SlotForHomeObject(it->second->getter, &store_slot_index));
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(
- it->second->setter, 3,
- expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ Mov(x10, Smi::FromInt(NONE));
__ Push(x10);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1750,8 +1686,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1797,10 +1734,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(x0);
}
-
- // Verify that compilation exactly consumed the number of store ic slots that
- // the ObjectLiteral node had to offer.
- DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
@@ -1889,7 +1822,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(x0);
if (subexpr->IsSpread()) {
VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
__ CallRuntime(Runtime::kAppendElement, 2);
@@ -2203,8 +2137,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
- int* used_store_slots) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Constructor is in x0.
DCHECK(lit != NULL);
__ push(x0);
@@ -2238,8 +2171,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2,
- lit->SlotForHomeObject(value, used_store_slots));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -2958,18 +2892,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ Peek(x1, arg_count * kXRegSize);
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- }
-
- __ LoadObject(x2, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(x2);
__ Mov(x3, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(x0);
}
@@ -3002,16 +2932,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ Peek(x1, arg_count * kXRegSize);
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- UNREACHABLE();
- /* TODO(dslomov): support pretenuring.
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- */
- }
-
- __ LoadObject(x2, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(x2);
__ Mov(x3, SmiFromSlot(expr->CallFeedbackSlot()));
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
@@ -3019,6 +2940,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(x0);
}
@@ -3043,58 +2966,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- uint64_t sign_mask = V8_UINT64_C(1) << (kSmiShift + kSmiValueSize - 1);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ TestAndSplit(x0, kSmiTagMask | sign_mask, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(x0, if_false);
- __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
- __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset));
- __ Tbnz(x11, Map::kIsUndetectable, if_false);
- __ Ldrb(x12, FieldMemOperand(x10, Map::kInstanceTypeOffset));
- __ Cmp(x12, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ B(lt, if_false);
- __ Cmp(x12, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3139,108 +3010,6 @@ void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false, skip_lookup;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- Register object = x0;
- __ AssertNotSmi(object);
-
- Register map = x10;
- Register bitfield2 = x11;
- __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
- __ Ldrb(bitfield2, FieldMemOperand(map, Map::kBitField2Offset));
- __ Tbnz(bitfield2, Map::kStringWrapperSafeForDefaultValueOf, &skip_lookup);
-
- // Check for fast case object. Generate false result for slow case object.
- Register props = x12;
- Register props_map = x12;
- Register hash_table_map = x13;
- __ Ldr(props, FieldMemOperand(object, JSObject::kPropertiesOffset));
- __ Ldr(props_map, FieldMemOperand(props, HeapObject::kMapOffset));
- __ LoadRoot(hash_table_map, Heap::kHashTableMapRootIndex);
- __ Cmp(props_map, hash_table_map);
- __ B(eq, if_false);
-
- // Look for valueOf name in the descriptor array, and indicate false if found.
- // Since we omit an enumeration index check, if it is added via a transition
- // that shares its descriptor array, this is a false positive.
- Label loop, done;
-
- // Skip loop if no descriptors are valid.
- Register descriptors = x12;
- Register descriptors_length = x13;
- __ NumberOfOwnDescriptors(descriptors_length, map);
- __ Cbz(descriptors_length, &done);
-
- __ LoadInstanceDescriptors(map, descriptors);
-
- // Calculate the end of the descriptor array.
- Register descriptors_end = x14;
- __ Mov(x15, DescriptorArray::kDescriptorSize);
- __ Mul(descriptors_length, descriptors_length, x15);
- // Calculate location of the first key name.
- __ Add(descriptors, descriptors,
- DescriptorArray::kFirstOffset - kHeapObjectTag);
- // Calculate the end of the descriptor array.
- __ Add(descriptors_end, descriptors,
- Operand(descriptors_length, LSL, kPointerSizeLog2));
-
- // Loop through all the keys in the descriptor array. If one of these is the
- // string "valueOf" the result is false.
- Register valueof_string = x1;
- int descriptor_size = DescriptorArray::kDescriptorSize * kPointerSize;
- __ Mov(valueof_string, Operand(isolate()->factory()->value_of_string()));
- __ Bind(&loop);
- __ Ldr(x15, MemOperand(descriptors, descriptor_size, PostIndex));
- __ Cmp(x15, valueof_string);
- __ B(eq, if_false);
- __ Cmp(descriptors, descriptors_end);
- __ B(ne, &loop);
-
- __ Bind(&done);
-
- // Set the bit in the map to indicate that there is no local valueOf field.
- __ Ldrb(x2, FieldMemOperand(map, Map::kBitField2Offset));
- __ Orr(x2, x2, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ Strb(x2, FieldMemOperand(map, Map::kBitField2Offset));
-
- __ Bind(&skip_lookup);
-
- // If a valueOf property is not found on the object check that its prototype
- // is the unmodified String prototype. If not result is false.
- Register prototype = x1;
- Register global_idx = x2;
- Register native_context = x2;
- Register string_proto = x3;
- Register proto_map = x4;
- __ Ldr(prototype, FieldMemOperand(map, Map::kPrototypeOffset));
- __ JumpIfSmi(prototype, if_false);
- __ Ldr(proto_map, FieldMemOperand(prototype, HeapObject::kMapOffset));
- __ Ldr(global_idx, GlobalObjectMemOperand());
- __ Ldr(native_context,
- FieldMemOperand(global_idx, GlobalObject::kNativeContextOffset));
- __ Ldr(string_proto,
- ContextMemOperand(native_context,
- Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ Cmp(proto_map, string_proto);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3706,6 +3475,23 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into x0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Convert the object to an integer.
+ Label done_convert;
+ __ JumpIfSmi(x0, &done_convert);
+ __ Push(x0);
+ __ CallRuntime(Runtime::kToInteger, 1);
+ __ bind(&done_convert);
+ context()->Plug(x0);
+}
+
+
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(args->length(), 1);
@@ -3719,6 +3505,38 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into x0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ ToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(x0);
+}
+
+
+void FullCodeGenerator::EmitToName(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into x0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ Label convert, done_convert;
+ __ JumpIfSmi(x0, &convert);
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ JumpIfObjectType(x0, x1, x1, LAST_NAME_TYPE, &done_convert, ls);
+ __ Bind(&convert);
+ __ Push(x0);
+ __ CallRuntime(Runtime::kToName, 1);
+ __ Bind(&done_convert);
+ context()->Plug(x0);
+}
+
+
void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -3861,6 +3679,27 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCall(CallRuntime* expr) {
+  ASM_LOCATION("FullCodeGenerator::EmitCall");
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_LE(2, args->length());
+ // Push target, receiver and arguments onto the stack.
+ for (Expression* const arg : *args) {
+ VisitForStackValue(arg);
+ }
+ // Move target to x1.
+ int const argc = args->length() - 2;
+ __ Peek(x1, (argc + 1) * kXRegSize);
+ // Call the target.
+ __ Mov(x0, argc);
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ context()->DropAndPlug(1, x0);
+}
+
+
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
ZoneList<Expression*>* args = expr->arguments();
@@ -3886,7 +3725,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ Bind(&runtime);
__ Push(x0);
- __ CallRuntime(Runtime::kCall, args->length());
+ __ CallRuntime(Runtime::kCallFunction, args->length());
__ Bind(&done);
context()->Plug(x0);
@@ -3897,16 +3736,12 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- // new.target
+ // Evaluate new.target and super constructor.
VisitForStackValue(args->at(0));
-
- // .this_function
VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kGetPrototype, 1);
- __ Push(result_register());
- // Load original constructor into x4.
- __ Peek(x4, 1 * kPointerSize);
+ // Load original constructor into x3.
+ __ Peek(x3, 1 * kPointerSize);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
@@ -3941,14 +3776,12 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ bind(&args_set_up);
__ Peek(x1, Operand(x0, LSL, kPointerSizeLog2));
- __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
-
- CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
+ // Restore context register.
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(result_register());
+ context()->DropAndPlug(1, x0);
}
@@ -4226,19 +4059,57 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ Label runtime, done;
+
+ Register result = x0;
+ __ Allocate(JSIteratorResult::kSize, result, x10, x11, &runtime, TAG_OBJECT);
+ Register map_reg = x1;
+ Register result_value = x2;
+ Register boolean_done = x3;
+ Register empty_fixed_array = x4;
+ Register untagged_result = x5;
+ __ Ldr(map_reg, GlobalObjectMemOperand());
+ __ Ldr(map_reg, FieldMemOperand(map_reg, GlobalObject::kNativeContextOffset));
+ __ Ldr(map_reg,
+ ContextMemOperand(map_reg, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ Pop(boolean_done);
+ __ Pop(result_value);
+ __ LoadRoot(empty_fixed_array, Heap::kEmptyFixedArrayRootIndex);
+ STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
+ JSObject::kElementsOffset);
+ STATIC_ASSERT(JSIteratorResult::kValueOffset + kPointerSize ==
+ JSIteratorResult::kDoneOffset);
+ __ ObjectUntag(untagged_result, result);
+ __ Str(map_reg, MemOperand(untagged_result, HeapObject::kMapOffset));
+ __ Stp(empty_fixed_array, empty_fixed_array,
+ MemOperand(untagged_result, JSObject::kPropertiesOffset));
+ __ Stp(result_value, boolean_done,
+ MemOperand(untagged_result, JSIteratorResult::kValueOffset));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ __ B(&done);
+
+ __ Bind(&runtime);
+ __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+
+ __ Bind(&done);
+ context()->Plug(x0);
+}
+
+
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
- // Push the builtins object as the receiver.
- __ Ldr(x10, GlobalObjectMemOperand());
- __ Ldr(LoadDescriptor::ReceiverRegister(),
- FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
- __ Push(LoadDescriptor::ReceiverRegister());
+ // Push undefined as the receiver.
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ Push(x0);
- // Load the function from the receiver.
- Handle<String> name = expr->name();
- __ Mov(LoadDescriptor::NameRegister(), Operand(name));
- __ Mov(LoadDescriptor::SlotRegister(),
- SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
- CallLoadIC(NOT_INSIDE_TYPEOF);
+ __ Ldr(x0, GlobalObjectMemOperand());
+ __ Ldr(x0, FieldMemOperand(x0, GlobalObject::kNativeContextOffset));
+ __ Ldr(x0, ContextMemOperand(x0, expr->context_index()));
}
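The paired Stp stores in EmitCreateIterResultObject are only sound because the STATIC_ASSERTs pin down adjacent field offsets. A compilable sketch of that layout contract, using a plain struct as a stand-in for the real tagged JSIteratorResult:

    #include <cstddef>

    struct IterResultShape {  // assumed mirror of JSIteratorResult's words
      void* map;
      void* properties;
      void* elements;
      void* value;
      void* done;
    };

    static_assert(offsetof(IterResultShape, elements) ==
                      offsetof(IterResultShape, properties) + sizeof(void*),
                  "one Stp fills properties and elements");
    static_assert(offsetof(IterResultShape, done) ==
                      offsetof(IterResultShape, value) + sizeof(void*),
                  "one Stp fills value and done");
    static_assert(sizeof(IterResultShape) == 5 * sizeof(void*),
                  "matches JSIteratorResult::kSize");

    int main() {}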
@@ -4727,25 +4598,21 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (String::Equals(check, factory->function_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof function_string");
__ JumpIfSmi(x0, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ JumpIfObjectType(x0, x10, x11, JS_FUNCTION_TYPE, if_true);
- __ CompareAndSplit(x11, JS_FUNCTION_PROXY_TYPE, eq, if_true, if_false,
- fall_through);
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset));
+ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset));
+ __ And(x1, x1, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+ __ CompareAndSplit(x1, Operand(1 << Map::kIsCallable), eq, if_true,
+ if_false, fall_through);
} else if (String::Equals(check, factory->object_string())) {
ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
__ JumpIfSmi(x0, if_false);
__ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
- // Check for JS objects => true.
- Register map = x10;
- __ JumpIfObjectType(x0, map, x11, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
- if_false, lt);
- __ CompareInstanceType(map, x11, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ B(gt, if_false);
- // Check for undetectable objects => false.
- __ Ldrb(x10, FieldMemOperand(map, Map::kBitFieldOffset));
-
- __ TestAndSplit(x10, 1 << Map::kIsUndetectable, if_true, if_false,
- fall_through);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, if_false, lt);
+ // Check for callable or undetectable objects => false.
+ __ Ldrb(x10, FieldMemOperand(x10, Map::kBitFieldOffset));
+ __ TestAndSplit(x10, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable),
+ if_true, if_false, fall_through);
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
} else if (String::Equals(check, factory->type##_string())) { \
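Both rewritten typeof branches reduce to a single masked test on the map's bit field: "function" requires callable set and undetectable clear, while "object" requires both clear. A sketch with illustrative bit positions (the real ones live in Map):

    #include <cassert>
    #include <cstdint>

    constexpr uint8_t kIsCallable = 1 << 4;      // illustrative positions
    constexpr uint8_t kIsUndetectable = 1 << 5;  // only

    bool TypeofFunction(uint8_t bits) {
      return (bits & (kIsCallable | kIsUndetectable)) == kIsCallable;
    }
    bool TypeofObject(uint8_t bits) {  // after the smi/null/range checks
      return (bits & (kIsCallable | kIsUndetectable)) == 0;
    }

    int main() {
      assert(TypeofFunction(kIsCallable));
      assert(!TypeofFunction(kIsCallable | kIsUndetectable));
      assert(TypeofObject(0) && !TypeofObject(kIsCallable));
    }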
@@ -4791,19 +4658,20 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kHasProperty, 2);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(x0, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ VisitForAccumulatorValue(expr->right());
+ __ Pop(x1);
+ InstanceOfStub stub(isolate());
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- // The stub returns 0 for true.
- __ CompareAndSplit(x0, 0, eq, if_true, if_false, fall_through);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
break;
}
@@ -5143,26 +5011,19 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label gc_required;
- Label allocated;
-
- const int instance_size = 5 * kPointerSize;
- DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
- instance_size);
+ Label allocate, done_allocate;
// Allocate and populate an object with this form: { value: VAL, done: DONE }
Register result = x0;
- __ Allocate(instance_size, result, x10, x11, &gc_required, TAG_OBJECT);
- __ B(&allocated);
+ __ Allocate(JSIteratorResult::kSize, result, x10, x11, &allocate, TAG_OBJECT);
+ __ B(&done_allocate);
- __ Bind(&gc_required);
- __ Push(Smi::FromInt(instance_size));
+ __ Bind(&allocate);
+ __ Push(Smi::FromInt(JSIteratorResult::kSize));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ Ldr(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ Bind(&allocated);
+ __ Bind(&done_allocate);
Register map_reg = x1;
Register result_value = x2;
Register boolean_done = x3;
@@ -5173,24 +5034,20 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ Ldr(map_reg,
ContextMemOperand(map_reg, Context::ITERATOR_RESULT_MAP_INDEX));
__ Pop(result_value);
- __ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
- __ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
+ __ LoadRoot(boolean_done,
+ done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ LoadRoot(empty_fixed_array, Heap::kEmptyFixedArrayRootIndex);
STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
JSObject::kElementsOffset);
- STATIC_ASSERT(JSGeneratorObject::kResultValuePropertyOffset + kPointerSize ==
- JSGeneratorObject::kResultDonePropertyOffset);
+ STATIC_ASSERT(JSIteratorResult::kValueOffset + kPointerSize ==
+ JSIteratorResult::kDoneOffset);
__ ObjectUntag(untagged_result, result);
__ Str(map_reg, MemOperand(untagged_result, HeapObject::kMapOffset));
__ Stp(empty_fixed_array, empty_fixed_array,
MemOperand(untagged_result, JSObject::kPropertiesOffset));
__ Stp(result_value, boolean_done,
- MemOperand(untagged_result,
- JSGeneratorObject::kResultValuePropertyOffset));
-
- // Only the value field needs a write barrier, as the other values are in the
- // root set.
- __ RecordWriteField(result, JSGeneratorObject::kResultValuePropertyOffset,
- x10, x11, kLRHasBeenSaved, kDontSaveFPRegs);
+ MemOperand(untagged_result, JSIteratorResult::kValueOffset));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
}
diff --git a/deps/v8/src/full-codegen/full-codegen.cc b/deps/v8/src/full-codegen/full-codegen.cc
index bb7b637d42..a29b59cf11 100644
--- a/deps/v8/src/full-codegen/full-codegen.cc
+++ b/deps/v8/src/full-codegen/full-codegen.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/full-codegen/full-codegen.h"
+
#include "src/ast.h"
#include "src/ast-numbering.h"
#include "src/code-factory.h"
@@ -9,7 +11,7 @@
#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
-#include "src/full-codegen/full-codegen.h"
+#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
#include "src/prettyprinter.h"
#include "src/scopeinfo.h"
@@ -50,8 +52,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
}
unsigned table_offset = cgen.EmitBackEdgeTable();
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
- Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
+ Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, info);
cgen.PopulateDeoptimizationData(code);
cgen.PopulateTypeFeedbackInfo(code);
cgen.PopulateHandlerTable(code);
@@ -90,28 +91,6 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
}
-void FullCodeGenerator::EnsureSlotContainsAllocationSite(
- FeedbackVectorSlot slot) {
- Handle<TypeFeedbackVector> vector = FeedbackVector();
- if (!vector->Get(slot)->IsAllocationSite()) {
- Handle<AllocationSite> allocation_site =
- isolate()->factory()->NewAllocationSite();
- vector->Set(slot, *allocation_site);
- }
-}
-
-
-void FullCodeGenerator::EnsureSlotContainsAllocationSite(
- FeedbackVectorICSlot slot) {
- Handle<TypeFeedbackVector> vector = FeedbackVector();
- if (!vector->Get(slot)->IsAllocationSite()) {
- Handle<AllocationSite> allocation_site =
- isolate()->factory()->NewAllocationSite();
- vector->Set(slot, *allocation_site);
- }
-}
-
-
void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
// Fill in the deoptimization information.
DCHECK(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
@@ -449,6 +428,12 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
}
+void FullCodeGenerator::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* declaration) {
+ Visit(declaration->statement());
+}
+
+
int FullCodeGenerator::DeclareGlobalsFlags() {
DCHECK(DeclareGlobalsLanguageMode::is_valid(language_mode()));
return DeclareGlobalsEvalFlag::encode(is_eval()) |
@@ -497,19 +482,6 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK_EQ(2, args->length());
-
- VisitForStackValue(args->at(0));
- VisitForStackValue(args->at(1));
-
- StringCompareStub stub(isolate());
- __ CallStub(&stub);
- context()->Plug(result_register());
-}
-
-
bool RecordStatementPosition(MacroAssembler* masm, int pos) {
if (pos == RelocInfo::kNoPosition) return false;
masm->positions_recorder()->RecordStatementPosition(pos);
@@ -884,7 +856,7 @@ void FullCodeGenerator::EmitUnwindBeforeReturn() {
void FullCodeGenerator::EmitPropertyKey(ObjectLiteralProperty* property,
BailoutId bailout_id) {
VisitForStackValue(property->key());
- __ InvokeBuiltin(Builtins::TO_NAME, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kToName, 1);
PrepareForBailoutForId(bailout_id, NO_REGISTERS);
__ Push(result_register());
}
@@ -1296,28 +1268,16 @@ void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
__ Push(Smi::FromInt(lit->start_position()));
__ Push(Smi::FromInt(lit->end_position()));
- __ CallRuntime(is_strong(language_mode()) ? Runtime::kDefineClassStrong
- : Runtime::kDefineClass,
- 5);
+ __ CallRuntime(Runtime::kDefineClass, 5);
PrepareForBailoutForId(lit->CreateLiteralId(), TOS_REG);
- int store_slot_index = 0;
- EmitClassDefineProperties(lit, &store_slot_index);
+ EmitClassDefineProperties(lit);
if (lit->scope() != NULL) {
DCHECK_NOT_NULL(lit->class_variable_proxy());
- FeedbackVectorICSlot slot =
- FLAG_vector_stores &&
- lit->class_variable_proxy()->var()->IsUnallocated()
- ? lit->GetNthSlot(store_slot_index++)
- : FeedbackVectorICSlot::Invalid();
EmitVariableAssignment(lit->class_variable_proxy()->var(),
- Token::INIT_CONST, slot);
+ Token::INIT_CONST, lit->ProxySlot());
}
-
- // Verify that compilation exactly consumed the number of store ic slots
- // that the ClassLiteral node had to offer.
- DCHECK(!FLAG_vector_stores || store_slot_index == lit->slot_count());
}
context()->Plug(result_register());
@@ -1400,6 +1360,11 @@ void FullCodeGenerator::ExitTryBlock(int handler_index) {
void FullCodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
+void FullCodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
+ UNREACHABLE();
+}
+
+
FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
int* stack_depth, int* context_length) {
// The macros used here must preserve the result register.
@@ -1548,7 +1513,7 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::EnterBlockScopeIfNeeded(
codegen_->PrepareForBailoutForId(entry_id, NO_REGISTERS);
needs_block_context_ = false;
} else {
- needs_block_context_ = scope->ContextLocalCount() > 0;
+ needs_block_context_ = scope->NeedsContext();
codegen_->scope_ = scope;
{
if (needs_block_context_) {
@@ -1586,6 +1551,65 @@ FullCodeGenerator::EnterBlockScopeIfNeeded::~EnterBlockScopeIfNeeded() {
}
+bool FullCodeGenerator::NeedsHoleCheckForLoad(VariableProxy* proxy) {
+ Variable* var = proxy->var();
+
+ if (!var->binding_needs_init()) {
+ return false;
+ }
+
+ // var->scope() may be NULL when the proxy is located in eval code and
+ // refers to a potential outside binding. Currently those bindings are
+ // always looked up dynamically, i.e. in that case
+ // var->location() == LOOKUP
+ // always holds.
+ DCHECK(var->scope() != NULL);
+ DCHECK(var->location() == VariableLocation::PARAMETER ||
+ var->location() == VariableLocation::LOCAL ||
+ var->location() == VariableLocation::CONTEXT);
+
+ // Check if the binding really needs an initialization check. The check
+ // can be skipped in the following situation: we have a LET or CONST
+ // binding in harmony mode, both the Variable and the VariableProxy have
+ // the same declaration scope (i.e. they are both in global code, in the
+ // same function or in the same eval code), the VariableProxy is in
+ // the source physically located after the initializer of the variable,
+ // and the initializer cannot be skipped due to a nonlinear scope.
+ //
+ // We cannot skip any initialization checks for CONST in non-harmony
+ // mode because const variables may be declared but never initialized:
+ // if (false) { const x; }; var y = x;
+ //
+ // The condition on the declaration scopes is a conservative check for
+ // nested functions that access a binding and are called before the
+ // binding is initialized:
+ // function() { f(); let x = 1; function f() { x = 2; } }
+ //
+ // The check cannot be skipped on non-linear scopes, namely switch
+ // scopes, to ensure tests are done in cases like the following:
+ // switch (1) { case 0: let x = 2; case 1: f(x); }
+ // The scope of the variable needs to be checked, in case the use is
+ // in a sub-block which may be linear.
+ if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+ return true;
+ }
+
+ if (var->is_this()) {
+ DCHECK(literal() != nullptr &&
+ (literal()->kind() & kSubclassConstructor) != 0);
+ // TODO(littledan): implement 'this' hole check elimination.
+ return true;
+ }
+
+ // Check that we always have a valid source position.
+ DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
+ DCHECK(proxy->position() != RelocInfo::kNoPosition);
+
+ return var->mode() == CONST_LEGACY || var->scope()->is_nonlinear() ||
+ var->initializer_position() >= proxy->position();
+}
+
+
#undef __
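Stripped of its DCHECKs, the new NeedsHoleCheckForLoad is a short decision procedure. A hedged restatement over plain data, keeping the same order of tests (a differing declaration scope and 'this' conservatively keep the check):

    struct Binding {
      bool needs_init;       // var->binding_needs_init()
      bool same_decl_scope;  // proxy and variable share a DeclarationScope
      bool is_this;
      bool legacy_const;     // CONST_LEGACY
      bool nonlinear_scope;  // e.g. switch scopes
      int init_pos, use_pos;
    };

    bool NeedsHoleCheck(const Binding& b) {
      if (!b.needs_init) return false;
      if (!b.same_decl_scope) return true;  // conservative for closures
      if (b.is_this) return true;           // no elimination yet (TODO above)
      return b.legacy_const || b.nonlinear_scope || b.init_pos >= b.use_pos;
    }

    int main() {
      // let x = 1; ... x  -- linear scope, use after init: check elided.
      return NeedsHoleCheck({true, true, false, false, false, 10, 42});
    }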
diff --git a/deps/v8/src/full-codegen/full-codegen.h b/deps/v8/src/full-codegen/full-codegen.h
index 34a5dc0454..02da16b865 100644
--- a/deps/v8/src/full-codegen/full-codegen.h
+++ b/deps/v8/src/full-codegen/full-codegen.h
@@ -76,7 +76,7 @@ class FullCodeGenerator: public AstVisitor {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
static const int kCodeSizeMultiplier = 105;
#elif V8_TARGET_ARCH_X64
- static const int kCodeSizeMultiplier = 170;
+ static const int kCodeSizeMultiplier = 165;
#elif V8_TARGET_ARCH_ARM
static const int kCodeSizeMultiplier = 149;
#elif V8_TARGET_ARCH_ARM64
@@ -420,22 +420,16 @@ class FullCodeGenerator: public AstVisitor {
void PrepareForBailout(Expression* node, State state);
void PrepareForBailoutForId(BailoutId id, State state);
- // Feedback slot support. The feedback vector will be cleared during gc and
- // collected by the type-feedback oracle.
- Handle<TypeFeedbackVector> FeedbackVector() const {
- return info_->feedback_vector();
- }
- void EnsureSlotContainsAllocationSite(FeedbackVectorSlot slot);
- void EnsureSlotContainsAllocationSite(FeedbackVectorICSlot slot);
-
// Returns a smi for the index into the FixedArray that backs the feedback
// vector
Smi* SmiFromSlot(FeedbackVectorSlot slot) const {
- return Smi::FromInt(FeedbackVector()->GetIndex(slot));
+ return Smi::FromInt(TypeFeedbackVector::GetIndexFromSpec(
+ literal()->feedback_vector_spec(), slot));
}
Smi* SmiFromSlot(FeedbackVectorICSlot slot) const {
- return Smi::FromInt(FeedbackVector()->GetIndex(slot));
+ return Smi::FromInt(TypeFeedbackVector::GetIndexFromSpec(
+ literal()->feedback_vector_spec(), slot));
}
// Record a call's return site offset, used to rebuild the frame if the
@@ -486,50 +480,51 @@ class FullCodeGenerator: public AstVisitor {
void EmitKeyedCallWithLoadIC(Call* expr, Expression* key);
void EmitKeyedSuperCallWithLoadIC(Call* expr);
-#define FOR_EACH_FULL_CODE_INTRINSIC(F) \
- F(IsSmi) \
- F(IsNonNegativeSmi) \
- F(IsArray) \
- F(IsTypedArray) \
- F(IsRegExp) \
- F(IsJSProxy) \
- F(IsConstructCall) \
- F(CallFunction) \
- F(DefaultConstructorCallSuper) \
- F(ArgumentsLength) \
- F(Arguments) \
- F(ValueOf) \
- F(SetValueOf) \
- F(IsDate) \
- F(DateField) \
- F(StringCharFromCode) \
- F(StringCharAt) \
- F(OneByteSeqStringSetChar) \
- F(TwoByteSeqStringSetChar) \
- F(ObjectEquals) \
- F(IsObject) \
- F(IsFunction) \
- F(IsSpecObject) \
- F(IsSimdValue) \
- F(IsStringWrapperSafeForDefaultValueOf) \
- F(MathPow) \
- F(IsMinusZero) \
- F(HasCachedArrayIndex) \
- F(GetCachedArrayIndex) \
- F(FastOneByteArrayJoin) \
- F(GeneratorNext) \
- F(GeneratorThrow) \
- F(DebugBreakInOptimizedCode) \
- F(ClassOf) \
- F(StringCharCodeAt) \
- F(StringAdd) \
- F(SubString) \
- F(StringCompare) \
- F(RegExpExec) \
- F(RegExpConstructResult) \
- F(NumberToString) \
- F(ToObject) \
- F(DebugIsActive)
+#define FOR_EACH_FULL_CODE_INTRINSIC(F) \
+ F(IsSmi) \
+ F(IsArray) \
+ F(IsTypedArray) \
+ F(IsRegExp) \
+ F(IsJSProxy) \
+ F(IsConstructCall) \
+ F(Call) \
+ F(CallFunction) \
+ F(DefaultConstructorCallSuper) \
+ F(ArgumentsLength) \
+ F(Arguments) \
+ F(ValueOf) \
+ F(SetValueOf) \
+ F(IsDate) \
+ F(DateField) \
+ F(StringCharFromCode) \
+ F(StringCharAt) \
+ F(OneByteSeqStringSetChar) \
+ F(TwoByteSeqStringSetChar) \
+ F(ObjectEquals) \
+ F(IsFunction) \
+ F(IsSpecObject) \
+ F(IsSimdValue) \
+ F(MathPow) \
+ F(IsMinusZero) \
+ F(HasCachedArrayIndex) \
+ F(GetCachedArrayIndex) \
+ F(FastOneByteArrayJoin) \
+ F(GeneratorNext) \
+ F(GeneratorThrow) \
+ F(DebugBreakInOptimizedCode) \
+ F(ClassOf) \
+ F(StringCharCodeAt) \
+ F(StringAdd) \
+ F(SubString) \
+ F(RegExpExec) \
+ F(RegExpConstructResult) \
+ F(ToInteger) \
+ F(NumberToString) \
+ F(ToString) \
+ F(ToName) \
+ F(ToObject) \
+ F(DebugIsActive) \
+ F(CreateIterResultObject)
#define GENERATOR_DECLARATION(Name) void Emit##Name(CallRuntime* call);
FOR_EACH_FULL_CODE_INTRINSIC(GENERATOR_DECLARATION)
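The intrinsic list above is an X-macro: GENERATOR_DECLARATION expands every F(Name) entry into an Emit##Name declaration. A self-contained demonstration of the pattern with a two-entry list:

    #define FOR_EACH_DEMO_INTRINSIC(F) \
      F(ToInteger)                     \
      F(CreateIterResultObject)

    struct CallRuntime;  // stand-in for the real AST node

    #define GENERATOR_DECLARATION(Name) void Emit##Name(CallRuntime* call);
    FOR_EACH_DEMO_INTRINSIC(GENERATOR_DECLARATION)
    #undef GENERATOR_DECLARATION
    // Expands to:
    //   void EmitToInteger(CallRuntime* call);
    //   void EmitCreateIterResultObject(CallRuntime* call);

    int main() {}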
@@ -550,7 +545,9 @@ class FullCodeGenerator: public AstVisitor {
void EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
- void EmitAccessor(Expression* expression);
+ void EmitAccessor(ObjectLiteralProperty* property);
+
+ bool NeedsHoleCheckForLoad(VariableProxy* proxy);
// Expects the arguments and the function already pushed.
void EmitResolvePossiblyDirectEval(int arg_count);
@@ -582,7 +579,7 @@ class FullCodeGenerator: public AstVisitor {
// Adds the properties to the class (function) object and to its prototype.
// Expects the class (function) in the accumulator. The class (function) is
// in the accumulator after installing all the properties.
- void EmitClassDefineProperties(ClassLiteral* lit, int* used_store_slots);
+ void EmitClassDefineProperties(ClassLiteral* lit);
// Pushes the property key as a Name on the stack.
void EmitPropertyKey(ObjectLiteralProperty* property, BailoutId bailout_id);
@@ -636,9 +633,11 @@ class FullCodeGenerator: public AstVisitor {
// Adds the [[HomeObject]] to |initializer| if it is a FunctionLiteral.
// The value of the initializer is expected to be at the top of the stack.
// |offset| is the offset in the stack where the home object can be found.
- void EmitSetHomeObjectIfNeeded(
- Expression* initializer, int offset,
- FeedbackVectorICSlot slot = FeedbackVectorICSlot::Invalid());
+ void EmitSetHomeObject(Expression* initializer, int offset,
+ FeedbackVectorICSlot slot);
+
+ void EmitSetHomeObjectAccumulator(Expression* initializer, int offset,
+ FeedbackVectorICSlot slot);
void EmitLoadSuperConstructor(SuperCallReference* super_call_ref);
@@ -696,10 +695,9 @@ class FullCodeGenerator: public AstVisitor {
Handle<Script> script() { return info_->script(); }
bool is_eval() { return info_->is_eval(); }
bool is_native() { return info_->is_native(); }
- LanguageMode language_mode() { return function()->language_mode(); }
+ LanguageMode language_mode() { return literal()->language_mode(); }
bool has_simple_parameters() { return info_->has_simple_parameters(); }
- // TODO(titzer): rename this to literal().
- FunctionLiteral* function() { return info_->literal(); }
+ FunctionLiteral* literal() const { return info_->literal(); }
Scope* scope() { return scope_; }
static Register result_register();
diff --git a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index 1503211b0b..cce7357962 100644
--- a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -94,14 +94,14 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
+ SetFunctionPosition(literal());
Comment cmnt(masm_, "[ function compiled by full code generator");
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
@@ -109,8 +109,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis()) {
+ if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
@@ -139,7 +138,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(function()->kind()) || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(literal()->kind()) || locals_count == 0);
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -151,7 +150,7 @@ void FullCodeGenerator::Generate() {
ExternalReference::address_of_real_stack_limit(isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
}
__ mov(eax, Immediate(isolate()->factory()->undefined_value()));
@@ -233,6 +232,11 @@ void FullCodeGenerator::Generate() {
}
}
+ PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
+ // Function register is trashed in case we bail out here. But since that
+ // could happen only when we allocate a context, the value of
+ // |function_in_register| is correct.
+
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
Variable* this_function_var = scope()->this_function_var();
@@ -240,7 +244,7 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ This function");
if (!function_in_register) {
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers register again, keep is marked as such.
+ // The write barrier clobbers the register again; keep it marked as such.
}
SetVar(this_function_var, edi, ebx, edx);
}
@@ -275,58 +279,29 @@ void FullCodeGenerator::Generate() {
SetVar(new_target_var, eax, ebx, edx);
}
-
- // Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
- if (rest_param) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ lea(edx,
- Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(edx);
- __ push(Immediate(Smi::FromInt(num_parameters)));
- __ push(Immediate(Smi::FromInt(rest_index)));
- __ push(Immediate(Smi::FromInt(language_mode())));
-
- RestParamAccessStub stub(isolate());
- __ CallStub(&stub);
-
- SetVar(rest_param, eax, ebx, edx);
- }
-
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(edi);
- } else {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
+ if (!function_in_register) {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ lea(edx,
+ __ mov(ArgumentsAccessNewDescriptor::parameter_count(),
+ Immediate(Smi::FromInt(num_parameters)));
+ __ lea(ArgumentsAccessNewDescriptor::parameter_pointer(),
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(edx);
- __ push(Immediate(Smi::FromInt(num_parameters)));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !has_simple_parameters()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
- }
+ // Arguments to ArgumentsAccessStub:
+ // function, parameter pointer, parameter count.
+ // The stub will rewrite parameter pointer and parameter count if the
+ // previous stack frame was an arguments adaptor frame.
+ bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
+ ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
+ is_unmapped, literal()->has_duplicate_parameters());
ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
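The deleted three-way branch spells out exactly what the new ArgumentsAccessStub::ComputeType helper has to decide. A hedged restatement of that choice (the helper's real body is not part of this diff):

    enum class ArgsType { NEW_STRICT, NEW_SLOPPY_SLOW, NEW_SLOPPY_FAST };

    // Mirrors the removed if/else chain: unmapped (strict mode or
    // non-simple parameters) wins, then duplicate parameters force
    // the slow sloppy path.
    ArgsType ComputeType(bool is_unmapped, bool has_duplicate_parameters) {
      if (is_unmapped) return ArgsType::NEW_STRICT;
      return has_duplicate_parameters ? ArgsType::NEW_SLOPPY_SLOW
                                      : ArgsType::NEW_SLOPPY_FAST;
    }

    int main() {
      return ComputeType(false, false) == ArgsType::NEW_SLOPPY_FAST ? 0 : 1;
    }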
@@ -367,7 +342,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
- VisitStatements(function()->body());
+ VisitStatements(literal()->body());
DCHECK(loop_depth() == 0);
}
}
@@ -460,7 +435,7 @@ void FullCodeGenerator::EmitReturnSequence() {
EmitProfilingCounterReset();
__ bind(&ok);
- SetReturnPosition(function());
+ SetReturnPosition(literal());
int no_frame_start = masm_->pc_offset();
__ leave();
@@ -746,7 +721,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
// Only prepare for bailouts before splits if we're in a test
// context. Otherwise, we let the Visit function deal with the
// preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
+ if (!context()->IsTest()) return;
Label skip;
if (should_normalize) __ jmp(&skip, Label::kNear);
@@ -1083,8 +1058,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&fixed_array);
// No need for a write barrier, we are storing a Smi in the feedback vector.
- __ LoadHeapObject(ebx, FeedbackVector());
- int vector_index = FeedbackVector()->GetIndex(slot);
+ __ EmitLoadTypeFeedbackVector(ebx);
+ int vector_index = SmiFromSlot(slot)->value();
__ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
@@ -1191,29 +1166,35 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
- __ push(esi);
__ push(Immediate(info));
- __ push(Immediate(pretenure
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value()));
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(
+ pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
}
context()->Plug(eax);
}
-void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset,
- FeedbackVectorICSlot slot) {
- if (NeedsHomeObject(initializer)) {
- __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- __ mov(StoreDescriptor::NameRegister(),
- Immediate(isolate()->factory()->home_object_symbol()));
- __ mov(StoreDescriptor::ValueRegister(),
- Operand(esp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
- }
+void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
+ FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
+ __ mov(StoreDescriptor::NameRegister(),
+ Immediate(isolate()->factory()->home_object_symbol()));
+ __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+}
+
+
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(
+ Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ mov(StoreDescriptor::ReceiverRegister(), eax);
+ __ mov(StoreDescriptor::NameRegister(),
+ Immediate(isolate()->factory()->home_object_symbol()));
+ __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
}
@@ -1384,66 +1365,26 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- DCHECK(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else if (var->is_this()) {
- CHECK(function() != nullptr &&
- (function()->kind() & kSubclassConstructor) != 0);
- // TODO(dslomov): implement 'this' hole check elimination.
- skip_init_check = false;
- } else {
- // Check that we always have valid source position.
- DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
- DCHECK(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST_LEGACY &&
- var->initializer_position() < proxy->position();
- }
- if (!skip_init_check) {
- // Let and const need a read barrier.
- Label done;
- GetVar(eax, var);
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else {
- // Uninitalized const bindings outside of harmony mode are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ mov(eax, isolate()->factory()->undefined_value());
- }
- __ bind(&done);
- context()->Plug(eax);
- break;
+ if (NeedsHoleCheckForLoad(proxy)) {
+ // Let and const need a read barrier.
+ Label done;
+ GetVar(eax, var);
+ __ cmp(eax, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &done, Label::kNear);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ } else {
+ // Uninitialized legacy const bindings are unholed.
+ DCHECK(var->mode() == CONST_LEGACY);
+ __ mov(eax, isolate()->factory()->undefined_value());
}
+ __ bind(&done);
+ context()->Plug(eax);
+ break;
}
context()->Plug(var);
break;
@@ -1481,8 +1422,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// eax = regexp literal clone.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
__ mov(ebx, FieldOperand(ecx, literal_offset));
__ cmp(ebx, isolate()->factory()->undefined_value());
__ j(not_equal, &materialized, Label::kNear);
@@ -1525,11 +1465,18 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
+void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
+ Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ push(Immediate(isolate()->factory()->null_value()));
} else {
VisitForStackValue(expression);
+ if (NeedsHomeObject(expression)) {
+ DCHECK(property->kind() == ObjectLiteral::Property::GETTER ||
+ property->kind() == ObjectLiteral::Property::SETTER);
+ int offset = property->kind() == ObjectLiteral::Property::GETTER ? 2 : 3;
+ EmitSetHomeObject(expression, offset, property->GetSlot());
+ }
}
}
@@ -1565,10 +1512,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
AccessorTable accessor_table(zone());
int property_index = 0;
- // store_slot_index points to the vector IC slot for the next store IC used.
- // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
- // and must be updated if the number of store ICs emitted here changes.
- int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1596,22 +1539,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
} else {
CallStoreIC(key->LiteralFeedbackId());
}
PrepareForBailoutForId(key->id(), NO_REGISTERS);
-
if (NeedsHomeObject(value)) {
- __ mov(StoreDescriptor::ReceiverRegister(), eax);
- __ mov(StoreDescriptor::NameRegister(),
- Immediate(isolate()->factory()->home_object_symbol()));
- __ mov(StoreDescriptor::ValueRegister(), Operand(esp, 0));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
- }
- CallStoreIC();
+ EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
}
} else {
VisitForEffect(value);
@@ -1622,8 +1557,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
__ push(Immediate(Smi::FromInt(SLOPPY))); // Language mode
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
@@ -1638,12 +1574,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = value;
+ accessor_table.lookup(key)->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = value;
+ accessor_table.lookup(key)->second->setter = property;
}
break;
}
@@ -1656,15 +1592,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
++it) {
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
- EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(
- it->second->getter, 2,
- expr->SlotForHomeObject(it->second->getter, &store_slot_index));
+ EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(
- it->second->setter, 3,
- expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ push(Immediate(Smi::FromInt(NONE)));
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1698,8 +1628,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1741,10 +1672,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(eax);
}
-
- // Verify that compilation exactly consumed the number of store ic slots that
- // the ObjectLiteral node had to offer.
- DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
@@ -1841,7 +1768,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(eax);
if (subexpr->IsSpread()) {
VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
__ CallRuntime(Runtime::kAppendElement, 2);
@@ -2251,40 +2179,28 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label gc_required;
- Label allocated;
-
- const int instance_size = 5 * kPointerSize;
- DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
- instance_size);
+ Label allocate, done_allocate;
- __ Allocate(instance_size, eax, ecx, edx, &gc_required, TAG_OBJECT);
- __ jmp(&allocated);
+ __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &allocate, TAG_OBJECT);
+ __ jmp(&done_allocate, Label::kNear);
- __ bind(&gc_required);
- __ Push(Smi::FromInt(instance_size));
+ __ bind(&allocate);
+ __ Push(Smi::FromInt(JSIteratorResult::kSize));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ mov(context_register(),
- Operand(ebp, StandardFrameConstants::kContextOffset));
- __ bind(&allocated);
- __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ bind(&done_allocate);
+ __ mov(ebx, GlobalObjectOperand());
__ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
- __ pop(ecx);
- __ mov(edx, isolate()->factory()->ToBoolean(done));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
isolate()->factory()->empty_fixed_array());
__ mov(FieldOperand(eax, JSObject::kElementsOffset),
isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSGeneratorObject::kResultValuePropertyOffset), ecx);
- __ mov(FieldOperand(eax, JSGeneratorObject::kResultDonePropertyOffset), edx);
-
- // Only the value field needs a write barrier, as the other values are in the
- // root set.
- __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset,
- ecx, edx, kDontSaveFPRegs);
+ __ pop(FieldOperand(eax, JSIteratorResult::kValueOffset));
+ __ mov(FieldOperand(eax, JSIteratorResult::kDoneOffset),
+ isolate()->factory()->ToBoolean(done));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
}
@@ -2426,8 +2342,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
- int* used_store_slots) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Constructor is in eax.
DCHECK(lit != NULL);
__ push(eax);
@@ -2459,8 +2374,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2,
- lit->SlotForHomeObject(value, used_store_slots));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -3157,18 +3073,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(edi, Operand(esp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- }
-
- __ LoadHeapObject(ebx, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(ebx);
__ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->Plug(eax);
}
@@ -3201,16 +3113,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ mov(edi, Operand(esp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- UNREACHABLE();
- /* TODO(dslomov): support pretenuring.
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- */
- }
-
- __ LoadHeapObject(ebx, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(ebx);
__ mov(edx, Immediate(SmiFromSlot(expr->CallFeedbackSlot())));
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
@@ -3218,6 +3121,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->Plug(eax);
}
@@ -3243,59 +3148,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, Immediate(kSmiTagMask | 0x80000000));
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, if_true);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ movzx_b(ecx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ecx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(below, if_false);
- __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(below_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3340,97 +3192,6 @@ void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false, skip_lookup;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(eax);
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(not_zero, &skip_lookup);
-
- // Check for fast case object. Return false for slow case objects.
- __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ cmp(ecx, isolate()->factory()->hash_table_map());
- __ j(equal, if_false);
-
- // Look for valueOf string in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(ecx, ebx);
- __ cmp(ecx, 0);
- __ j(equal, &done);
-
- __ LoadInstanceDescriptors(ebx, ebx);
- // ebx: descriptor array.
- // ecx: valid entries in the descriptor array.
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ imul(ecx, ecx, DescriptorArray::kDescriptorSize);
- __ lea(ecx, Operand(ebx, ecx, times_4, DescriptorArray::kFirstOffset));
- // Calculate location of the first key name.
- __ add(ebx, Immediate(DescriptorArray::kFirstOffset));
- // Loop through all the keys in the descriptor array. If one of these is the
- // internalized string "valueOf" the result is false.
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, FieldOperand(ebx, 0));
- __ cmp(edx, isolate()->factory()->value_of_string());
- __ j(equal, if_false);
- __ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ cmp(ebx, ecx);
- __ j(not_equal, &loop);
-
- __ bind(&done);
-
- // Reload map as register ebx was used as temporary above.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-
- // Set the bit in the map to indicate that there is no local valueOf field.
- __ or_(FieldOperand(ebx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
-
- __ bind(&skip_lookup);
-
- // If a valueOf property is not found on the object check that its
- // prototype is the un-modified String prototype. If not result is false.
- __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ JumpIfSmi(ecx, if_false);
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edx,
- FieldOperand(edx, GlobalObject::kNativeContextOffset));
- __ cmp(ecx,
- ContextOperand(edx,
- Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3902,6 +3663,23 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into eax and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Convert the object to an integer.
+ Label done_convert;
+ __ JumpIfSmi(eax, &done_convert, Label::kNear);
+ __ Push(eax);
+ __ CallRuntime(Runtime::kToInteger, 1);
+ __ bind(&done_convert);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(args->length(), 1);
@@ -3915,6 +3693,40 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into eax and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ ToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitToName(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into eax and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Convert the object to a name.
+ Label convert, done_convert;
+ __ JumpIfSmi(eax, &convert, Label::kNear);
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ CmpObjectType(eax, LAST_NAME_TYPE, ecx);
+ __ j(below_equal, &done_convert, Label::kNear);
+ __ bind(&convert);
+ __ Push(eax);
+ __ CallRuntime(Runtime::kToName, 1);
+ __ bind(&done_convert);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -4054,6 +3866,26 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCall(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_LE(2, args->length());
+ // Push target, receiver and arguments onto the stack.
+ for (Expression* const arg : *args) {
+ VisitForStackValue(arg);
+ }
+ // Move target to edi.
+ int const argc = args->length() - 2;
+ __ mov(edi, Operand(esp, (argc + 1) * kPointerSize));
+ // Call the target.
+ __ mov(eax, Immediate(argc));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ context()->DropAndPlug(1, eax);
+}
+
+
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4079,7 +3911,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ bind(&runtime);
__ push(eax);
- __ CallRuntime(Runtime::kCall, args->length());
+ __ CallRuntime(Runtime::kCallFunction, args->length());
__ bind(&done);
context()->Plug(eax);
@@ -4090,16 +3922,9 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- // new.target
+ // Evaluate new.target and super constructor.
VisitForStackValue(args->at(0));
-
- // .this_function
VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kGetPrototype, 1);
- __ push(result_register());
-
- // Load original constructor into ecx.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
@@ -4130,14 +3955,14 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ bind(&args_set_up);
- __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
- __ mov(ebx, Immediate(isolate()->factory()->undefined_value()));
- CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ mov(edx, Operand(esp, eax, times_pointer_size, 1 * kPointerSize));
+ __ mov(edi, Operand(esp, eax, times_pointer_size, 0 * kPointerSize));
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
+ context()->DropAndPlug(1, eax);
}
@@ -4468,17 +4293,43 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ Label runtime, done;
+
+ __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime, TAG_OBJECT);
+ __ mov(ebx, GlobalObjectOperand());
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ pop(FieldOperand(eax, JSIteratorResult::kDoneOffset));
+ __ pop(FieldOperand(eax, JSIteratorResult::kValueOffset));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&runtime);
+ __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+
+ __ bind(&done);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
- // Push the builtins object as receiver.
- __ mov(eax, GlobalObjectOperand());
- __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
+ // Push undefined as receiver.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
- // Load the function from the receiver.
- __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
- __ mov(LoadDescriptor::NameRegister(), Immediate(expr->name()));
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF);
+ __ mov(eax, GlobalObjectOperand());
+ __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
+ __ mov(eax, ContextOperand(eax, expr->context_index()));
}
@@ -4964,27 +4815,27 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(eax, if_false);
// Check for undetectable objects => true.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
Split(not_zero, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(eax, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
- __ j(equal, if_true);
- __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
+ // Check for callable and not undetectable objects => true.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ and_(ecx, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+ __ cmp(ecx, 1 << Map::kIsCallable);
Split(equal, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(eax, if_false);
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
- __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, edx);
__ j(below, if_false);
- __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, if_false);
- // Check for undetectable objects => false.
+ // Check for callable or undetectable objects => false.
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
Split(zero, if_true, if_false, fall_through);
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
@@ -5025,20 +4876,20 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kHasProperty, 2);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ VisitForAccumulatorValue(expr->right());
+ __ Pop(edx);
+ InstanceOfStub stub(isolate());
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, eax);
- // The stub returns 0 for true.
- Split(zero, if_true, if_false, fall_through);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ cmp(eax, isolate()->factory()->true_value());
+ Split(equal, if_true, if_false, fall_through);
break;
}
diff --git a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index 2f0173191b..f38c01bbea 100644
--- a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -113,7 +113,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
+ SetFunctionPosition(literal());
Comment cmnt(masm_, "[ function compiled by full code generator");
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -128,8 +128,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis() && info->scope()->has_this_declaration()) {
+ if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ lw(at, MemOperand(sp, receiver_offset));
@@ -163,7 +162,7 @@ void FullCodeGenerator::Generate() {
__ Subu(t5, sp, Operand(locals_count * kPointerSize));
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
__ Branch(&ok, hs, t5, Operand(a2));
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
}
__ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
@@ -191,7 +190,7 @@ void FullCodeGenerator::Generate() {
}
}
- bool function_in_register = true;
+ bool function_in_register_a1 = true;
// Possibly allocate a local context.
if (info->scope()->num_heap_slots() > 0) {
@@ -212,7 +211,7 @@ void FullCodeGenerator::Generate() {
__ push(a1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- function_in_register = false;
+ function_in_register_a1 = false;
// Context is returned in v0. It replaces the context passed to us.
// It's saved in the stack and kept live in cp.
__ mov(cp, v0);
@@ -245,14 +244,19 @@ void FullCodeGenerator::Generate() {
}
}
+ PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
+ // Function register is trashed in case we bailout here. But since that
+ // could happen only when we allocate a context the value of
+ // |function_in_register_a1| is correct.
+
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
Variable* this_function_var = scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
- if (!function_in_register) {
+ if (!function_in_register_a1) {
__ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers register again, keep is marked as such.
+ // The write barrier clobbers register again, keep it marked as such.
}
SetVar(this_function_var, a1, a2, a3);
}
@@ -274,6 +278,7 @@ void FullCodeGenerator::Generate() {
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
__ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ function_in_register_a1 = false;
Label non_construct_frame, done;
__ Branch(&non_construct_frame, ne, a1,
@@ -290,58 +295,30 @@ void FullCodeGenerator::Generate() {
SetVar(new_target_var, v0, a2, a3);
}
- // Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
- if (rest_param) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ Addu(a3, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ li(a2, Operand(Smi::FromInt(num_parameters)));
- __ li(a1, Operand(Smi::FromInt(rest_index)));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(a3, a2, a1, a0);
-
- RestParamAccessStub stub(isolate());
- __ CallStub(&stub);
-
- SetVar(rest_param, v0, a1, a2);
- }
-
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
+ DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
+ if (!function_in_register_a1) {
// Load this again, if it's used by the local context below.
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(a3, a1);
+ __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ Addu(a2, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ li(a1, Operand(Smi::FromInt(num_parameters)));
- __ Push(a3, a2, a1);
+ __ li(ArgumentsAccessNewDescriptor::parameter_count(),
+ Operand(Smi::FromInt(num_parameters)));
+ __ Addu(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
// Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !has_simple_parameters()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
- }
+ // function, parameter pointer, parameter count.
+ // The stub will rewrite parameter pointer and parameter count if the
+ // previous stack frame was an arguments adapter frame.
+ bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
+ ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
+ is_unmapped, literal()->has_duplicate_parameters());
ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
@@ -383,7 +360,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
- VisitStatements(function()->body());
+ VisitStatements(literal()->body());
DCHECK(loop_depth() == 0);
}
}
@@ -495,7 +472,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// tool from instrumenting as we rely on the code size here.
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
- SetReturnPosition(function());
+ SetReturnPosition(literal());
masm_->mov(sp, fp);
int no_frame_start = masm_->pc_offset();
masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
@@ -793,7 +770,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
// Only prepare for bailouts before splits if we're in a test
// context. Otherwise, we let the Visit function deal with the
// preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
+ if (!context()->IsTest()) return;
Label skip;
if (should_normalize) __ Branch(&skip);
@@ -1147,9 +1124,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- __ li(a1, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(a1);
__ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- int vector_index = FeedbackVector()->GetIndex(slot);
+ int vector_index = SmiFromSlot(slot)->value();
__ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
@@ -1258,28 +1235,37 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ li(a2, Operand(info));
__ CallStub(&stub);
} else {
- __ li(a0, Operand(info));
- __ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
- : Heap::kFalseValueRootIndex);
- __ Push(cp, a0, a1);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ Push(info);
+ __ CallRuntime(
+ pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
}
context()->Plug(v0);
}
-void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset,
- FeedbackVectorICSlot slot) {
- if (NeedsHomeObject(initializer)) {
- __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- __ li(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ lw(StoreDescriptor::ValueRegister(),
- MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
- }
+void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
+ FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ __ li(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ lw(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+}
+
+
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(
+ Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ Move(StoreDescriptor::ReceiverRegister(), v0);
+ __ li(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ lw(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
}
@@ -1451,69 +1437,28 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- DCHECK(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else if (var->is_this()) {
- CHECK(info_->has_literal() &&
- (info_->literal()->kind() & kSubclassConstructor) != 0);
- // TODO(dslomov): implement 'this' hole check elimination.
- skip_init_check = false;
+ if (NeedsHoleCheckForLoad(proxy)) {
+ // Let and const need a read barrier.
+ GetVar(v0, var);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ subu(at, v0, at); // Sub as compare: at == 0 on eq.
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ Label done;
+ __ Branch(&done, ne, at, Operand(zero_reg));
+ __ li(a0, Operand(var->name()));
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&done);
} else {
- // Check that we always have valid source position.
- DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
- DCHECK(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST_LEGACY &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- GetVar(v0, var);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ subu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- Label done;
- __ Branch(&done, ne, at, Operand(zero_reg));
- __ li(a0, Operand(var->name()));
- __ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&done);
- } else {
- // Uninitalized const bindings outside of harmony mode are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
- }
- context()->Plug(v0);
- break;
+ // Uninitialized legacy const bindings are unholed.
+ DCHECK(var->mode() == CONST_LEGACY);
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
}
+ context()->Plug(v0);
+ break;
}
context()->Plug(var);
break;
@@ -1552,8 +1497,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// a0 = RegExp literal clone
__ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(t0, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
__ lw(t1, FieldMemOperand(t0, literal_offset));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&materialized, ne, t1, Operand(at));
@@ -1590,12 +1534,19 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
+void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
+ Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ LoadRoot(a1, Heap::kNullValueRootIndex);
__ push(a1);
} else {
VisitForStackValue(expression);
+ if (NeedsHomeObject(expression)) {
+ DCHECK(property->kind() == ObjectLiteral::Property::GETTER ||
+ property->kind() == ObjectLiteral::Property::SETTER);
+ int offset = property->kind() == ObjectLiteral::Property::GETTER ? 2 : 3;
+ EmitSetHomeObject(expression, offset, property->GetSlot());
+ }
}
}
@@ -1624,10 +1575,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
AccessorTable accessor_table(zone());
int property_index = 0;
- // store_slot_index points to the vector IC slot for the next store IC used.
- // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
- // and must be updated if the number of store ICs emitted here changes.
- int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1656,7 +1603,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ li(StoreDescriptor::NameRegister(), Operand(key->value()));
__ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
} else {
CallStoreIC(key->LiteralFeedbackId());
@@ -1664,14 +1611,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
- __ Move(StoreDescriptor::ReceiverRegister(), v0);
- __ li(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ lw(StoreDescriptor::ValueRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
- }
- CallStoreIC();
+ EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
}
} else {
VisitForEffect(value);
@@ -1684,8 +1624,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
__ li(a0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes.
__ push(a0);
__ CallRuntime(Runtime::kSetProperty, 4);
@@ -1703,12 +1644,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = value;
+ accessor_table.lookup(key)->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = value;
+ accessor_table.lookup(key)->second->setter = property;
}
break;
}
@@ -1723,13 +1664,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(a0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(
- it->second->getter, 2,
- expr->SlotForHomeObject(it->second->getter, &store_slot_index));
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(
- it->second->setter, 3,
- expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1764,8 +1699,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1811,10 +1747,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(v0);
}
-
- // Verify that compilation exactly consumed the number of store ic slots that
- // the ObjectLiteral node had to offer.
- DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
@@ -1907,7 +1839,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(v0);
if (subexpr->IsSpread()) {
VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
__ CallRuntime(Runtime::kAppendElement, 2);
@@ -2323,41 +2256,29 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label gc_required;
- Label allocated;
+ Label allocate, done_allocate;
- const int instance_size = 5 * kPointerSize;
- DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
- instance_size);
-
- __ Allocate(instance_size, v0, a2, a3, &gc_required, TAG_OBJECT);
- __ jmp(&allocated);
+ __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
+ __ jmp(&done_allocate);
- __ bind(&gc_required);
- __ Push(Smi::FromInt(instance_size));
+ __ bind(&allocate);
+ __ Push(Smi::FromInt(JSIteratorResult::kSize));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ lw(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ bind(&allocated);
+ __ bind(&done_allocate);
__ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
__ lw(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
__ pop(a2);
- __ li(a3, Operand(isolate()->factory()->ToBoolean(done)));
- __ li(t0, Operand(isolate()->factory()->empty_fixed_array()));
+ __ LoadRoot(a3,
+ done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ LoadRoot(t0, Heap::kEmptyFixedArrayRootIndex);
__ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sw(a2,
- FieldMemOperand(v0, JSGeneratorObject::kResultValuePropertyOffset));
- __ sw(a3,
- FieldMemOperand(v0, JSGeneratorObject::kResultDonePropertyOffset));
-
- // Only the value field needs a write barrier, as the other values are in the
- // root set.
- __ RecordWriteField(v0, JSGeneratorObject::kResultValuePropertyOffset,
- a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ __ sw(a2, FieldMemOperand(v0, JSIteratorResult::kValueOffset));
+ __ sw(a3, FieldMemOperand(v0, JSIteratorResult::kDoneOffset));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
}
@@ -2497,8 +2418,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
- int* used_store_slots) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Constructor is in v0.
DCHECK(lit != NULL);
__ push(v0);
@@ -2532,8 +2452,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2,
- lit->SlotForHomeObject(value, used_store_slots));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -3235,7 +3156,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Push constructor on the stack. If it's not a function it's used as
// receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
- // ignored.
+  // ignored.
DCHECK(!expr->expression()->IsSuperPropertyReference());
VisitForStackValue(expr->expression());
@@ -3255,18 +3176,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- }
-
- __ li(a2, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(a2);
__ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(v0);
}
@@ -3299,16 +3216,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- UNREACHABLE();
- /* TODO(dslomov): support pretenuring.
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- */
- }
-
- __ li(a2, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(a2);
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
@@ -3316,6 +3224,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(v0);
}
@@ -3341,58 +3251,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ NonNegativeSmiTst(v0, at);
- Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(if_true, eq, v0, Operand(at));
- __ lw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ lbu(a1, FieldMemOperand(a2, Map::kBitFieldOffset));
- __ And(at, a1, Operand(1 << Map::kIsUndetectable));
- __ Branch(if_false, ne, at, Operand(zero_reg));
- __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
- if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3438,94 +3296,6 @@ void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false, skip_lookup;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(v0);
-
- __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset));
- __ And(t0, t0, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ Branch(&skip_lookup, ne, t0, Operand(zero_reg));
-
- // Check for fast case object. Generate false result for slow case object.
- __ lw(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ LoadRoot(t0, Heap::kHashTableMapRootIndex);
- __ Branch(if_false, eq, a2, Operand(t0));
-
- // Look for valueOf name in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(a3, a1);
- __ Branch(&done, eq, a3, Operand(zero_reg));
-
- __ LoadInstanceDescriptors(a1, t0);
- // t0: descriptor array.
- // a3: valid entries in the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ li(at, Operand(DescriptorArray::kDescriptorSize));
- __ Mul(a3, a3, at);
- // Calculate location of the first key name.
- __ Addu(t0, t0, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
- // Calculate the end of the descriptor array.
- __ mov(a2, t0);
- __ sll(t1, a3, kPointerSizeLog2);
- __ Addu(a2, a2, t1);
-
- // Loop through all the keys in the descriptor array. If one of these is the
- // string "valueOf" the result is false.
- // The use of t2 to store the valueOf string assumes that it is not otherwise
- // used in the loop below.
- __ li(t2, Operand(isolate()->factory()->value_of_string()));
- __ jmp(&entry);
- __ bind(&loop);
- __ lw(a3, MemOperand(t0, 0));
- __ Branch(if_false, eq, a3, Operand(t2));
- __ Addu(t0, t0, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ Branch(&loop, ne, t0, Operand(a2));
-
- __ bind(&done);
-
- // Set the bit in the map to indicate that there is no local valueOf field.
- __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
- __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
-
- __ bind(&skip_lookup);
-
- // If a valueOf property is not found on the object check that its
- // prototype is the un-modified String prototype. If not result is false.
- __ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
- __ JumpIfSmi(a2, if_false);
- __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
- __ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a2, Operand(a3), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4007,6 +3777,23 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into v0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Convert the object to an integer.
+ Label done_convert;
+ __ JumpIfSmi(v0, &done_convert);
+ __ Push(v0);
+ __ CallRuntime(Runtime::kToInteger, 1);
+ __ bind(&done_convert);
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(args->length(), 1);
@@ -4021,6 +3808,40 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into a0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(a0, result_register());
+
+ ToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitToName(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into v0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ Label convert, done_convert;
+ __ JumpIfSmi(v0, &convert);
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ GetObjectType(v0, a1, a1);
+ __ Branch(&done_convert, le, a1, Operand(LAST_NAME_TYPE));
+ __ bind(&convert);
+ __ Push(v0);
+ __ CallRuntime(Runtime::kToName, 1);
+ __ bind(&done_convert);
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -4164,6 +3985,26 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCall(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_LE(2, args->length());
+ // Push target, receiver and arguments onto the stack.
+ for (Expression* const arg : *args) {
+ VisitForStackValue(arg);
+ }
+ // Move target to a1.
+ int const argc = args->length() - 2;
+ __ lw(a1, MemOperand(sp, (argc + 1) * kPointerSize));
+ // Call the target.
+ __ li(a0, Operand(argc));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ context()->DropAndPlug(1, v0);
+}
+
+
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4189,7 +4030,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ bind(&runtime);
__ push(v0);
- __ CallRuntime(Runtime::kCall, args->length());
+ __ CallRuntime(Runtime::kCallFunction, args->length());
__ bind(&done);
context()->Plug(v0);
@@ -4200,22 +4041,18 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- // new.target
+ // Evaluate new.target and super constructor.
VisitForStackValue(args->at(0));
-
- // .this_function
VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kGetPrototype, 1);
- __ Push(result_register());
- // Load original constructor into t0.
- __ lw(t0, MemOperand(sp, 1 * kPointerSize));
+ // Load original constructor into a3.
+ __ lw(a3, MemOperand(sp, 1 * kPointerSize));
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame, eq, a3,
+ __ lw(t0, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, t0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// default constructor has no arguments, so no adaptor frame means no args.
__ mov(a0, zero_reg);
@@ -4238,8 +4075,8 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
// Pre-decrement a2 with kPointerSize on each iteration.
// Pre-decrement in order to skip receiver.
__ Addu(a2, a2, Operand(-kPointerSize));
- __ lw(a3, MemOperand(a2));
- __ Push(a3);
+ __ lw(t0, MemOperand(a2));
+ __ Push(t0);
__ Addu(a1, a1, Operand(-1));
__ Branch(&loop, ne, a1, Operand(zero_reg));
}
@@ -4248,14 +4085,12 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ sll(at, a0, kPointerSizeLog2);
__ Addu(at, at, Operand(sp));
__ lw(a1, MemOperand(at, 0));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
- CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- __ Drop(1);
+ // Restore context register.
+ __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(result_register());
+ context()->DropAndPlug(1, result_register());
}
@@ -4552,18 +4387,44 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ Label runtime, done;
+
+ __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime, TAG_OBJECT);
+ __ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
+ __ lw(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ Pop(a2, a3);
+ __ LoadRoot(t0, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ sw(a2, FieldMemOperand(v0, JSIteratorResult::kValueOffset));
+ __ sw(a3, FieldMemOperand(v0, JSIteratorResult::kDoneOffset));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ __ jmp(&done);
+
+ __ bind(&runtime);
+ __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
- // Push the builtins object as the receiver.
- Register receiver = LoadDescriptor::ReceiverRegister();
- __ lw(receiver, GlobalObjectOperand());
- __ lw(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
- __ push(receiver);
+ // Push undefined as the receiver.
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ push(v0);
- // Load the function from the receiver.
- __ li(LoadDescriptor::NameRegister(), Operand(expr->name()));
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF);
+ __ lw(v0, GlobalObjectOperand());
+ __ lw(v0, FieldMemOperand(v0, GlobalObject::kNativeContextOffset));
+ __ lw(v0, ContextOperand(v0, expr->context_index()));
}
@@ -5054,23 +4915,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
} else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(v0, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ GetObjectType(v0, v0, a1);
- __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
- Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
- if_true, if_false, fall_through);
+ __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ Split(eq, a1, Operand(1 << Map::kIsCallable), if_true, if_false,
+ fall_through);
} else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(v0, if_false);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
- // Check for JS objects => true.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ GetObjectType(v0, v0, a1);
- __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ lbu(a1, FieldMemOperand(v0, Map::kInstanceTypeOffset));
- __ Branch(if_false, gt, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- // Check for undetectable objects => false.
+ __ Branch(if_false, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Check for callable or undetectable objects => false.
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
- __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ __ And(a1, a1,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
@@ -5111,19 +4972,21 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kHasProperty, 2);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(t0, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(t0), if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ VisitForAccumulatorValue(expr->right());
+ __ mov(a0, result_register());
+ __ pop(a1);
+ InstanceOfStub stub(isolate());
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- // The stub returns 0 for true.
- Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ LoadRoot(at, Heap::kTrueValueRootIndex);
+ Split(eq, v0, Operand(at), if_true, if_false, fall_through);
break;
}
diff --git a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index db25e433dc..dcdff515ef 100644
--- a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -113,7 +113,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
+ SetFunctionPosition(literal());
Comment cmnt(masm_, "[ function compiled by full code generator");
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -128,8 +128,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis() && info->scope()->has_this_declaration()) {
+ if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ ld(at, MemOperand(sp, receiver_offset));
@@ -160,7 +159,7 @@ void FullCodeGenerator::Generate() {
__ Dsubu(t1, sp, Operand(locals_count * kPointerSize));
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
__ Branch(&ok, hs, t1, Operand(a2));
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
}
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
@@ -188,7 +187,7 @@ void FullCodeGenerator::Generate() {
}
}
- bool function_in_register = true;
+ bool function_in_register_a1 = true;
// Possibly allocate a local context.
if (info->scope()->num_heap_slots() > 0) {
@@ -209,7 +208,7 @@ void FullCodeGenerator::Generate() {
__ push(a1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- function_in_register = false;
+ function_in_register_a1 = false;
// Context is returned in v0. It replaces the context passed to us.
// It's saved in the stack and kept live in cp.
__ mov(cp, v0);
@@ -242,14 +241,19 @@ void FullCodeGenerator::Generate() {
}
}
+ PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
+ // Function register is trashed in case we bailout here. But since that
+ // could happen only when we allocate a context the value of
+ // |function_in_register_a1| is correct.
+
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
Variable* this_function_var = scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
- if (!function_in_register) {
+ if (!function_in_register_a1) {
__ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers register again, keep is marked as such.
+ // The write barrier clobbers register again, keep it marked as such.
}
SetVar(this_function_var, a1, a2, a3);
}
@@ -270,6 +274,7 @@ void FullCodeGenerator::Generate() {
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
__ ld(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+ function_in_register_a1 = false;
Label non_construct_frame, done;
__ Branch(&non_construct_frame, ne, a1,
@@ -286,58 +291,30 @@ void FullCodeGenerator::Generate() {
SetVar(new_target_var, v0, a2, a3);
}
- // Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
- if (rest_param) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ Daddu(a3, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ li(a2, Operand(Smi::FromInt(num_parameters)));
- __ li(a1, Operand(Smi::FromInt(rest_index)));
- __ li(a0, Operand(Smi::FromInt(language_mode())));
- __ Push(a3, a2, a1, a0);
-
- RestParamAccessStub stub(isolate());
- __ CallStub(&stub);
-
- SetVar(rest_param, v0, a1, a2);
- }
-
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
+ DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
+ if (!function_in_register_a1) {
// Load this again, if it's used by the local context below.
- __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mov(a3, a1);
+ __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ Daddu(a2, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ li(a1, Operand(Smi::FromInt(num_parameters)));
- __ Push(a3, a2, a1);
+ __ li(ArgumentsAccessNewDescriptor::parameter_count(),
+ Operand(Smi::FromInt(num_parameters)));
+ __ Daddu(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
// Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiever and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !has_simple_parameters()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
- }
+ // function, parameter pointer, parameter count.
+ // The stub will rewrite parameter pointer and parameter count if the
+ // previous stack frame was an arguments adapter frame.
+ bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
+ ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
+ is_unmapped, literal()->has_duplicate_parameters());
ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
@@ -347,6 +324,7 @@ void FullCodeGenerator::Generate() {
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
+
// Visit the declarations and body unless there is an illegal
// redeclaration.
if (scope()->HasIllegalRedeclaration()) {
@@ -379,7 +357,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
- VisitStatements(function()->body());
+ VisitStatements(literal()->body());
DCHECK(loop_depth() == 0);
}
@@ -492,7 +470,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// tool from instrumenting as we rely on the code size here.
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
- SetReturnPosition(function());
+ SetReturnPosition(literal());
masm_->mov(sp, fp);
int no_frame_start = masm_->pc_offset();
masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
@@ -790,7 +768,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
// Only prepare for bailouts before splits if we're in a test
// context. Otherwise, we let the Visit function deal with the
// preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
+ if (!context()->IsTest()) return;
Label skip;
if (should_normalize) __ Branch(&skip);
@@ -1144,9 +1122,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- __ li(a1, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(a1);
__ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- int vector_index = FeedbackVector()->GetIndex(slot);
+ int vector_index = SmiFromSlot(slot)->value();
__ sd(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(vector_index)));
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check
@@ -1255,28 +1233,37 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ li(a2, Operand(info));
__ CallStub(&stub);
} else {
- __ li(a0, Operand(info));
- __ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
- : Heap::kFalseValueRootIndex);
- __ Push(cp, a0, a1);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ Push(info);
+ __ CallRuntime(
+ pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
}
context()->Plug(v0);
}
-void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset,
- FeedbackVectorICSlot slot) {
- if (NeedsHomeObject(initializer)) {
- __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- __ li(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ ld(StoreDescriptor::ValueRegister(),
- MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
- }
+void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
+ FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ __ li(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ ld(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+}
+
+
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(
+ Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ Move(StoreDescriptor::ReceiverRegister(), v0);
+ __ li(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ ld(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
}
@@ -1448,69 +1435,28 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- DCHECK(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else if (var->is_this()) {
- CHECK(info_->has_literal() &&
- (info_->literal()->kind() & kSubclassConstructor) != 0);
- // TODO(dslomov): implement 'this' hole check elimination.
- skip_init_check = false;
+ if (NeedsHoleCheckForLoad(proxy)) {
+ // Let and const need a read barrier.
+ GetVar(v0, var);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ dsubu(at, v0, at); // Sub as compare: at == 0 on eq.
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ Label done;
+ __ Branch(&done, ne, at, Operand(zero_reg));
+ __ li(a0, Operand(var->name()));
+ __ push(a0);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&done);
} else {
- // Check that we always have valid source position.
- DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
- DCHECK(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST_LEGACY &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- GetVar(v0, var);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ dsubu(at, v0, at); // Sub as compare: at == 0 on eq.
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- Label done;
- __ Branch(&done, ne, at, Operand(zero_reg));
- __ li(a0, Operand(var->name()));
- __ push(a0);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&done);
- } else {
- // Uninitalized const bindings outside of harmony mode are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
- __ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
- }
- context()->Plug(v0);
- break;
+ // Uninitialized legacy const bindings are unholed.
+ DCHECK(var->mode() == CONST_LEGACY);
+ __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+ __ Movz(v0, a0, at); // Conditional move: Undefined if TheHole.
}
+ context()->Plug(v0);
+ break;
}
context()->Plug(var);
break;
@@ -1549,8 +1495,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// a0 = RegExp literal clone
__ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ld(a4, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
__ ld(a5, FieldMemOperand(a4, literal_offset));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&materialized, ne, a5, Operand(at));
@@ -1587,12 +1532,19 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
+void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
+ Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ LoadRoot(a1, Heap::kNullValueRootIndex);
__ push(a1);
} else {
VisitForStackValue(expression);
+ if (NeedsHomeObject(expression)) {
+ DCHECK(property->kind() == ObjectLiteral::Property::GETTER ||
+ property->kind() == ObjectLiteral::Property::SETTER);
+ int offset = property->kind() == ObjectLiteral::Property::GETTER ? 2 : 3;
+ EmitSetHomeObject(expression, offset, property->GetSlot());
+ }
}
}
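
EmitAccessor now receives the whole ObjectLiteralProperty so it can install the home object on getter and setter closures. The offsets 2 and 3 follow from the push order in the accessor loop below (literal copy, key, getter, then setter); a small check of that stack arithmetic:

    #include <cassert>
    #include <cstring>

    int main() {
      // Emitting the getter: top of stack is [getter, key, literal, ...].
      const char* at_getter[] = {"getter", "key", "literal"};
      assert(strcmp(at_getter[2], "literal") == 0);  // offset 2 -> the literal
      // Emitting the setter: top of stack is [setter, getter, key, literal].
      const char* at_setter[] = {"setter", "getter", "key", "literal"};
      assert(strcmp(at_setter[3], "literal") == 0);  // offset 3 -> the literal
      return 0;
    }
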
@@ -1621,10 +1573,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
AccessorTable accessor_table(zone());
int property_index = 0;
- // store_slot_index points to the vector IC slot for the next store IC used.
- // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
- // and must be updated if the number of store ICs emitted here changes.
- int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1653,7 +1601,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ li(StoreDescriptor::NameRegister(), Operand(key->value()));
__ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
} else {
CallStoreIC(key->LiteralFeedbackId());
@@ -1661,14 +1609,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
- __ Move(StoreDescriptor::ReceiverRegister(), v0);
- __ li(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ ld(StoreDescriptor::ValueRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
- }
- CallStoreIC();
+ EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
}
} else {
VisitForEffect(value);
@@ -1681,8 +1622,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
__ li(a0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes.
__ push(a0);
__ CallRuntime(Runtime::kSetProperty, 4);
@@ -1700,12 +1642,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = value;
+ accessor_table.lookup(key)->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = value;
+ accessor_table.lookup(key)->second->setter = property;
}
break;
}
@@ -1720,13 +1662,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(a0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(
- it->second->getter, 2,
- expr->SlotForHomeObject(it->second->getter, &store_slot_index));
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(
- it->second->setter, 3,
- expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ li(a0, Operand(Smi::FromInt(NONE)));
__ push(a0);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1761,8 +1697,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1808,10 +1745,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(v0);
}
-
- // Verify that compilation exactly consumed the number of store ic slots that
- // the ObjectLiteral node had to offer.
- DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
@@ -1904,7 +1837,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(v0);
if (subexpr->IsSpread()) {
VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
__ CallRuntime(Runtime::kAppendElement, 2);
@@ -2320,41 +2254,29 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label gc_required;
- Label allocated;
-
- const int instance_size = 5 * kPointerSize;
- DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
- instance_size);
+ Label allocate, done_allocate;
- __ Allocate(instance_size, v0, a2, a3, &gc_required, TAG_OBJECT);
- __ jmp(&allocated);
+ __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
+ __ jmp(&done_allocate);
- __ bind(&gc_required);
- __ Push(Smi::FromInt(instance_size));
+ __ bind(&allocate);
+ __ Push(Smi::FromInt(JSIteratorResult::kSize));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ ld(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ bind(&allocated);
+ __ bind(&done_allocate);
__ ld(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ ld(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
__ ld(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
__ pop(a2);
- __ li(a3, Operand(isolate()->factory()->ToBoolean(done)));
- __ li(a4, Operand(isolate()->factory()->empty_fixed_array()));
+ __ LoadRoot(a3,
+ done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
__ sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
__ sd(a4, FieldMemOperand(v0, JSObject::kPropertiesOffset));
__ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sd(a2,
- FieldMemOperand(v0, JSGeneratorObject::kResultValuePropertyOffset));
- __ sd(a3,
- FieldMemOperand(v0, JSGeneratorObject::kResultDonePropertyOffset));
-
- // Only the value field needs a write barrier, as the other values are in the
- // root set.
- __ RecordWriteField(v0, JSGeneratorObject::kResultValuePropertyOffset,
- a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+ __ sd(a2, FieldMemOperand(v0, JSIteratorResult::kValueOffset));
+ __ sd(a3, FieldMemOperand(v0, JSIteratorResult::kDoneOffset));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
}
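
EmitCreateIteratorResult now allocates a JSIteratorResult directly and pins its size with a STATIC_ASSERT instead of a runtime DCHECK against the iterator-result map; the RecordWriteField call is dropped, presumably safe because the result object is freshly allocated in new space. The five stores above fill a layout like this (offsets illustrative, 64-bit):

    struct JSIteratorResultLayout {
      void* map;         // HeapObject::kMapOffset
      void* properties;  // JSObject::kPropertiesOffset  (empty fixed array)
      void* elements;    // JSObject::kElementsOffset    (empty fixed array)
      void* value;       // JSIteratorResult::kValueOffset
      void* done;        // JSIteratorResult::kDoneOffset (true/false root)
    };

    static_assert(sizeof(JSIteratorResultLayout) == 5 * sizeof(void*),
                  "JSIteratorResult::kSize == 5 * kPointerSize");
    int main() { return 0; }
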
@@ -2495,8 +2417,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
- int* used_store_slots) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Constructor is in v0.
DCHECK(lit != NULL);
__ push(v0);
@@ -2530,8 +2451,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2,
- lit->SlotForHomeObject(value, used_store_slots));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -3256,18 +3178,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ ld(a1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- }
-
- __ li(a2, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(a2);
__ li(a3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(v0);
}
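
Both construct paths gain an explicit context-register reload after the call: the callee may leave cp clobbered, while the frame slot keeps the canonical copy. The shape of the pattern in plain C++, with illustrative names:

    struct Frame { void* context_slot; };  // kContextOffset's slot

    void* CallAndRestoreContext(Frame* fp, void* (*call)(), void** cp) {
      void* result = call();   // callee may trash the cached copy in *cp
      *cp = fp->context_slot;  // reload the canonical copy from the frame
      return result;
    }

    int main() { return 0; }
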
@@ -3300,16 +3218,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ ld(a1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- UNREACHABLE();
- /* TODO(dslomov): support pretenuring.
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- */
- }
-
- __ li(a2, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(a2);
__ li(a3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
@@ -3317,6 +3226,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(v0);
}
@@ -3342,58 +3253,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ NonNegativeSmiTst(v0, at);
- Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(v0, if_false);
- __ LoadRoot(at, Heap::kNullValueRootIndex);
- __ Branch(if_true, eq, v0, Operand(at));
- __ ld(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ lbu(a1, FieldMemOperand(a2, Map::kBitFieldOffset));
- __ And(at, a1, Operand(1 << Map::kIsUndetectable));
- __ Branch(if_false, ne, at, Operand(zero_reg));
- __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
- if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3439,95 +3298,6 @@ void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false, skip_lookup;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(v0);
-
- __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(a4, FieldMemOperand(a1, Map::kBitField2Offset));
- __ And(a4, a4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ Branch(&skip_lookup, ne, a4, Operand(zero_reg));
-
- // Check for fast case object. Generate false result for slow case object.
- __ ld(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ ld(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ LoadRoot(a4, Heap::kHashTableMapRootIndex);
- __ Branch(if_false, eq, a2, Operand(a4));
-
- // Look for valueOf name in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(a3, a1);
- __ Branch(&done, eq, a3, Operand(zero_reg));
-
- __ LoadInstanceDescriptors(a1, a4);
- // a4: descriptor array.
- // a3: valid entries in the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
-// Does not need?
-// STATIC_ASSERT(kPointerSize == 4);
- __ li(at, Operand(DescriptorArray::kDescriptorSize));
- __ Dmul(a3, a3, at);
- // Calculate location of the first key name.
- __ Daddu(a4, a4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
- // Calculate the end of the descriptor array.
- __ mov(a2, a4);
- __ dsll(a5, a3, kPointerSizeLog2);
- __ Daddu(a2, a2, a5);
-
- // Loop through all the keys in the descriptor array. If one of these is the
- // string "valueOf" the result is false.
- // The use of a6 to store the valueOf string assumes that it is not otherwise
- // used in the loop below.
- __ li(a6, Operand(isolate()->factory()->value_of_string()));
- __ jmp(&entry);
- __ bind(&loop);
- __ ld(a3, MemOperand(a4, 0));
- __ Branch(if_false, eq, a3, Operand(a6));
- __ Daddu(a4, a4, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ Branch(&loop, ne, a4, Operand(a2));
-
- __ bind(&done);
-
- // Set the bit in the map to indicate that there is no local valueOf field.
- __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
- __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
-
- __ bind(&skip_lookup);
-
- // If a valueOf property is not found on the object check that its
- // prototype is the un-modified String prototype. If not result is false.
- __ ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
- __ JumpIfSmi(a2, if_false);
- __ ld(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
- __ ld(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ld(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
- __ ld(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, a2, Operand(a3), if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4010,6 +3780,23 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into v0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Convert the object to an integer.
+ Label done_convert;
+ __ JumpIfSmi(v0, &done_convert);
+ __ Push(v0);
+ __ CallRuntime(Runtime::kToInteger, 1);
+ __ bind(&done_convert);
+ context()->Plug(v0);
+}
+
+
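
The smi test is a sufficient fast path here because a smi already is an integer under V8's tagging scheme; only heap numbers and other objects take the Runtime::kToInteger call. A sketch of the 64-bit encoding this relies on (payload in the upper word, tag bit 0 clear):

    #include <cstdint>

    constexpr bool IsSmi(intptr_t tagged) { return (tagged & 1) == 0; }
    constexpr intptr_t SmiFromInt(int32_t value) {
      return static_cast<intptr_t>(value) << 32;  // 64-bit smi layout
    }

    static_assert(IsSmi(SmiFromInt(42)), "smis carry a clear tag bit");
    int main() { return 0; }
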
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(args->length(), 1);
@@ -4024,6 +3811,40 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into a0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+ __ mov(a0, result_register());
+
+ ToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitToName(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into v0 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ Label convert, done_convert;
+ __ JumpIfSmi(v0, &convert);
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ GetObjectType(v0, a1, a1);
+ __ Branch(&done_convert, le, a1, Operand(LAST_NAME_TYPE));
+ __ bind(&convert);
+ __ Push(v0);
+ __ CallRuntime(Runtime::kToName, 1);
+ __ bind(&done_convert);
+ context()->Plug(v0);
+}
+
+
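
EmitToName gets away with a single upper-bound compare because name instance types (strings and symbols) are laid out first, as the STATIC_ASSERT above pins down. Modeled with illustrative enum values:

    #include <cassert>

    enum InstanceType { FIRST_TYPE = 0, LAST_NAME_TYPE = 128, JS_OBJECT_TYPE = 200 };

    bool IsName(int instance_type) {
      // FIRST_NAME_TYPE == FIRST_TYPE, so only the upper bound is tested.
      return instance_type <= LAST_NAME_TYPE;
    }

    int main() {
      assert(IsName(FIRST_TYPE) && !IsName(JS_OBJECT_TYPE));
      return 0;
    }
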
void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -4167,6 +3988,26 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCall(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_LE(2, args->length());
+ // Push target, receiver and arguments onto the stack.
+ for (Expression* const arg : *args) {
+ VisitForStackValue(arg);
+ }
+ // Move target to a1.
+ int const argc = args->length() - 2;
+ __ ld(a1, MemOperand(sp, (argc + 1) * kPointerSize));
+ // Call the target.
+ __ li(a0, Operand(argc));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ context()->DropAndPlug(1, v0);
+}
+
+
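
%_Call pushes target, then receiver, then the arguments, so with argc = args->length() - 2 the target sits argc + 1 slots below the top of stack; that is the load into a1 above. The index arithmetic, checked in isolation:

    #include <cassert>
    #include <cstring>

    int main() {
      // Push order target, receiver, arg0, arg1; index 0 is the stack top.
      const char* stack[] = {"arg1", "arg0", "receiver", "target"};
      int argc = 2;  // arguments only, excluding target and receiver
      assert(strcmp(stack[argc + 1], "target") == 0);
      return 0;
    }
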
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4192,7 +4033,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ bind(&runtime);
__ push(v0);
- __ CallRuntime(Runtime::kCall, args->length());
+ __ CallRuntime(Runtime::kCallFunction, args->length());
__ bind(&done);
context()->Plug(v0);
@@ -4203,22 +4044,18 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- // new.target
+ // Evaluate new.target and super constructor.
VisitForStackValue(args->at(0));
-
- // .this_function
VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kGetPrototype, 1);
- __ Push(result_register());
- // Load original constructor into a4.
- __ ld(a4, MemOperand(sp, 1 * kPointerSize));
+ // Load original constructor into a3.
+ __ ld(a3, MemOperand(sp, 1 * kPointerSize));
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame, eq, a3,
+ __ ld(a4, MemOperand(a2, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a4,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// default constructor has no arguments, so no adaptor frame means no args.
__ mov(a0, zero_reg);
@@ -4241,8 +4078,8 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
// Pre-decrement a2 with kPointerSize on each iteration.
// Pre-decrement in order to skip receiver.
__ Daddu(a2, a2, Operand(-kPointerSize));
- __ ld(a3, MemOperand(a2));
- __ Push(a3);
+ __ ld(a4, MemOperand(a2));
+ __ Push(a4);
__ Daddu(a1, a1, Operand(-1));
__ Branch(&loop, ne, a1, Operand(zero_reg));
}
@@ -4251,14 +4088,12 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ dsll(at, a0, kPointerSizeLog2);
__ Daddu(at, at, Operand(sp));
__ ld(a1, MemOperand(at, 0));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-
- CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
+ // Restore context register.
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(result_register());
+ context()->DropAndPlug(1, result_register());
}
@@ -4555,18 +4390,44 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ Label runtime, done;
+
+ __ Allocate(JSIteratorResult::kSize, v0, a2, a3, &runtime, TAG_OBJECT);
+ __ ld(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ ld(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
+ __ ld(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ Pop(a2, a3);
+ __ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ sd(a4, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ sd(a2, FieldMemOperand(v0, JSIteratorResult::kValueOffset));
+ __ sd(a3, FieldMemOperand(v0, JSIteratorResult::kDoneOffset));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ __ jmp(&done);
+
+ __ bind(&runtime);
+ __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+
+ __ bind(&done);
+ context()->Plug(v0);
+}
+
+
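
EmitCreateIterResultObject uses the usual allocate-or-fall-back shape: try an inline new-space allocation and, on failure, branch to a runtime call that builds the same object. The control flow in ordinary C++ terms, where a null return plays the role of the branch to &runtime:

    #include <cstddef>

    struct Heap { char* top; char* limit; };

    void* AllocateRaw(Heap* heap, size_t size) {
      if (heap->top + size > heap->limit) return nullptr;  // -> &runtime
      void* result = heap->top;
      heap->top += size;  // bump-pointer allocation, like __ Allocate
      return result;      // -> fall through to the field stores
    }

    int main() {
      char arena[64];
      Heap heap{arena, arena + sizeof(arena)};
      return AllocateRaw(&heap, 40) != nullptr ? 0 : 1;  // fast path taken
    }
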
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
- // Push the builtins object as the receiver.
- Register receiver = LoadDescriptor::ReceiverRegister();
- __ ld(receiver, GlobalObjectOperand());
- __ ld(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
- __ push(receiver);
+ // Push undefined as the receiver.
+ __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+ __ push(v0);
- // Load the function from the receiver.
- __ li(LoadDescriptor::NameRegister(), Operand(expr->name()));
- __ li(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF);
+ __ ld(v0, GlobalObjectOperand());
+ __ ld(v0, FieldMemOperand(v0, GlobalObject::kNativeContextOffset));
+ __ ld(v0, ContextOperand(v0, expr->context_index()));
}
@@ -5056,23 +4917,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
} else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(v0, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ GetObjectType(v0, v0, a1);
- __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
- Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
- if_true, if_false, fall_through);
+ __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ And(a1, a1,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ Split(eq, a1, Operand(1 << Map::kIsCallable), if_true, if_false,
+ fall_through);
} else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(v0, if_false);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(if_true, eq, v0, Operand(at));
- // Check for JS objects => true.
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ GetObjectType(v0, v0, a1);
- __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ lbu(a1, FieldMemOperand(v0, Map::kInstanceTypeOffset));
- __ Branch(if_false, gt, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- // Check for undetectable objects => false.
+ __ Branch(if_false, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Check for callable or undetectable objects => false.
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
- __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+ __ And(a1, a1,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
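
The typeof checks for "function" and "object" above now read the map's bit field instead of instance-type ranges: "function" requires the callable bit without the undetectable bit, while "object" requires a spec-object type with both bits clear. The mask-and-compare logic, with illustrative bit positions:

    #include <cassert>

    constexpr int kIsCallable = 1 << 4;  // bit positions illustrative
    constexpr int kIsUndetectable = 1 << 5;

    bool TypeofIsFunction(int bit_field) {
      return (bit_field & (kIsCallable | kIsUndetectable)) == kIsCallable;
    }

    bool TypeofIsObjectCandidate(int bit_field) {
      return (bit_field & (kIsCallable | kIsUndetectable)) == 0;
    }

    int main() {
      assert(TypeofIsFunction(kIsCallable));
      assert(!TypeofIsFunction(kIsCallable | kIsUndetectable));
      assert(TypeofIsObjectCandidate(0));
      return 0;
    }
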
@@ -5113,19 +4974,21 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kHasProperty, 2);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(a4, Heap::kTrueValueRootIndex);
Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ VisitForAccumulatorValue(expr->right());
+ __ mov(a0, result_register());
+ __ pop(a1);
+ InstanceOfStub stub(isolate());
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- // The stub returns 0 for true.
- Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ LoadRoot(a4, Heap::kTrueValueRootIndex);
+ Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
break;
}
diff --git a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index 5c25c6dc9e..03def66d5f 100644
--- a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -102,7 +102,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
+ SetFunctionPosition(literal());
Comment cmnt(masm_, "[ function compiled by full code generator");
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -117,8 +117,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis() && info->scope()->has_this_declaration()) {
+ if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadP(r5, MemOperand(sp, receiver_offset), r0);
@@ -161,7 +160,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
__ cmpl(ip, r5);
__ bc_short(ge, &ok);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
}
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -187,7 +186,7 @@ void FullCodeGenerator::Generate() {
}
}
- bool function_in_register = true;
+ bool function_in_register_r4 = true;
// Possibly allocate a local context.
if (info->scope()->num_heap_slots() > 0) {
@@ -208,7 +207,7 @@ void FullCodeGenerator::Generate() {
__ push(r4);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- function_in_register = false;
+ function_in_register_r4 = false;
// Context is returned in r3. It replaces the context passed to us.
// It's saved in the stack and kept live in cp.
__ mr(cp, r3);
@@ -241,14 +240,19 @@ void FullCodeGenerator::Generate() {
}
}
+ PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
+  // The function register is trashed in case we bail out here, but since
+  // that can only happen when we allocate a context, the value of
+  // |function_in_register_r4| is still correct.
+
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
Variable* this_function_var = scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
- if (!function_in_register) {
+ if (!function_in_register_r4) {
__ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers register again, keep is marked as such.
+ // The write barrier clobbers register again, keep it marked as such.
}
SetVar(this_function_var, r4, r3, r5);
}
@@ -272,6 +276,7 @@ void FullCodeGenerator::Generate() {
__ LoadP(r4, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
__ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::CONSTRUCT), r0);
Label non_construct_frame, done;
+ function_in_register_r4 = false;
__ bne(&non_construct_frame);
__ LoadP(r3, MemOperand(
@@ -285,56 +290,30 @@ void FullCodeGenerator::Generate() {
SetVar(new_target_var, r3, r5, r6);
}
- // Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
- if (rest_param) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ addi(r6, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ LoadSmiLiteral(r5, Smi::FromInt(num_parameters));
- __ LoadSmiLiteral(r4, Smi::FromInt(rest_index));
- __ LoadSmiLiteral(r3, Smi::FromInt(language_mode()));
- __ Push(r6, r5, r4, r3);
-
- RestParamAccessStub stub(isolate());
- __ CallStub(&stub);
-
- SetVar(rest_param, r3, r4, r5);
- }
-
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register) {
+ DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
+ if (!function_in_register_r4) {
// Load this again, if it's used by the local context below.
- __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- } else {
- __ mr(r6, r4);
+ __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ LoadSmiLiteral(r4, Smi::FromInt(num_parameters));
- __ Push(r6, r5, r4);
+ __ LoadSmiLiteral(ArgumentsAccessNewDescriptor::parameter_count(),
+ Smi::FromInt(num_parameters));
+ __ addi(ArgumentsAccessNewDescriptor::parameter_pointer(), fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + offset));
// Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !has_simple_parameters()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
- }
+ // function, parameter pointer, parameter count.
+ // The stub will rewrite parameter pointer and parameter count if the
+  // previous stack frame was an arguments adaptor frame.
+ bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
+ ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
+ is_unmapped, literal()->has_duplicate_parameters());
ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
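
The three-way stub-type selection is folded into ArgumentsAccessStub::ComputeType. Reconstructed from the deleted branch (the helper's actual body is not shown in this diff):

    enum Type { NEW_STRICT, NEW_SLOPPY_SLOW, NEW_SLOPPY_FAST };

    Type ComputeType(bool is_unmapped, bool has_duplicate_parameters) {
      if (is_unmapped) return NEW_STRICT;  // strict mode or non-simple params
      return has_duplicate_parameters ? NEW_SLOPPY_SLOW : NEW_SLOPPY_FAST;
    }

    int main() { return ComputeType(false, false) == NEW_SLOPPY_FAST ? 0 : 1; }
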
@@ -377,7 +356,7 @@ void FullCodeGenerator::Generate() {
{
Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
- VisitStatements(function()->body());
+ VisitStatements(literal()->body());
DCHECK(loop_depth() == 0);
}
}
@@ -487,7 +466,7 @@ void FullCodeGenerator::EmitReturnSequence() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
- SetReturnPosition(function());
+ SetReturnPosition(literal());
int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
__ blr();
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
@@ -757,7 +736,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
// Only prepare for bailouts before splits if we're in a test
// context. Otherwise, we let the Visit function deal with the
// preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
+ if (!context()->IsTest()) return;
Label skip;
if (should_normalize) __ b(&skip);
@@ -1111,9 +1090,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Label non_proxy;
__ bind(&fixed_array);
- __ Move(r4, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(r4);
__ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
- int vector_index = FeedbackVector()->GetIndex(slot);
+ int vector_index = SmiFromSlot(slot)->value();
__ StoreP(
r5, FieldMemOperand(r4, FixedArray::OffsetOfElementAt(vector_index)), r0);
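
For-in no longer asks the feedback vector for the slot's element index; the raw slot value is used with FixedArray::OffsetOfElementAt directly, and the slot is filled with the megamorphic sentinel up front. The store, modeled on a plain array (index math illustrative):

    #include <cassert>
    #include <vector>

    enum Feedback { kUninitialized, kMonomorphic, kMegamorphicSentinel };

    int main() {
      std::vector<Feedback> feedback(8, kUninitialized);
      int vector_index = 3;  // SmiFromSlot(slot)->value()
      feedback[vector_index] = kMegamorphicSentinel;
      assert(feedback[vector_index] == kMegamorphicSentinel);
      return 0;
    }
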
@@ -1223,28 +1202,37 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ mov(r5, Operand(info));
__ CallStub(&stub);
} else {
- __ mov(r3, Operand(info));
- __ LoadRoot(
- r4, pretenure ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ Push(cp, r3, r4);
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ Push(info);
+ __ CallRuntime(
+ pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
}
context()->Plug(r3);
}
-void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset,
- FeedbackVectorICSlot slot) {
- if (NeedsHomeObject(initializer)) {
- __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- __ mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ LoadP(StoreDescriptor::ValueRegister(),
- MemOperand(sp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
- }
+void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
+ FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ LoadP(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+}
+
+
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(
+ Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ Move(StoreDescriptor::ReceiverRegister(), r3);
+ __ mov(StoreDescriptor::NameRegister(),
+ Operand(isolate()->factory()->home_object_symbol()));
+ __ LoadP(StoreDescriptor::ValueRegister(),
+ MemOperand(sp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
}
@@ -1418,67 +1406,26 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- DCHECK(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else if (var->is_this()) {
- CHECK(info_->has_literal() &&
- (info_->literal()->kind() & kSubclassConstructor) != 0);
- // TODO(dslomov): implement 'this' hole check elimination.
- skip_init_check = false;
+ if (NeedsHoleCheckForLoad(proxy)) {
+ Label done;
+ // Let and const need a read barrier.
+ GetVar(r3, var);
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ bne(&done);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
} else {
- // Check that we always have valid source position.
- DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
- DCHECK(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST_LEGACY &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- Label done;
- // Let and const need a read barrier.
- GetVar(r3, var);
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ bne(&done);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ mov(r3, Operand(var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else {
-        // Uninitialized const bindings outside of harmony mode are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- }
- __ bind(&done);
- context()->Plug(r3);
- break;
+ // Uninitialized legacy const bindings are unholed.
+ DCHECK(var->mode() == CONST_LEGACY);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
}
+ __ bind(&done);
+ context()->Plug(r3);
+ break;
}
context()->Plug(var);
break;
@@ -1517,8 +1464,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// r3 = RegExp literal clone
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ LoadP(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
__ LoadP(r8, FieldMemOperand(r7, literal_offset), r0);
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r8, ip);
@@ -1555,12 +1501,19 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
+void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
+ Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ LoadRoot(r4, Heap::kNullValueRootIndex);
__ push(r4);
} else {
VisitForStackValue(expression);
+ if (NeedsHomeObject(expression)) {
+ DCHECK(property->kind() == ObjectLiteral::Property::GETTER ||
+ property->kind() == ObjectLiteral::Property::SETTER);
+ int offset = property->kind() == ObjectLiteral::Property::GETTER ? 2 : 3;
+ EmitSetHomeObject(expression, offset, property->GetSlot());
+ }
}
}
@@ -1590,10 +1543,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
AccessorTable accessor_table(zone());
int property_index = 0;
- // store_slot_index points to the vector IC slot for the next store IC used.
- // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
- // and must be updated if the number of store ICs emitted here changes.
- int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1621,7 +1570,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
} else {
CallStoreIC(key->LiteralFeedbackId());
@@ -1629,14 +1578,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
- __ Move(StoreDescriptor::ReceiverRegister(), r3);
- __ mov(StoreDescriptor::NameRegister(),
- Operand(isolate()->factory()->home_object_symbol()));
- __ LoadP(StoreDescriptor::ValueRegister(), MemOperand(sp));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
- }
- CallStoreIC();
+ EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
}
} else {
VisitForEffect(value);
@@ -1649,8 +1591,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
__ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY)); // PropertyAttributes
__ push(r3);
__ CallRuntime(Runtime::kSetProperty, 4);
@@ -1668,12 +1611,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = value;
+ accessor_table.lookup(key)->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = value;
+ accessor_table.lookup(key)->second->setter = property;
}
break;
}
@@ -1687,13 +1630,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ push(r3);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(
- it->second->getter, 2,
- expr->SlotForHomeObject(it->second->getter, &store_slot_index));
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(
- it->second->setter, 3,
- expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ LoadSmiLiteral(r3, Smi::FromInt(NONE));
__ push(r3);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1728,8 +1665,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1775,10 +1713,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(r3);
}
-
- // Verify that compilation exactly consumed the number of store ic slots that
- // the ObjectLiteral node had to offer.
- DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
@@ -1868,7 +1802,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(r3);
if (subexpr->IsSpread()) {
VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
__ CallRuntime(Runtime::kAppendElement, 2);
@@ -2299,43 +2234,28 @@ void FullCodeGenerator::EmitGeneratorResume(
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label gc_required;
- Label allocated;
+ Label allocate, done_allocate;
- const int instance_size = 5 * kPointerSize;
- DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
- instance_size);
+ __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &allocate, TAG_OBJECT);
+ __ b(&done_allocate);
- __ Allocate(instance_size, r3, r5, r6, &gc_required, TAG_OBJECT);
- __ b(&allocated);
-
- __ bind(&gc_required);
- __ Push(Smi::FromInt(instance_size));
+ __ bind(&allocate);
+ __ Push(Smi::FromInt(JSIteratorResult::kSize));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ LoadP(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ bind(&allocated);
+ __ bind(&done_allocate);
__ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
__ LoadP(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
__ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
__ pop(r5);
- __ mov(r6, Operand(isolate()->factory()->ToBoolean(done)));
- __ mov(r7, Operand(isolate()->factory()->empty_fixed_array()));
+ __ LoadRoot(r6,
+ done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
__ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
__ StoreP(r7, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
__ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- __ StoreP(r5,
- FieldMemOperand(r3, JSGeneratorObject::kResultValuePropertyOffset),
- r0);
- __ StoreP(r6,
- FieldMemOperand(r3, JSGeneratorObject::kResultDonePropertyOffset),
- r0);
-
- // Only the value field needs a write barrier, as the other values are in the
- // root set.
- __ RecordWriteField(r3, JSGeneratorObject::kResultValuePropertyOffset, r5, r6,
- kLRHasBeenSaved, kDontSaveFPRegs);
+ __ StoreP(r5, FieldMemOperand(r3, JSIteratorResult::kValueOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r3, JSIteratorResult::kDoneOffset), r0);
}
@@ -2507,8 +2427,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
- int* used_store_slots) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Constructor is in r3.
DCHECK(lit != NULL);
__ push(r3);
@@ -2542,8 +2461,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2,
- lit->SlotForHomeObject(value, used_store_slots));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -3256,18 +3176,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ LoadP(r4, MemOperand(sp, arg_count * kPointerSize), r0);
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- }
-
- __ Move(r5, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(r5);
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(r3);
}
@@ -3300,16 +3216,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ LoadP(r4, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- UNREACHABLE();
- /* TODO(dslomov): support pretenuring.
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- */
- }
-
- __ Move(r5, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(r5);
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
@@ -3317,6 +3224,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->Plug(r3);
}
@@ -3342,60 +3251,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ TestIfPositiveSmi(r3, r0);
- Split(eq, if_true, if_false, fall_through, cr0);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ JumpIfSmi(r3, if_false);
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r3, ip);
- __ beq(if_true);
- __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ lbz(r4, FieldMemOperand(r5, Map::kBitFieldOffset));
- __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
- __ bne(if_false, cr0);
- __ lbz(r4, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- __ cmpi(r4, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ blt(if_false);
- __ cmpi(r4, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(le, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3440,97 +3295,6 @@ void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false, skip_lookup;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
- &if_false, &fall_through);
-
- __ AssertNotSmi(r3);
-
- __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ lbz(ip, FieldMemOperand(r4, Map::kBitField2Offset));
- __ andi(r0, ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ bne(&skip_lookup, cr0);
-
- // Check for fast case object. Generate false result for slow case object.
- __ LoadP(r5, FieldMemOperand(r3, JSObject::kPropertiesOffset));
- __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r5, ip);
- __ beq(if_false);
-
- // Look for valueOf name in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(r6, r4);
- __ cmpi(r6, Operand::Zero());
- __ beq(&done);
-
- __ LoadInstanceDescriptors(r4, r7);
- // r7: descriptor array.
- // r6: valid entries in the descriptor array.
- __ mov(ip, Operand(DescriptorArray::kDescriptorSize));
- __ Mul(r6, r6, ip);
- // Calculate location of the first key name.
- __ addi(r7, r7, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
- // Calculate the end of the descriptor array.
- __ mr(r5, r7);
- __ ShiftLeftImm(ip, r6, Operand(kPointerSizeLog2));
- __ add(r5, r5, ip);
-
- // Loop through all the keys in the descriptor array. If one of these is the
- // string "valueOf" the result is false.
- // The use of ip to store the valueOf string assumes that it is not otherwise
- // used in the loop below.
- __ mov(ip, Operand(isolate()->factory()->value_of_string()));
- __ b(&entry);
- __ bind(&loop);
- __ LoadP(r6, MemOperand(r7, 0));
- __ cmp(r6, ip);
- __ beq(if_false);
- __ addi(r7, r7, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ cmp(r7, r5);
- __ bne(&loop);
-
- __ bind(&done);
-
- // Set the bit in the map to indicate that there is no local valueOf field.
- __ lbz(r5, FieldMemOperand(r4, Map::kBitField2Offset));
- __ ori(r5, r5, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ stb(r5, FieldMemOperand(r4, Map::kBitField2Offset));
-
- __ bind(&skip_lookup);
-
- // If a valueOf property is not found on the object check that its
- // prototype is the un-modified String prototype. If not result is false.
- __ LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
- __ JumpIfSmi(r5, if_false);
- __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
- __ LoadP(r6, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(r6, FieldMemOperand(r6, GlobalObject::kNativeContextOffset));
- __ LoadP(r6,
- ContextOperand(r6, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ cmp(r5, r6);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -4010,6 +3774,23 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into r3 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Convert the object to an integer.
+ Label done_convert;
+ __ JumpIfSmi(r3, &done_convert);
+ __ Push(r3);
+ __ CallRuntime(Runtime::kToInteger, 1);
+ __ bind(&done_convert);
+ context()->Plug(r3);
+}
+
+
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(args->length(), 1);
@@ -4022,6 +3803,39 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into r3 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ ToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitToName(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into r3 and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ Label convert, done_convert;
+ __ JumpIfSmi(r3, &convert);
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ CompareObjectType(r3, r4, r4, LAST_NAME_TYPE);
+ __ ble(&done_convert);
+ __ bind(&convert);
+ __ Push(r3);
+ __ CallRuntime(Runtime::kToName, 1);
+ __ bind(&done_convert);
+ context()->Plug(r3);
+}
+
+
void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -4148,6 +3962,26 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCall(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_LE(2, args->length());
+ // Push target, receiver and arguments onto the stack.
+ for (Expression* const arg : *args) {
+ VisitForStackValue(arg);
+ }
+ // Move target to r4.
+ int const argc = args->length() - 2;
+ __ LoadP(r4, MemOperand(sp, (argc + 1) * kPointerSize));
+ // Call the target.
+ __ mov(r3, Operand(argc));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ context()->DropAndPlug(1, r3);
+}
+
+
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4173,7 +4007,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ bind(&runtime);
__ push(r3);
- __ CallRuntime(Runtime::kCall, args->length());
+ __ CallRuntime(Runtime::kCallFunction, args->length());
__ bind(&done);
context()->Plug(r3);
@@ -4184,23 +4018,22 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- // new.target
+ // Evaluate new.target.
VisitForStackValue(args->at(0));
- // .this_function
- VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kGetPrototype, 1);
+ // Evaluate super constructor (to stack and r4).
+ VisitForAccumulatorValue(args->at(1));
+ __ push(result_register());
__ mr(r4, result_register());
- __ Push(r4);
- // Load original constructor into r7.
- __ LoadP(r7, MemOperand(sp, 1 * kPointerSize));
+ // Load original constructor into r6.
+ __ LoadP(r6, MemOperand(sp, 1 * kPointerSize));
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
__ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ LoadP(r7, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ beq(&adaptor_frame);
// default constructor has no arguments, so no adaptor frame means no args.
@@ -4222,20 +4055,18 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ mtctr(r3);
__ bind(&loop);
// Pre-decrement in order to skip receiver.
- __ LoadPU(r6, MemOperand(r5, -kPointerSize));
- __ Push(r6);
+ __ LoadPU(r7, MemOperand(r5, -kPointerSize));
+ __ Push(r7);
__ bdnz(&loop);
}
__ bind(&args_set_up);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
- CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
- __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
-
- __ Drop(1);
+ // Restore context register.
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(result_register());
+ context()->DropAndPlug(1, r3);
}
@@ -4550,18 +4381,44 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ Label runtime, done;
+
+ __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &runtime, TAG_OBJECT);
+ __ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ LoadP(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
+ __ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ Pop(r5, r6);
+ __ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r3, JSIteratorResult::kValueOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r3, JSIteratorResult::kDoneOffset), r0);
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ __ b(&done);
+
+ __ bind(&runtime);
+ __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+
+ __ bind(&done);
+ context()->Plug(r3);
+}
+
+
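The inline allocation above lays out a JSIteratorResult as exactly five words (map, properties, elements, value, done), which the trailing STATIC_ASSERT enforces. The same shape as a plain struct; the field names follow the offsets used in the stores, and the struct itself is only a model:

#include <cstddef>

struct JSIteratorResultModel {
  void* map;         // Context::ITERATOR_RESULT_MAP_INDEX
  void* properties;  // empty fixed array
  void* elements;    // empty fixed array
  void* value;       // first pushed argument
  void* done;        // second pushed argument
};

static_assert(sizeof(JSIteratorResultModel) == 5 * sizeof(void*),
              "mirrors JSIteratorResult::kSize == 5 * kPointerSize");

int main() {}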
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
- // Push the builtins object as the receiver.
- Register receiver = LoadDescriptor::ReceiverRegister();
- __ LoadP(receiver, GlobalObjectOperand());
- __ LoadP(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
- __ push(receiver);
+ // Push undefined as the receiver.
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ push(r3);
- // Load the function from the receiver.
- __ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
- __ mov(LoadDescriptor::SlotRegister(),
- Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF);
+ __ LoadP(r3, GlobalObjectOperand());
+ __ LoadP(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
+ __ LoadP(r3, ContextOperand(r3, expr->context_index()));
}
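The rewrite replaces a named load-IC lookup on the builtins object with a direct indexed read from the native context, with undefined pushed as the receiver. Structurally it amounts to the following; the names here are stand-ins for illustration, not V8's API:

#include <cassert>
#include <vector>

using JSFunctionRef = const char*;

JSFunctionRef LoadJSRuntimeFunction(
    const std::vector<JSFunctionRef>& native_context, int context_index) {
  // The receiver pushed beforehand is simply undefined; no named lookup,
  // no load IC, just a context slot read.
  return native_context[context_index];
}

int main() {
  std::vector<JSFunctionRef> native_context = {"%concat", "%spread"};
  assert(LoadJSRuntimeFunction(native_context, 1) == native_context[1]);
}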
@@ -5047,23 +4904,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
} else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(r3, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r3, r3, r4, JS_FUNCTION_TYPE);
- __ beq(if_true);
- __ cmpi(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ andi(r4, r4,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ cmpi(r4, Operand(1 << Map::kIsCallable));
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(r3, if_false);
__ CompareRoot(r3, Heap::kNullValueRootIndex);
__ beq(if_true);
- // Check for JS objects => true.
- __ CompareObjectType(r3, r3, r4, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r3, r3, r4, FIRST_SPEC_OBJECT_TYPE);
__ blt(if_false);
- __ CompareInstanceType(r3, r4, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ bgt(if_false);
- // Check for undetectable objects => false.
+ // Check for callable or undetectable objects => false.
__ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+ __ andi(r0, r4,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(eq, if_true, if_false, fall_through, cr0);
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
@@ -5104,20 +4961,19 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kHasProperty, 2);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r3, ip);
+ __ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ VisitForAccumulatorValue(expr->right());
+ __ pop(r4);
+ InstanceOfStub stub(isolate());
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- // The stub returns 0 for true.
- __ cmpi(r3, Operand::Zero());
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ CompareRoot(r3, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
}
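The new typeof "function" test above condenses to a single bit-field comparison: a value is reported as a function iff its map is callable and not undetectable (undetectable callables such as document.all must still report "undefined"). The predicate in isolation, with illustrative bit positions; the diff only fixes the two flag names, not their positions:

#include <cassert>

constexpr unsigned kIsCallable = 1u << 0;
constexpr unsigned kIsUndetectable = 1u << 1;

bool TypeofIsFunction(unsigned map_bit_field) {
  return (map_bit_field & (kIsCallable | kIsUndetectable)) == kIsCallable;
}

int main() {
  assert(TypeofIsFunction(kIsCallable));                     // plain function
  assert(!TypeofIsFunction(kIsCallable | kIsUndetectable));  // document.all
  assert(!TypeofIsFunction(0));                              // ordinary object
}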
diff --git a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index a9a5e16243..0133c09d6e 100644
--- a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -93,7 +93,7 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
+ SetFunctionPosition(literal());
Comment cmnt(masm_, "[ function compiled by full code generator");
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -108,8 +108,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis() && info->scope()->has_this_declaration()) {
+ if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
// +1 for return address.
StackArgumentsAccessor args(rsp, info->scope()->num_parameters());
@@ -148,7 +147,7 @@ void FullCodeGenerator::Generate() {
__ subp(rcx, Immediate(locals_count * kPointerSize));
__ CompareRoot(rcx, Heap::kRealStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
}
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
@@ -228,6 +227,11 @@ void FullCodeGenerator::Generate() {
}
}
+ PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
+ // The function register is trashed in case we bail out here. But since
+ // that could happen only when we allocate a context, the value of
+ // |function_in_register| is correct.
+
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
Variable* this_function_var = scope()->this_function_var();
@@ -235,7 +239,7 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ This function");
if (!function_in_register) {
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- // The write barrier clobbers register again, keep is marked as such.
+ // The write barrier clobbers register again, keep it marked as such.
}
SetVar(this_function_var, rdi, rbx, rdx);
}
@@ -271,59 +275,31 @@ void FullCodeGenerator::Generate() {
SetVar(new_target_var, rax, rbx, rdx);
}
- // Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
- if (rest_param) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ leap(rdx,
- Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ Push(rdx);
- __ Push(Smi::FromInt(num_parameters));
- __ Push(Smi::FromInt(rest_index));
- __ Push(Smi::FromInt(language_mode()));
-
- RestParamAccessStub stub(isolate());
- __ CallStub(&stub);
-
- SetVar(rest_param, rax, rbx, rdx);
- }
-
// Possibly allocate an arguments object.
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Arguments object must be allocated after the context object, in
// case the "arguments" or ".arguments" variables are in the context.
Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ Push(rdi);
- } else {
- __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
+ if (!function_in_register) {
+ __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
// The receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ leap(rdx,
- Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
- __ Push(rdx);
- __ Push(Smi::FromInt(num_parameters));
+ __ Move(ArgumentsAccessNewDescriptor::parameter_count(),
+ Smi::FromInt(num_parameters));
+ __ leap(ArgumentsAccessNewDescriptor::parameter_pointer(),
+ Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
+
// Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
-
- ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !has_simple_parameters()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
- }
+ // function, parameter pointer, parameter count.
+ // The stub will rewrite the parameter pointer and parameter count if the
+ // previous stack frame was an arguments adaptor frame.
+ bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
+ ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
+ is_unmapped, literal()->has_duplicate_parameters());
ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
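The deleted branch spelled out the stub-type decision that the new ArgumentsAccessStub::ComputeType call now encapsulates. Assuming ComputeType preserves that table (its body is not part of this diff), the mapping is:

#include <cassert>

enum class ArgsStubType { NEW_STRICT, NEW_SLOPPY_SLOW, NEW_SLOPPY_FAST };

ArgsStubType ComputeType(bool is_unmapped, bool has_duplicate_parameters) {
  if (is_unmapped) return ArgsStubType::NEW_STRICT;        // strict / non-simple
  if (has_duplicate_parameters) return ArgsStubType::NEW_SLOPPY_SLOW;
  return ArgsStubType::NEW_SLOPPY_FAST;                    // common sloppy case
}

int main() {
  assert(ComputeType(true, false) == ArgsStubType::NEW_STRICT);
  assert(ComputeType(false, true) == ArgsStubType::NEW_SLOPPY_SLOW);
  assert(ComputeType(false, false) == ArgsStubType::NEW_SLOPPY_FAST);
}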
@@ -362,7 +338,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
- VisitStatements(function()->body());
+ VisitStatements(literal()->body());
DCHECK(loop_depth() == 0);
}
}
@@ -462,7 +438,7 @@ void FullCodeGenerator::EmitReturnSequence() {
EmitProfilingCounterReset();
__ bind(&ok);
- SetReturnPosition(function());
+ SetReturnPosition(literal());
int no_frame_start = masm_->pc_offset();
__ leave();
@@ -758,7 +734,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
// Only prepare for bailouts before splits if we're in a test
// context. Otherwise, we let the Visit function deal with the
// preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
+ if (!context()->IsTest()) return;
Label skip;
if (should_normalize) __ jmp(&skip, Label::kNear);
@@ -1108,8 +1084,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&fixed_array);
// No need for a write barrier, we are storing a Smi in the feedback vector.
- __ Move(rbx, FeedbackVector());
- int vector_index = FeedbackVector()->GetIndex(slot);
+ __ EmitLoadTypeFeedbackVector(rbx);
+ int vector_index = SmiFromSlot(slot)->value();
__ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(vector_index)),
TypeFeedbackVector::MegamorphicSentinel(isolate()));
__ Move(rbx, Smi::FromInt(1)); // Smi indicates slow check
@@ -1218,29 +1194,37 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ Move(rbx, info);
__ CallStub(&stub);
} else {
- __ Push(rsi);
__ Push(info);
- __ Push(pretenure
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value());
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(
+ pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
}
context()->Plug(rax);
}
-void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset,
- FeedbackVectorICSlot slot) {
- if (NeedsHomeObject(initializer)) {
- __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
- __ Move(StoreDescriptor::NameRegister(),
- isolate()->factory()->home_object_symbol());
- __ movp(StoreDescriptor::ValueRegister(),
- Operand(rsp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
- }
+void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
+ FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
+ __ Move(StoreDescriptor::NameRegister(),
+ isolate()->factory()->home_object_symbol());
+ __ movp(StoreDescriptor::ValueRegister(),
+ Operand(rsp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+}
+
+
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(
+ Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ movp(StoreDescriptor::ReceiverRegister(), rax);
+ __ Move(StoreDescriptor::NameRegister(),
+ isolate()->factory()->home_object_symbol());
+ __ movp(StoreDescriptor::ValueRegister(),
+ Operand(rsp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
}
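The old EmitSetHomeObjectIfNeeded is split into two unconditional helpers that differ only in where the receiver comes from: EmitSetHomeObject reads it from the top of the stack, EmitSetHomeObjectAccumulator from the accumulator (rax); callers now perform the NeedsHomeObject check themselves. That difference in one small model, with registers and the stack as plain stand-ins:

#include <cassert>
#include <vector>

struct StoreOperands { int receiver; int value; };

// EmitSetHomeObject: receiver comes from the top of the stack.
StoreOperands FromStack(const std::vector<int>& stack, int offset) {
  return {stack.back(), stack[stack.size() - 1 - offset]};
}

// EmitSetHomeObjectAccumulator: receiver comes from the accumulator.
StoreOperands FromAccumulator(int accumulator, const std::vector<int>& stack,
                              int offset) {
  return {accumulator, stack[stack.size() - 1 - offset]};
}

int main() {
  std::vector<int> stack = {10, 20, 30};  // back() is the stack top
  assert(FromStack(stack, 0).receiver == 30);
  assert(FromAccumulator(99, stack, 0).receiver == 99);
}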
@@ -1413,66 +1397,25 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context slot"
: "[ Stack slot");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- DCHECK(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else if (var->is_this()) {
- CHECK(info_->has_literal() &&
- (info_->literal()->kind() & kSubclassConstructor) != 0);
- // TODO(dslomov): implement 'this' hole check elimination.
- skip_init_check = false;
+ if (NeedsHoleCheckForLoad(proxy)) {
+ // Let and const need a read barrier.
+ Label done;
+ GetVar(rax, var);
+ __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &done, Label::kNear);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
} else {
- // Check that we always have valid source position.
- DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
- DCHECK(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST_LEGACY &&
- var->initializer_position() < proxy->position();
- }
-
- if (!skip_init_check) {
- // Let and const need a read barrier.
- Label done;
- GetVar(rax, var);
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ Push(var->name());
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else {
- // Uninitalized const bindings outside of harmony mode are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- }
- __ bind(&done);
- context()->Plug(rax);
- break;
+ // Uninitialized legacy const bindings are unholed.
+ DCHECK(var->mode() == CONST_LEGACY);
+ __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}
+ __ bind(&done);
+ context()->Plug(rax);
+ break;
}
context()->Plug(var);
break;
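The replacement code centralizes the skip-the-check analysis in NeedsHoleCheckForLoad and keeps only the runtime barrier: a let/const slot read before initialization still holds the hole sentinel, which throws, while a legacy const read of the hole quietly yields undefined. The barrier's logic as plain C++ (Slot and Mode are models, not V8 types):

#include <cassert>
#include <stdexcept>

enum class Mode { LET_OR_CONST, CONST_LEGACY };
struct Slot { bool is_hole; int value; };

int LoadWithHoleCheck(const Slot& slot, Mode mode) {
  if (!slot.is_hole) return slot.value;  // common case: initialized binding
  if (mode == Mode::LET_OR_CONST)        // TDZ read: throw a ReferenceError
    throw std::runtime_error("ReferenceError");
  return 0;  // CONST_LEGACY: the hole silently reads as undefined
}

int main() {
  assert(LoadWithHoleCheck({false, 7}, Mode::LET_OR_CONST) == 7);
  bool threw = false;
  try { LoadWithHoleCheck({true, 0}, Mode::LET_OR_CONST); }
  catch (const std::runtime_error&) { threw = true; }
  assert(threw);
}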
@@ -1510,8 +1453,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// rax = regexp literal clone.
__ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ movp(rcx, FieldOperand(rdi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
__ movp(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
@@ -1554,11 +1496,18 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
+void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
+ Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ PushRoot(Heap::kNullValueRootIndex);
} else {
VisitForStackValue(expression);
+ if (NeedsHomeObject(expression)) {
+ DCHECK(property->kind() == ObjectLiteral::Property::GETTER ||
+ property->kind() == ObjectLiteral::Property::SETTER);
+ int offset = property->kind() == ObjectLiteral::Property::GETTER ? 2 : 3;
+ EmitSetHomeObject(expression, offset, property->GetSlot());
+ }
}
}
@@ -1592,10 +1541,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
AccessorTable accessor_table(zone());
int property_index = 0;
- // store_slot_index points to the vector IC slot for the next store IC used.
- // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
- // and must be updated if the number of store ICs emitted here changes.
- int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1623,7 +1568,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Move(StoreDescriptor::NameRegister(), key->value());
__ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
} else {
CallStoreIC(key->LiteralFeedbackId());
@@ -1631,14 +1576,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(key->id(), NO_REGISTERS);
if (NeedsHomeObject(value)) {
- __ movp(StoreDescriptor::ReceiverRegister(), rax);
- __ Move(StoreDescriptor::NameRegister(),
- isolate()->factory()->home_object_symbol());
- __ movp(StoreDescriptor::ValueRegister(), Operand(rsp, 0));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
- }
- CallStoreIC();
+ EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
}
} else {
VisitForEffect(value);
@@ -1649,8 +1587,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
__ Push(Smi::FromInt(SLOPPY)); // Language mode
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
@@ -1665,12 +1604,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = value;
+ accessor_table.lookup(key)->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = value;
+ accessor_table.lookup(key)->second->setter = property;
}
break;
}
@@ -1684,13 +1623,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(Operand(rsp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(
- it->second->getter, 2,
- expr->SlotForHomeObject(it->second->getter, &store_slot_index));
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(
- it->second->setter, 3,
- expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ Push(Smi::FromInt(NONE));
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
@@ -1723,8 +1656,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1766,10 +1700,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(rax);
}
-
- // Verify that compilation exactly consumed the number of store ic slots that
- // the ObjectLiteral node had to offer.
- DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
@@ -1866,7 +1796,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(rax);
if (subexpr->IsSpread()) {
VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
__ CallRuntime(Runtime::kAppendElement, 2);
@@ -2275,42 +2206,27 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label gc_required;
- Label allocated;
+ Label allocate, done_allocate;
- const int instance_size = 5 * kPointerSize;
- DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
- instance_size);
+ __ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &allocate, TAG_OBJECT);
+ __ jmp(&done_allocate, Label::kNear);
- __ Allocate(instance_size, rax, rcx, rdx, &gc_required, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&gc_required);
- __ Push(Smi::FromInt(instance_size));
+ __ bind(&allocate);
+ __ Push(Smi::FromInt(JSIteratorResult::kSize));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ movp(context_register(),
- Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&allocated);
- __ movp(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ bind(&done_allocate);
+ __ movp(rbx, GlobalObjectOperand());
__ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
__ movp(rbx, ContextOperand(rbx, Context::ITERATOR_RESULT_MAP_INDEX));
- __ Pop(rcx);
- __ Move(rdx, isolate()->factory()->ToBoolean(done));
__ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
- __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
- isolate()->factory()->empty_fixed_array());
- __ Move(FieldOperand(rax, JSObject::kElementsOffset),
- isolate()->factory()->empty_fixed_array());
- __ movp(FieldOperand(rax, JSGeneratorObject::kResultValuePropertyOffset),
- rcx);
- __ movp(FieldOperand(rax, JSGeneratorObject::kResultDonePropertyOffset),
- rdx);
-
- // Only the value field needs a write barrier, as the other values are in the
- // root set.
- __ RecordWriteField(rax, JSGeneratorObject::kResultValuePropertyOffset,
- rcx, rdx, kDontSaveFPRegs);
+ __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
+ __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rbx);
+ __ Pop(FieldOperand(rax, JSIteratorResult::kValueOffset));
+ __ LoadRoot(FieldOperand(rax, JSIteratorResult::kDoneOffset),
+ done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
}
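The rewritten EmitCreateIteratorResult follows the standard fast-path pattern: attempt an inline bump allocation in new space, and only on failure fall back to the runtime, which can trigger a GC and retry. A compact model of that pattern; Arena is a stand-in for new space, not a V8 class:

#include <cassert>
#include <cstddef>

struct Arena {
  unsigned char buf[64];
  size_t top = 0;
  void* TryAllocate(size_t size) {  // stands in for the __ Allocate fast path
    if (top + size > sizeof(buf)) return nullptr;
    void* result = buf + top;
    top += size;
    return result;
  }
};

void* AllocateIterResult(Arena& new_space, size_t size) {
  if (void* fast = new_space.TryAllocate(size)) return fast;
  // Slow path: Runtime::kAllocateInNewSpace would collect garbage and retry.
  static unsigned char fallback[5 * sizeof(void*)];
  return fallback;
}

int main() {
  Arena new_space;
  void* result = AllocateIterResult(new_space, 5 * sizeof(void*));
  assert(result != nullptr);
}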
@@ -2417,8 +2333,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
- int* used_store_slots) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Constructor is in rax.
DCHECK(lit != NULL);
__ Push(rax);
@@ -2450,8 +2365,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2,
- lit->SlotForHomeObject(value, used_store_slots));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -3149,18 +3065,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ movp(rdi, Operand(rsp, arg_count * kPointerSize));
// Record call targets in unoptimized code, but not in the snapshot.
- if (FLAG_pretenuring_call_new) {
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- }
-
- __ Move(rbx, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(rbx);
__ Move(rdx, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ // Restore context register.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
context()->Plug(rax);
}
@@ -3193,16 +3105,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ movp(rdi, Operand(rsp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- UNREACHABLE();
- /* TODO(dslomov): support pretenuring.
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- */
- }
-
- __ Move(rbx, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(rbx);
__ Move(rdx, SmiFromSlot(expr->CallFeedbackSlot()));
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
@@ -3210,6 +3113,9 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
+ // Restore context register.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+
context()->Plug(rax);
}
@@ -3235,59 +3141,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Condition non_negative_smi = masm()->CheckNonNegativeSmi(rax);
- Split(non_negative_smi, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(rax, if_false);
- __ CompareRoot(rax, Heap::kNullValueRootIndex);
- __ j(equal, if_true);
- __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- __ movzxbp(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
- __ cmpp(rbx, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(below, if_false);
- __ cmpp(rbx, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(below_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3332,91 +3185,6 @@ void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false, skip_lookup;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(rax);
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
- __ testb(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ j(not_zero, &skip_lookup);
-
- // Check for fast case object. Generate false result for slow case object.
- __ movp(rcx, FieldOperand(rax, JSObject::kPropertiesOffset));
- __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ CompareRoot(rcx, Heap::kHashTableMapRootIndex);
- __ j(equal, if_false);
-
- // Look for valueOf string in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(rcx, rbx);
- __ cmpp(rcx, Immediate(0));
- __ j(equal, &done);
-
- __ LoadInstanceDescriptors(rbx, r8);
- // rbx: descriptor array.
- // rcx: valid entries in the descriptor array.
- // Calculate the end of the descriptor array.
- __ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
- __ leap(rcx,
- Operand(r8, rcx, times_pointer_size, DescriptorArray::kFirstOffset));
- // Calculate location of the first key name.
- __ addp(r8, Immediate(DescriptorArray::kFirstOffset));
- // Loop through all the keys in the descriptor array. If one of these is the
- // internalized string "valueOf" the result is false.
- __ jmp(&entry);
- __ bind(&loop);
- __ movp(rdx, FieldOperand(r8, 0));
- __ Cmp(rdx, isolate()->factory()->value_of_string());
- __ j(equal, if_false);
- __ addp(r8, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ cmpp(r8, rcx);
- __ j(not_equal, &loop);
-
- __ bind(&done);
-
- // Set the bit in the map to indicate that there is no local valueOf field.
- __ orp(FieldOperand(rbx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
-
- __ bind(&skip_lookup);
-
- // If a valueOf property is not found on the object check that its
- // prototype is the un-modified String prototype. If not result is false.
- __ movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
- __ testp(rcx, Immediate(kSmiTagMask));
- __ j(zero, if_false);
- __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
- __ movp(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(rdx, FieldOperand(rdx, GlobalObject::kNativeContextOffset));
- __ cmpp(rcx,
- ContextOperand(rdx, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3892,6 +3660,23 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into rax and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Convert the object to an integer.
+ Label done_convert;
+ __ JumpIfSmi(rax, &done_convert, Label::kNear);
+ __ Push(rax);
+ __ CallRuntime(Runtime::kToInteger, 1);
+ __ bind(&done_convert);
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(args->length(), 1);
@@ -3905,6 +3690,40 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into rax and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ ToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(rax);
+}
+
+
+void FullCodeGenerator::EmitToName(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into rax and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Convert the object to a name.
+ Label convert, done_convert;
+ __ JumpIfSmi(rax, &convert, Label::kNear);
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ CmpObjectType(rax, LAST_NAME_TYPE, rcx);
+ __ j(below_equal, &done_convert, Label::kNear);
+ __ bind(&convert);
+ __ Push(rax);
+ __ CallRuntime(Runtime::kToName, 1);
+ __ bind(&done_convert);
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -4044,6 +3863,26 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCall(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_LE(2, args->length());
+ // Push target, receiver and arguments onto the stack.
+ for (Expression* const arg : *args) {
+ VisitForStackValue(arg);
+ }
+ // Move target to rdi.
+ int const argc = args->length() - 2;
+ __ movp(rdi, Operand(rsp, (argc + 1) * kPointerSize));
+ // Call the target.
+ __ Set(rax, argc);
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ context()->DropAndPlug(1, rax);
+}
+
+
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4069,7 +3908,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ bind(&runtime);
__ Push(rax);
- __ CallRuntime(Runtime::kCall, args->length());
+ __ CallRuntime(Runtime::kCallFunction, args->length());
__ bind(&done);
context()->Plug(rax);
@@ -4080,16 +3919,9 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- // new.target
+ // Evaluate new.target and super constructor.
VisitForStackValue(args->at(0));
-
- // .this_function
VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kGetPrototype, 1);
- __ Push(result_register());
-
- // Load original constructor into rcx.
- __ movp(rcx, Operand(rsp, 1 * kPointerSize));
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
@@ -4119,15 +3951,14 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
}
__ bind(&args_set_up);
- __ movp(rdi, Operand(rsp, rax, times_pointer_size, 0));
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
-
- CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ movp(rdx, Operand(rsp, rax, times_pointer_size, 1 * kPointerSize));
+ __ movp(rdi, Operand(rsp, rax, times_pointer_size, 0 * kPointerSize));
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
+ // Restore context register.
+ __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- context()->Plug(result_register());
+ context()->DropAndPlug(1, rax);
}
@@ -4479,17 +4310,42 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ Label runtime, done;
+
+ __ Allocate(JSIteratorResult::kSize, rax, rcx, rdx, &runtime, TAG_OBJECT);
+ __ movp(rbx, GlobalObjectOperand());
+ __ movp(rbx, FieldOperand(rbx, GlobalObject::kNativeContextOffset));
+ __ movp(rbx, ContextOperand(rbx, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
+ __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
+ __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rbx);
+ __ Pop(FieldOperand(rax, JSIteratorResult::kDoneOffset));
+ __ Pop(FieldOperand(rax, JSIteratorResult::kValueOffset));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&runtime);
+ __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+
+ __ bind(&done);
+ context()->Plug(rax);
+}
+
+
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push the builtins object as receiver.
- __ movp(rax, GlobalObjectOperand());
- __ Push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
- // Load the function from the receiver.
- __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
- __ Move(LoadDescriptor::NameRegister(), expr->name());
- __ Move(LoadDescriptor::SlotRegister(),
- SmiFromSlot(expr->CallRuntimeFeedbackSlot()));
- CallLoadIC(NOT_INSIDE_TYPEOF);
+ __ movp(rax, GlobalObjectOperand());
+ __ movp(rax, FieldOperand(rax, GlobalObject::kNativeContextOffset));
+ __ movp(rax, ContextOperand(rax, expr->context_index()));
}
@@ -4978,22 +4834,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Split(not_zero, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(rax, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx);
- __ j(equal, if_true);
- __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE);
+ // Check for callable and not undetectable objects => true.
+ __ movp(rdx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movzxbl(rdx, FieldOperand(rdx, Map::kBitFieldOffset));
+ __ andb(rdx,
+ Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ cmpb(rdx, Immediate(1 << Map::kIsCallable));
Split(equal, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(rax, if_false);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
__ j(equal, if_true);
- __ CmpObjectType(rax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, rdx);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rdx);
__ j(below, if_false);
- __ CmpInstanceType(rdx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, if_false);
- // Check for undetectable objects => false.
+ // Check for callable or undetectable objects => false.
__ testb(FieldOperand(rdx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
+ Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(zero, if_true, if_false, fall_through);
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
@@ -5034,20 +4891,20 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kHasProperty, 2);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
Split(equal, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ VisitForAccumulatorValue(expr->right());
+ __ Pop(rdx);
+ InstanceOfStub stub(isolate());
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ testp(rax, rax);
- // The stub returns 0 for true.
- Split(zero, if_true, if_false, fall_through);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ CompareRoot(rax, Heap::kTrueValueRootIndex);
+ Split(equal, if_true, if_false, fall_through);
break;
}
diff --git a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
index fdf42cdb8a..a85152d7a9 100644
--- a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -94,14 +94,14 @@ void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
- SetFunctionPosition(function());
+ SetFunctionPosition(literal());
Comment cmnt(masm_, "[ function compiled by full code generator");
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
- function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+ literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
@@ -109,8 +109,7 @@ void FullCodeGenerator::Generate() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info->language_mode()) && !info->is_native() &&
- info->MayUseThis()) {
+ if (info->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
// +1 for return address.
int receiver_offset = (info->scope()->num_parameters() + 1) * kPointerSize;
@@ -139,7 +138,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
- DCHECK(!IsGeneratorFunction(function()->kind()) || locals_count == 0);
+ DCHECK(!IsGeneratorFunction(literal()->kind()) || locals_count == 0);
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -151,7 +150,7 @@ void FullCodeGenerator::Generate() {
ExternalReference::address_of_real_stack_limit(isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
}
__ mov(eax, Immediate(isolate()->factory()->undefined_value()));
@@ -230,6 +229,11 @@ void FullCodeGenerator::Generate() {
}
}
+ PrepareForBailoutForId(BailoutId::Prologue(), NO_REGISTERS);
+ // The function register is trashed in case we bail out here. But since
+ // that could happen only when we allocate a context, the value of
+ // |function_in_register| is correct.
+
// Possibly set up a local binding to the this function which is used in
// derived constructors with super calls.
Variable* this_function_var = scope()->this_function_var();
@@ -272,58 +276,29 @@ void FullCodeGenerator::Generate() {
SetVar(new_target_var, eax, ebx, edx);
}
-
- // Possibly allocate RestParameters
- int rest_index;
- Variable* rest_param = scope()->rest_parameter(&rest_index);
- if (rest_param) {
- Comment cmnt(masm_, "[ Allocate rest parameter array");
-
- int num_parameters = info->scope()->num_parameters();
- int offset = num_parameters * kPointerSize;
-
- __ lea(edx,
- Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(edx);
- __ push(Immediate(Smi::FromInt(num_parameters)));
- __ push(Immediate(Smi::FromInt(rest_index)));
- __ push(Immediate(Smi::FromInt(language_mode())));
-
- RestParamAccessStub stub(isolate());
- __ CallStub(&stub);
-
- SetVar(rest_param, eax, ebx, edx);
- }
-
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
- if (function_in_register) {
- __ push(edi);
- } else {
- __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
+ if (!function_in_register) {
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}
// Receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ lea(edx,
+ __ mov(ArgumentsAccessNewDescriptor::parameter_count(),
+ Immediate(Smi::FromInt(num_parameters)));
+ __ lea(ArgumentsAccessNewDescriptor::parameter_pointer(),
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
- __ push(edx);
- __ push(Immediate(Smi::FromInt(num_parameters)));
- // Arguments to ArgumentsAccessStub:
- // function, receiver address, parameter count.
- // The stub will rewrite receiver and parameter count if the previous
- // stack frame was an arguments adapter frame.
- ArgumentsAccessStub::Type type;
- if (is_strict(language_mode()) || !has_simple_parameters()) {
- type = ArgumentsAccessStub::NEW_STRICT;
- } else if (function()->has_duplicate_parameters()) {
- type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
- } else {
- type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
- }
+ // Arguments to ArgumentsAccessStub:
+ // function, parameter pointer, parameter count.
+ // The stub will rewrite the parameter pointer and parameter count if the
+ // previous stack frame was an arguments adaptor frame.
+ bool is_unmapped = is_strict(language_mode()) || !has_simple_parameters();
+ ArgumentsAccessStub::Type type = ArgumentsAccessStub::ComputeType(
+ is_unmapped, literal()->has_duplicate_parameters());
ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
@@ -364,7 +339,7 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
- VisitStatements(function()->body());
+ VisitStatements(literal()->body());
DCHECK(loop_depth() == 0);
}
}
@@ -457,7 +432,7 @@ void FullCodeGenerator::EmitReturnSequence() {
EmitProfilingCounterReset();
__ bind(&ok);
- SetReturnPosition(function());
+ SetReturnPosition(literal());
int no_frame_start = masm_->pc_offset();
__ leave();
@@ -743,7 +718,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
// Only prepare for bailouts before splits if we're in a test
// context. Otherwise, we let the Visit function deal with the
// preparation to avoid preparing with the same AST id twice.
- if (!context()->IsTest() || !info_->IsOptimizable()) return;
+ if (!context()->IsTest()) return;
Label skip;
if (should_normalize) __ jmp(&skip, Label::kNear);
@@ -1076,8 +1051,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&fixed_array);
// No need for a write barrier, we are storing a Smi in the feedback vector.
- __ LoadHeapObject(ebx, FeedbackVector());
- int vector_index = FeedbackVector()->GetIndex(slot);
+ __ EmitLoadTypeFeedbackVector(ebx);
+ int vector_index = SmiFromSlot(slot)->value();
__ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(vector_index)),
Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
@@ -1184,29 +1159,35 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
__ mov(ebx, Immediate(info));
__ CallStub(&stub);
} else {
- __ push(esi);
__ push(Immediate(info));
- __ push(Immediate(pretenure
- ? isolate()->factory()->true_value()
- : isolate()->factory()->false_value()));
- __ CallRuntime(Runtime::kNewClosure, 3);
+ __ CallRuntime(
+ pretenure ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure, 1);
}
context()->Plug(eax);
}
-void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
- int offset,
- FeedbackVectorICSlot slot) {
- if (NeedsHomeObject(initializer)) {
- __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
- __ mov(StoreDescriptor::NameRegister(),
- Immediate(isolate()->factory()->home_object_symbol()));
- __ mov(StoreDescriptor::ValueRegister(),
- Operand(esp, offset * kPointerSize));
- if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
- CallStoreIC();
- }
+void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
+ FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
+ __ mov(StoreDescriptor::NameRegister(),
+ Immediate(isolate()->factory()->home_object_symbol()));
+ __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
+}
+
+
+void FullCodeGenerator::EmitSetHomeObjectAccumulator(
+ Expression* initializer, int offset, FeedbackVectorICSlot slot) {
+ DCHECK(NeedsHomeObject(initializer));
+ __ mov(StoreDescriptor::ReceiverRegister(), eax);
+ __ mov(StoreDescriptor::NameRegister(),
+ Immediate(isolate()->factory()->home_object_symbol()));
+ __ mov(StoreDescriptor::ValueRegister(), Operand(esp, offset * kPointerSize));
+ if (FLAG_vector_stores) EmitLoadStoreICSlot(slot);
+ CallStoreIC();
}
@@ -1377,66 +1358,26 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
DCHECK_EQ(NOT_INSIDE_TYPEOF, typeof_mode);
Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
: "[ Stack variable");
- if (var->binding_needs_init()) {
- // var->scope() may be NULL when the proxy is located in eval code and
- // refers to a potential outside binding. Currently those bindings are
- // always looked up dynamically, i.e. in that case
- // var->location() == LOOKUP.
- // always holds.
- DCHECK(var->scope() != NULL);
-
- // Check if the binding really needs an initialization check. The check
- // can be skipped in the following situation: we have a LET or CONST
- // binding in harmony mode, both the Variable and the VariableProxy have
- // the same declaration scope (i.e. they are both in global code, in the
- // same function or in the same eval code) and the VariableProxy is in
- // the source physically located after the initializer of the variable.
- //
- // We cannot skip any initialization checks for CONST in non-harmony
- // mode because const variables may be declared but never initialized:
- // if (false) { const x; }; var y = x;
- //
- // The condition on the declaration scopes is a conservative check for
- // nested functions that access a binding and are called before the
- // binding is initialized:
- // function() { f(); let x = 1; function f() { x = 2; } }
- //
- bool skip_init_check;
- if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
- skip_init_check = false;
- } else if (var->is_this()) {
- CHECK(info_->has_literal() &&
- (info_->literal()->kind() & kSubclassConstructor) != 0);
- // TODO(dslomov): implement 'this' hole check elimination.
- skip_init_check = false;
- } else {
- // Check that we always have valid source position.
- DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
- DCHECK(proxy->position() != RelocInfo::kNoPosition);
- skip_init_check = var->mode() != CONST_LEGACY &&
- var->initializer_position() < proxy->position();
- }
- if (!skip_init_check) {
- // Let and const need a read barrier.
- Label done;
- GetVar(eax, var);
- __ cmp(eax, isolate()->factory()->the_hole_value());
- __ j(not_equal, &done, Label::kNear);
- if (var->mode() == LET || var->mode() == CONST) {
- // Throw a reference error when using an uninitialized let/const
- // binding in harmony mode.
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- } else {
- // Uninitalized const bindings outside of harmony mode are unholed.
- DCHECK(var->mode() == CONST_LEGACY);
- __ mov(eax, isolate()->factory()->undefined_value());
- }
- __ bind(&done);
- context()->Plug(eax);
- break;
+ if (NeedsHoleCheckForLoad(proxy)) {
+ // Let and const need a read barrier.
+ Label done;
+ GetVar(eax, var);
+ __ cmp(eax, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &done, Label::kNear);
+ if (var->mode() == LET || var->mode() == CONST) {
+ // Throw a reference error when using an uninitialized let/const
+ // binding in harmony mode.
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ } else {
+ // Uninitialized legacy const bindings are unholed.
+ DCHECK(var->mode() == CONST_LEGACY);
+ __ mov(eax, isolate()->factory()->undefined_value());
}
+ __ bind(&done);
+ context()->Plug(eax);
+ break;
}
context()->Plug(var);
break;
@@ -1474,8 +1415,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// eax = regexp literal clone.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
- int literal_offset =
- FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+ int literal_offset = LiteralsArray::OffsetOfLiteralAt(expr->literal_index());
__ mov(ebx, FieldOperand(ecx, literal_offset));
__ cmp(ebx, isolate()->factory()->undefined_value());
__ j(not_equal, &materialized, Label::kNear);
@@ -1518,11 +1458,18 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
}
-void FullCodeGenerator::EmitAccessor(Expression* expression) {
+void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
+ Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
__ push(Immediate(isolate()->factory()->null_value()));
} else {
VisitForStackValue(expression);
+ if (NeedsHomeObject(expression)) {
+ DCHECK(property->kind() == ObjectLiteral::Property::GETTER ||
+ property->kind() == ObjectLiteral::Property::SETTER);
+ int offset = property->kind() == ObjectLiteral::Property::GETTER ? 2 : 3;
+ EmitSetHomeObject(expression, offset, property->GetSlot());
+ }
}
}
@@ -1558,10 +1505,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
AccessorTable accessor_table(zone());
int property_index = 0;
- // store_slot_index points to the vector IC slot for the next store IC used.
- // ObjectLiteral::ComputeFeedbackRequirements controls the allocation of slots
- // and must be updated if the number of store ICs emitted here changes.
- int store_slot_index = 0;
for (; property_index < expr->properties()->length(); property_index++) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
if (property->is_computed_name()) break;
@@ -1589,22 +1532,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
__ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
+ EmitLoadStoreICSlot(property->GetSlot(0));
CallStoreIC();
} else {
CallStoreIC(key->LiteralFeedbackId());
}
PrepareForBailoutForId(key->id(), NO_REGISTERS);
-
if (NeedsHomeObject(value)) {
- __ mov(StoreDescriptor::ReceiverRegister(), eax);
- __ mov(StoreDescriptor::NameRegister(),
- Immediate(isolate()->factory()->home_object_symbol()));
- __ mov(StoreDescriptor::ValueRegister(), Operand(esp, 0));
- if (FLAG_vector_stores) {
- EmitLoadStoreICSlot(expr->GetNthSlot(store_slot_index++));
- }
- CallStoreIC();
+ EmitSetHomeObjectAccumulator(value, 0, property->GetSlot(1));
}
} else {
VisitForEffect(value);
@@ -1615,8 +1550,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
__ push(Immediate(Smi::FromInt(SLOPPY))); // Language mode
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
@@ -1631,12 +1567,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->getter = value;
+ accessor_table.lookup(key)->second->getter = property;
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store()) {
- accessor_table.lookup(key)->second->setter = value;
+ accessor_table.lookup(key)->second->setter = property;
}
break;
}
@@ -1649,15 +1585,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
++it) {
__ push(Operand(esp, 0)); // Duplicate receiver.
VisitForStackValue(it->first);
- EmitAccessor(it->second->getter);
- EmitSetHomeObjectIfNeeded(
- it->second->getter, 2,
- expr->SlotForHomeObject(it->second->getter, &store_slot_index));
+ EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
- EmitSetHomeObjectIfNeeded(
- it->second->setter, 3,
- expr->SlotForHomeObject(it->second->setter, &store_slot_index));
__ push(Immediate(Smi::FromInt(NONE)));
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -1691,8 +1621,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
EmitPropertyKey(property, expr->GetIdForProperty(property_index));
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(
- value, 2, expr->SlotForHomeObject(value, &store_slot_index));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -1734,10 +1665,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
} else {
context()->Plug(eax);
}
-
- // Verify that compilation exactly consumed the number of store ic slots that
- // the ObjectLiteral node had to offer.
- DCHECK(!FLAG_vector_stores || store_slot_index == expr->slot_count());
}
@@ -1832,7 +1759,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ Push(eax);
if (subexpr->IsSpread()) {
VisitForStackValue(subexpr->AsSpread()->expression());
- __ InvokeBuiltin(Builtins::CONCAT_ITERABLE_TO_ARRAY, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::CONCAT_ITERABLE_TO_ARRAY_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
VisitForStackValue(subexpr);
__ CallRuntime(Runtime::kAppendElement, 2);
@@ -2242,40 +2170,28 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
- Label gc_required;
- Label allocated;
-
- const int instance_size = 5 * kPointerSize;
- DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
- instance_size);
+ Label allocate, done_allocate;
- __ Allocate(instance_size, eax, ecx, edx, &gc_required, TAG_OBJECT);
- __ jmp(&allocated);
+ __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &allocate, TAG_OBJECT);
+ __ jmp(&done_allocate, Label::kNear);
- __ bind(&gc_required);
- __ Push(Smi::FromInt(instance_size));
+ __ bind(&allocate);
+ __ Push(Smi::FromInt(JSIteratorResult::kSize));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ mov(context_register(),
- Operand(ebp, StandardFrameConstants::kContextOffset));
- __ bind(&allocated);
- __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ bind(&done_allocate);
+ __ mov(ebx, GlobalObjectOperand());
__ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
__ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
- __ pop(ecx);
- __ mov(edx, isolate()->factory()->ToBoolean(done));
__ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
isolate()->factory()->empty_fixed_array());
__ mov(FieldOperand(eax, JSObject::kElementsOffset),
isolate()->factory()->empty_fixed_array());
- __ mov(FieldOperand(eax, JSGeneratorObject::kResultValuePropertyOffset), ecx);
- __ mov(FieldOperand(eax, JSGeneratorObject::kResultDonePropertyOffset), edx);
-
- // Only the value field needs a write barrier, as the other values are in the
- // root set.
- __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset, ecx,
- edx, kDontSaveFPRegs);
+ __ pop(FieldOperand(eax, JSIteratorResult::kValueOffset));
+ __ mov(FieldOperand(eax, JSIteratorResult::kDoneOffset),
+ isolate()->factory()->ToBoolean(done));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
}
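
The five stores above fill exactly the five pointer-sized fields that the trailing STATIC_ASSERT pins down. As a sketch, a hypothetical struct mirroring the layout implied by the offsets used here (field order assumed from this diff, not quoted from objects.h):

// Assumed JSIteratorResult layout, one pointer-sized slot per store above.
struct IteratorResultLayout {
  void* map;         // HeapObject::kMapOffset         <- iterator_result_map
  void* properties;  // JSObject::kPropertiesOffset    <- empty_fixed_array
  void* elements;    // JSObject::kElementsOffset      <- empty_fixed_array
  void* value;       // JSIteratorResult::kValueOffset <- popped result value
  void* done;        // JSIteratorResult::kDoneOffset  <- ToBoolean(done)
};
static_assert(sizeof(IteratorResultLayout) == 5 * sizeof(void*),
              "matches the STATIC_ASSERT in the emitted code");
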
@@ -2417,8 +2333,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
}
-void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
- int* used_store_slots) {
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// Constructor is in eax.
DCHECK(lit != NULL);
__ push(eax);
@@ -2450,8 +2365,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit,
}
VisitForStackValue(value);
- EmitSetHomeObjectIfNeeded(value, 2,
- lit->SlotForHomeObject(value, used_store_slots));
+ if (NeedsHomeObject(value)) {
+ EmitSetHomeObject(value, 2, property->GetSlot());
+ }
switch (property->kind()) {
case ObjectLiteral::Property::CONSTANT:
@@ -3148,18 +3064,14 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(edi, Operand(esp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- }
-
- __ LoadHeapObject(ebx, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(ebx);
__ mov(edx, Immediate(SmiFromSlot(expr->CallNewFeedbackSlot())));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->Plug(eax);
}
@@ -3192,16 +3104,7 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ mov(edi, Operand(esp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
- if (FLAG_pretenuring_call_new) {
- UNREACHABLE();
- /* TODO(dslomov): support pretenuring.
- EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
- DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
- expr->CallNewFeedbackSlot().ToInt() + 1);
- */
- }
-
- __ LoadHeapObject(ebx, FeedbackVector());
+ __ EmitLoadTypeFeedbackVector(ebx);
__ mov(edx, Immediate(SmiFromSlot(expr->CallFeedbackSlot())));
CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
@@ -3209,6 +3112,8 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
RecordJSReturnSite(expr);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->Plug(eax);
}
@@ -3234,59 +3139,6 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, Immediate(kSmiTagMask | 0x80000000));
- Split(zero, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
-void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ JumpIfSmi(eax, if_false);
- __ cmp(eax, isolate()->factory()->null_value());
- __ j(equal, if_true);
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined when tested with typeof.
- __ movzx_b(ecx, FieldOperand(ebx, Map::kBitFieldOffset));
- __ test(ecx, Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, if_false);
- __ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(below, if_false);
- __ cmp(ecx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(below_equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3331,97 +3183,6 @@ void FullCodeGenerator::EmitIsSimdValue(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- DCHECK(args->length() == 1);
-
- VisitForAccumulatorValue(args->at(0));
-
- Label materialize_true, materialize_false, skip_lookup;
- Label* if_true = NULL;
- Label* if_false = NULL;
- Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
-
- __ AssertNotSmi(eax);
-
- // Check whether this map has already been checked to be safe for default
- // valueOf.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
- __ test_b(FieldOperand(ebx, Map::kBitField2Offset),
- 1 << Map::kStringWrapperSafeForDefaultValueOf);
- __ j(not_zero, &skip_lookup);
-
- // Check for fast case object. Return false for slow case objects.
- __ mov(ecx, FieldOperand(eax, JSObject::kPropertiesOffset));
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ cmp(ecx, isolate()->factory()->hash_table_map());
- __ j(equal, if_false);
-
- // Look for valueOf string in the descriptor array, and indicate false if
- // found. Since we omit an enumeration index check, if it is added via a
- // transition that shares its descriptor array, this is a false positive.
- Label entry, loop, done;
-
- // Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(ecx, ebx);
- __ cmp(ecx, 0);
- __ j(equal, &done);
-
- __ LoadInstanceDescriptors(ebx, ebx);
- // ebx: descriptor array.
- // ecx: valid entries in the descriptor array.
- // Calculate the end of the descriptor array.
- STATIC_ASSERT(kSmiTag == 0);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kPointerSize == 4);
- __ imul(ecx, ecx, DescriptorArray::kDescriptorSize);
- __ lea(ecx, Operand(ebx, ecx, times_4, DescriptorArray::kFirstOffset));
- // Calculate location of the first key name.
- __ add(ebx, Immediate(DescriptorArray::kFirstOffset));
- // Loop through all the keys in the descriptor array. If one of these is the
- // internalized string "valueOf" the result is false.
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(edx, FieldOperand(ebx, 0));
- __ cmp(edx, isolate()->factory()->value_of_string());
- __ j(equal, if_false);
- __ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize));
- __ bind(&entry);
- __ cmp(ebx, ecx);
- __ j(not_equal, &loop);
-
- __ bind(&done);
-
- // Reload map as register ebx was used as temporary above.
- __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
-
- // Set the bit in the map to indicate that there is no local valueOf field.
- __ or_(FieldOperand(ebx, Map::kBitField2Offset),
- Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
-
- __ bind(&skip_lookup);
-
- // If a valueOf property is not found on the object check that its
- // prototype is the un-modified String prototype. If not result is false.
- __ mov(ecx, FieldOperand(ebx, Map::kPrototypeOffset));
- __ JumpIfSmi(ecx, if_false);
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ mov(edx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(edx,
- FieldOperand(edx, GlobalObject::kNativeContextOffset));
- __ cmp(ecx,
- ContextOperand(edx,
- Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(equal, if_true, if_false, fall_through);
-
- context()->Plug(if_true, if_false);
-}
-
-
void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3893,6 +3654,23 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into eax and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Convert the object to an integer.
+ Label done_convert;
+ __ JumpIfSmi(eax, &done_convert, Label::kNear);
+ __ Push(eax);
+ __ CallRuntime(Runtime::kToInteger, 1);
+ __ bind(&done_convert);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(args->length(), 1);
@@ -3906,6 +3684,40 @@ void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitToString(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into eax and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ ToStringStub stub(isolate());
+ __ CallStub(&stub);
+ context()->Plug(eax);
+}
+
+
+void FullCodeGenerator::EmitToName(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(1, args->length());
+
+ // Load the argument into eax and convert it.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Convert the object to a name.
+ Label convert, done_convert;
+ __ JumpIfSmi(eax, &convert, Label::kNear);
+ STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE);
+ __ CmpObjectType(eax, LAST_NAME_TYPE, ecx);
+ __ j(below_equal, &done_convert, Label::kNear);
+ __ bind(&convert);
+ __ Push(eax);
+ __ CallRuntime(Runtime::kToName, 1);
+ __ bind(&done_convert);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitToObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
@@ -4045,6 +3857,26 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCall(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_LE(2, args->length());
+ // Push target, receiver and arguments onto the stack.
+ for (Expression* const arg : *args) {
+ VisitForStackValue(arg);
+ }
+ // Move target to edi.
+ int const argc = args->length() - 2;
+ __ mov(edi, Operand(esp, (argc + 1) * kPointerSize));
+ // Call the target.
+ __ mov(eax, Immediate(argc));
+ __ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ // Discard the function left on TOS.
+ context()->DropAndPlug(1, eax);
+}
+
+
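
The operand arithmetic in EmitCall follows from the push order (target, receiver, then the actual arguments) with argc = args->length() - 2. Stack right before the Call builtin is invoked, top of stack first (a sketch of the layout this diff implies, not authoritative):

// esp + 0 * kPointerSize          : last argument
// ...
// esp + (argc - 1) * kPointerSize : first argument
// esp + argc * kPointerSize       : receiver
// esp + (argc + 1) * kPointerSize : target   <- loaded into edi above
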
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() >= 2);
@@ -4070,7 +3902,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
__ bind(&runtime);
__ push(eax);
- __ CallRuntime(Runtime::kCall, args->length());
+ __ CallRuntime(Runtime::kCallFunction, args->length());
__ bind(&done);
context()->Plug(eax);
@@ -4081,16 +3913,9 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- // new.target
+ // Evaluate new.target and super constructor.
VisitForStackValue(args->at(0));
-
- // .this_function
VisitForStackValue(args->at(1));
- __ CallRuntime(Runtime::kGetPrototype, 1);
- __ push(result_register());
-
- // Load original constructor into ecx.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, args_set_up, runtime;
@@ -4121,14 +3946,14 @@ void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
__ bind(&args_set_up);
- __ mov(edi, Operand(esp, eax, times_pointer_size, 0));
- __ mov(ebx, Immediate(isolate()->factory()->undefined_value()));
- CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
- __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ mov(edx, Operand(esp, eax, times_pointer_size, 1 * kPointerSize));
+ __ mov(edi, Operand(esp, eax, times_pointer_size, 0 * kPointerSize));
+ __ Call(isolate()->builtins()->Construct(), RelocInfo::CONSTRUCT_CALL);
- __ Drop(1);
+ // Restore context register.
+ __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- context()->Plug(eax);
+ context()->DropAndPlug(1, eax);
}
@@ -4459,17 +4284,43 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
+ ZoneList<Expression*>* args = expr->arguments();
+ DCHECK_EQ(2, args->length());
+ VisitForStackValue(args->at(0));
+ VisitForStackValue(args->at(1));
+
+ Label runtime, done;
+
+ __ Allocate(JSIteratorResult::kSize, eax, ecx, edx, &runtime, TAG_OBJECT);
+ __ mov(ebx, GlobalObjectOperand());
+ __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
+ __ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+ isolate()->factory()->empty_fixed_array());
+ __ pop(FieldOperand(eax, JSIteratorResult::kDoneOffset));
+ __ pop(FieldOperand(eax, JSIteratorResult::kValueOffset));
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&runtime);
+ __ CallRuntime(Runtime::kCreateIterResultObject, 2);
+
+ __ bind(&done);
+ context()->Plug(eax);
+}
+
+
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
- // Push the builtins object as receiver.
- __ mov(eax, GlobalObjectOperand());
- __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
+ // Push undefined as receiver.
+ __ push(Immediate(isolate()->factory()->undefined_value()));
- // Load the function from the receiver.
- __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
- __ mov(LoadDescriptor::NameRegister(), Immediate(expr->name()));
- __ mov(LoadDescriptor::SlotRegister(),
- Immediate(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
- CallLoadIC(NOT_INSIDE_TYPEOF);
+ __ mov(eax, GlobalObjectOperand());
+ __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
+ __ mov(eax, ContextOperand(eax, expr->context_index()));
}
@@ -4955,27 +4806,27 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ JumpIfSmi(eax, if_false);
// Check for undetectable objects => true.
__ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
- __ test(ecx, Immediate(1 << Map::kIsUndetectable));
+ __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
+ 1 << Map::kIsUndetectable);
Split(not_zero, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(eax, if_false);
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx);
- __ j(equal, if_true);
- __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE);
+ // Check for callable and not undetectable objects => true.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ and_(ecx, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+ __ cmp(ecx, 1 << Map::kIsCallable);
Split(equal, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->object_string())) {
__ JumpIfSmi(eax, if_false);
__ cmp(eax, isolate()->factory()->null_value());
__ j(equal, if_true);
- __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, edx);
__ j(below, if_false);
- __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, if_false);
- // Check for undetectable objects => false.
+ // Check for callable or undetectable objects => false.
__ test_b(FieldOperand(edx, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
Split(zero, if_true, if_false, fall_through);
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
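
The rewritten typeof "function" test above no longer enumerates instance types; it keys off two map bits: a value answers "function" iff its map is callable and not undetectable. An equivalent predicate, sketching what the and_/cmp pair computes (only the bit names come from the diff; the helper itself is illustrative):

// Sketch: the condition encoded by the and_/cmp sequence above.
static bool TypeofSaysFunction(unsigned map_bit_field) {
  unsigned mask = (1u << Map::kIsCallable) | (1u << Map::kIsUndetectable);
  return (map_bit_field & mask) == (1u << Map::kIsCallable);
}
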
@@ -5016,20 +4867,20 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::IN:
VisitForStackValue(expr->right());
- __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kHasProperty, 2);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ cmp(eax, isolate()->factory()->true_value());
Split(equal, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
- VisitForStackValue(expr->right());
- InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+ VisitForAccumulatorValue(expr->right());
+ __ Pop(edx);
+ InstanceOfStub stub(isolate());
__ CallStub(&stub);
- PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ test(eax, eax);
- // The stub returns 0 for true.
- Split(zero, if_true, if_false, fall_through);
+ PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+ __ cmp(eax, isolate()->factory()->true_value());
+ Split(equal, if_true, if_false, fall_through);
break;
}
diff --git a/deps/v8/src/futex-emulation.cc b/deps/v8/src/futex-emulation.cc
index 5a0ce07f1a..b0e514e8af 100644
--- a/deps/v8/src/futex-emulation.cc
+++ b/deps/v8/src/futex-emulation.cc
@@ -21,6 +21,23 @@ base::LazyInstance<FutexWaitList>::type FutexEmulation::wait_list_ =
LAZY_INSTANCE_INITIALIZER;
+void FutexWaitListNode::NotifyWake() {
+ // Lock the FutexEmulation mutex before notifying. We know that the mutex
+ // will have been unlocked if we are currently waiting on the condition
+ // variable.
+ //
+ // The mutex may also not be locked if the other thread is currently handling
+ // interrupts, or if FutexEmulation::Wait was just called and the mutex
+ // hasn't been locked yet. In either of those cases, we set the interrupted
+ // flag to true, which will be tested after the mutex is re-locked.
+ base::LockGuard<base::Mutex> lock_guard(FutexEmulation::mutex_.Pointer());
+ if (waiting_) {
+ cond_.NotifyOne();
+ interrupted_ = true;
+ }
+}
+
+
FutexWaitList::FutexWaitList() : head_(nullptr), tail_(nullptr) {}
@@ -58,12 +75,6 @@ void FutexWaitList::RemoveNode(FutexWaitListNode* node) {
Object* FutexEmulation::Wait(Isolate* isolate,
Handle<JSArrayBuffer> array_buffer, size_t addr,
int32_t value, double rel_timeout_ms) {
- // We never want to wait longer than this amount of time; this way we can
- // interrupt this thread even if this is an "infinitely blocking" wait.
- // TODO(binji): come up with a better way of interrupting only when
- // necessary, rather than busy-waiting.
- const base::TimeDelta kMaxWaitTime = base::TimeDelta::FromMilliseconds(50);
-
DCHECK(addr < NumberToSize(isolate, array_buffer->byte_length()));
void* backing_store = array_buffer->backing_store();
@@ -103,41 +114,75 @@ Object* FutexEmulation::Wait(Isolate* isolate,
base::TimeTicks start_time = base::TimeTicks::Now();
base::TimeTicks timeout_time = start_time + rel_timeout;
+ base::TimeTicks current_time = start_time;
wait_list_.Pointer()->AddNode(node);
Object* result;
while (true) {
- base::TimeTicks current_time = base::TimeTicks::Now();
- if (use_timeout && current_time > timeout_time) {
- result = Smi::FromInt(Result::kTimedOut);
- break;
+ bool interrupted = node->interrupted_;
+ node->interrupted_ = false;
+
+ // Unlock the mutex here to prevent deadlock from lock ordering between
+ // mutex_ and mutexes locked by HandleInterrupts.
+ mutex_.Pointer()->Unlock();
+
+ // Because the mutex is unlocked, we have to be careful about not dropping
+ // an interrupt. The notification can happen in three different places:
+ // 1) Before Wait is called: the notification will be dropped, but
+ // interrupted_ will be set to 1. This will be checked below.
+ // 2) After interrupted has been checked here, but before mutex_ is
+ // acquired: interrupted is checked again below, with mutex_ locked.
+ // Because the wakeup signal also acquires mutex_, we know it will not
+ // be able to notify until mutex_ is released below, when waiting on the
+ // condition variable.
+ // 3) After the mutex is released in the call to WaitFor(): this
+ // notification will wake up the condition variable. node->waiting() will
+ // be false, so we'll loop and then check interrupts.
+ if (interrupted) {
+ Object* interrupt_object = isolate->stack_guard()->HandleInterrupts();
+ if (interrupt_object->IsException()) {
+ result = interrupt_object;
+ mutex_.Pointer()->Lock();
+ break;
+ }
}
- base::TimeDelta time_until_timeout = timeout_time - current_time;
- base::TimeDelta time_to_wait =
- (use_timeout && time_until_timeout < kMaxWaitTime) ? time_until_timeout
- : kMaxWaitTime;
+ mutex_.Pointer()->Lock();
- bool wait_for_result = node->cond_.WaitFor(mutex_.Pointer(), time_to_wait);
- USE(wait_for_result);
+ if (node->interrupted_) {
+    // An interrupt occurred while the mutex_ was unlocked. Don't wait yet.
+ continue;
+ }
if (!node->waiting_) {
result = Smi::FromInt(Result::kOk);
break;
}
- // Spurious wakeup or timeout. Potentially handle interrupts before
- // continuing to wait.
- Object* interrupt_object = isolate->stack_guard()->HandleInterrupts();
- if (interrupt_object->IsException()) {
- result = interrupt_object;
- break;
+ // No interrupts, now wait.
+ if (use_timeout) {
+ current_time = base::TimeTicks::Now();
+ if (current_time >= timeout_time) {
+ result = Smi::FromInt(Result::kTimedOut);
+ break;
+ }
+
+ base::TimeDelta time_until_timeout = timeout_time - current_time;
+ DCHECK(time_until_timeout.InMicroseconds() >= 0);
+ bool wait_for_result =
+ node->cond_.WaitFor(mutex_.Pointer(), time_until_timeout);
+ USE(wait_for_result);
+ } else {
+ node->cond_.Wait(mutex_.Pointer());
}
+
+ // Spurious wakeup, interrupt or timeout.
}
wait_list_.Pointer()->RemoveNode(node);
+ node->waiting_ = false;
return result;
}
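
The reworked loop replaces the old 50 ms polling timeout with a fully interrupt-driven wait. A condensed sketch of the locking discipline, using std:: primitives as stand-ins for base::Mutex/base::ConditionVariable (HandleInterrupts, timed_out and remaining are placeholders for the stack-guard call and the timeout arithmetic above):

// Sketch of the wait loop's lock/unlock discipline; mutex_ is held on entry.
std::unique_lock<std::mutex> lock(mutex);
for (;;) {
  bool interrupted = node.interrupted;   // consume the flag under the lock
  node.interrupted = false;
  lock.unlock();                         // avoid lock-order deadlock with
  if (interrupted) {                     // mutexes taken by interrupt handlers
    HandleInterrupts();                  // may raise -> re-lock and bail out
  }
  lock.lock();
  if (node.interrupted) continue;        // a wake raced in; re-check first
  if (!node.waiting) break;              // genuine wake: result is kOk
  if (timed_out()) break;                // result is kTimedOut
  cond.wait_for(lock, remaining());      // releases the lock while blocked
}
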
diff --git a/deps/v8/src/futex-emulation.h b/deps/v8/src/futex-emulation.h
index 86b5f78811..e7e2230da2 100644
--- a/deps/v8/src/futex-emulation.h
+++ b/deps/v8/src/futex-emulation.h
@@ -8,6 +8,7 @@
#include <stdint.h>
#include "src/allocation.h"
+#include "src/base/atomicops.h"
#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
@@ -40,7 +41,10 @@ class FutexWaitListNode {
next_(nullptr),
backing_store_(nullptr),
wait_addr_(0),
- waiting_(false) {}
+ waiting_(false),
+ interrupted_(false) {}
+
+ void NotifyWake();
private:
friend class FutexEmulation;
@@ -52,6 +56,7 @@ class FutexWaitListNode {
void* backing_store_;
size_t wait_addr_;
bool waiting_;
+ bool interrupted_;
DISALLOW_COPY_AND_ASSIGN(FutexWaitListNode);
};
@@ -115,6 +120,8 @@ class FutexEmulation : public AllStatic {
size_t addr);
private:
+ friend class FutexWaitListNode;
+
static base::LazyMutex mutex_;
static base::LazyInstance<FutexWaitList>::type wait_list_;
};
diff --git a/deps/v8/src/generator.js b/deps/v8/src/generator.js
index d041c5b223..56579c59d4 100644
--- a/deps/v8/src/generator.js
+++ b/deps/v8/src/generator.js
@@ -12,8 +12,8 @@
// Imports
var GlobalFunction = global.Function;
-
var NewFunctionString;
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
NewFunctionString = from.NewFunctionString;
@@ -102,10 +102,10 @@ utils.InstallFunctions(GeneratorObjectPrototype,
%AddNamedProperty(GeneratorObjectPrototype, "constructor",
GeneratorFunctionPrototype, DONT_ENUM | READ_ONLY);
%AddNamedProperty(GeneratorObjectPrototype,
- symbolToStringTag, "Generator", DONT_ENUM | READ_ONLY);
+ toStringTagSymbol, "Generator", DONT_ENUM | READ_ONLY);
%InternalSetPrototype(GeneratorFunctionPrototype, GlobalFunction.prototype);
%AddNamedProperty(GeneratorFunctionPrototype,
- symbolToStringTag, "GeneratorFunction", DONT_ENUM | READ_ONLY);
+ toStringTagSymbol, "GeneratorFunction", DONT_ENUM | READ_ONLY);
%AddNamedProperty(GeneratorFunctionPrototype, "constructor",
GeneratorFunction, DONT_ENUM | READ_ONLY);
%InternalSetPrototype(GeneratorFunction, GlobalFunction);
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index befa173767..650999f394 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -2,11 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
-#include "src/api.h"
#include "src/global-handles.h"
+#include "src/api.h"
+#include "src/v8.h"
#include "src/vm-state-inl.h"
namespace v8 {
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 5f1070382c..9d4bafb6ff 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -431,13 +431,14 @@ class MarkCompactCollector;
class NewSpace;
class Object;
class OldSpace;
+class ParameterCount;
class Foreign;
class Scope;
class ScopeInfo;
class Script;
class Smi;
template <typename Config, class Allocator = FreeStoreAllocationPolicy>
- class SplayTree;
+class SplayTree;
class String;
class Symbol;
class Name;
@@ -600,6 +601,10 @@ enum CallConstructorFlags {
NO_CALL_CONSTRUCTOR_FLAGS = 0,
// The call target is cached in the instruction stream.
RECORD_CONSTRUCTOR_TARGET = 1,
+ // TODO(bmeurer): Kill these SUPER_* modes and use the Construct builtin
+ // directly instead; also there's no point in collecting any "targets" for
+ // super constructor calls, since these are known when we optimize the
+ // constructor that contains the super call.
SUPER_CONSTRUCTOR_CALL = 1 << 1,
SUPER_CALL_RECORD_TARGET = SUPER_CONSTRUCTOR_CALL | RECORD_CONSTRUCTOR_TARGET
};
@@ -769,6 +774,10 @@ const uint64_t kHoleNanInt64 =
(static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
+// ES6 section 20.1.2.6 Number.MAX_SAFE_INTEGER
+const double kMaxSafeInteger = 9007199254740991.0; // 2^53-1
+
+
// The order of this enum has to be kept in sync with the predicates below.
enum VariableMode {
// User declared variables:
@@ -992,7 +1001,7 @@ inline bool IsSubclassConstructor(FunctionKind kind) {
}
-inline bool IsConstructor(FunctionKind kind) {
+inline bool IsClassConstructor(FunctionKind kind) {
DCHECK(IsValidFunctionKind(kind));
return kind &
(FunctionKind::kBaseConstructor | FunctionKind::kSubclassConstructor |
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index b905c16a04..8c547e1b9c 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -26,6 +26,12 @@ HandleScope::HandleScope(Isolate* isolate) {
}
+template <typename T>
+inline std::ostream& operator<<(std::ostream& os, Handle<T> handle) {
+ return os << Brief(*handle);
+}
+
+
HandleScope::~HandleScope() {
#ifdef DEBUG
if (FLAG_check_handle_count) {
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index ca23a6f75f..ae6fac89d3 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/handles.h"
+#include "src/base/logging.h"
+#include "src/objects-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 2db18307da..85fa839f3f 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -6,6 +6,7 @@
#define V8_HANDLES_H_
#include "include/v8.h"
+#include "src/base/functional.h"
#include "src/base/macros.h"
#include "src/checks.h"
#include "src/globals.h"
@@ -70,8 +71,14 @@ class HandleBase {
// ----------------------------------------------------------------------------
// A Handle provides a reference to an object that survives relocation by
// the garbage collector.
-// Handles are only valid within a HandleScope.
-// When a handle is created for an object a cell is allocated in the heap.
+//
+// Handles are only valid within a HandleScope. When a handle is created
+// for an object a cell is allocated in the current HandleScope.
+//
+// Also note that Handles do not provide default equality comparison or hashing
+// operators on purpose. Such operators would be misleading, because the
+// intended semantics are ambiguous between Handle location and object
+// identity. Instead use either {is_identical_to} or {location} explicitly.
template <typename T>
class Handle final : public HandleBase {
public:
@@ -118,6 +125,20 @@ class Handle final : public HandleBase {
// MaybeHandle to force validation before being used as handles.
static const Handle<T> null() { return Handle<T>(); }
+ // Provide function object for location equality comparison.
+ struct equal_to : public std::binary_function<Handle<T>, Handle<T>, bool> {
+ V8_INLINE bool operator()(Handle<T> lhs, Handle<T> rhs) const {
+ return lhs.location() == rhs.location();
+ }
+ };
+
+ // Provide function object for location hashing.
+ struct hash : public std::unary_function<Handle<T>, size_t> {
+ V8_INLINE size_t operator()(Handle<T> const& handle) const {
+ return base::hash<void*>()(handle.location());
+ }
+ };
+
private:
// Handles of different classes are allowed to access each other's location_.
template <typename>
@@ -128,6 +149,9 @@ class Handle final : public HandleBase {
};
template <typename T>
+inline std::ostream& operator<<(std::ostream& os, Handle<T> handle);
+
+template <typename T>
V8_INLINE Handle<T> handle(T* object, Isolate* isolate) {
return Handle<T>(object, isolate);
}
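
Since Handle deliberately provides no default equality or hashing, containers must opt in to location-based semantics explicitly through the new nested function objects. A hypothetical use (the container and key type are chosen for illustration):

#include <unordered_map>

// Keys compare by handle *location*, per the comment above: two handles to
// the same object in different slots are distinct keys here.
std::unordered_map<Handle<Object>, int,
                   Handle<Object>::hash,
                   Handle<Object>::equal_to> location_counts;
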
@@ -142,7 +166,12 @@ V8_INLINE Handle<T> handle(T* object) {
// A Handle can be converted into a MaybeHandle. Converting a MaybeHandle
// into a Handle requires checking that it does not point to NULL. This
// ensures NULL checks before use.
+//
// Do not use MaybeHandle as argument type.
+//
+// Also note that Handles do not provide default equality comparison or hashing
+// operators on purpose. Such operators would be misleading, because the
+// intended semantics are ambiguous between Handle location and object identity.
template <typename T>
class MaybeHandle final {
public:
@@ -193,15 +222,6 @@ class MaybeHandle final {
bool is_null() const { return location_ == nullptr; }
- template <typename S>
- V8_INLINE bool operator==(MaybeHandle<S> that) const {
- return this->location_ == that.location_;
- }
- template <typename S>
- V8_INLINE bool operator!=(MaybeHandle<S> that) const {
- return this->location_ != that.location_;
- }
-
protected:
T** location_ = nullptr;
@@ -209,19 +229,10 @@ class MaybeHandle final {
// other's location_.
template <typename>
friend class MaybeHandle;
- // Utility functions are allowed to access location_.
- template <typename S>
- friend size_t hash_value(MaybeHandle<S>);
};
-template <typename T>
-V8_INLINE size_t hash_value(MaybeHandle<T> maybe_handle) {
- uintptr_t v = bit_cast<uintptr_t>(maybe_handle.location_);
- DCHECK_EQ(0u, v & ((1u << kPointerSizeLog2) - 1));
- return v >> kPointerSizeLog2;
-}
-
+// ----------------------------------------------------------------------------
// A stack-allocated class that governs a number of local handles.
// After a handle scope has been created, all local handles will be
// allocated within that handle scope until either the handle scope is
diff --git a/deps/v8/src/harmony-array-includes.js b/deps/v8/src/harmony-array-includes.js
index 124edf62ec..a6b59137d2 100644
--- a/deps/v8/src/harmony-array-includes.js
+++ b/deps/v8/src/harmony-array-includes.js
@@ -20,7 +20,7 @@ function InnerArrayIncludes(searchElement, fromIndex, array, length) {
return false;
}
- var n = $toInteger(fromIndex);
+ var n = TO_INTEGER(fromIndex);
var k;
if (n >= 0) {
@@ -49,7 +49,7 @@ function ArrayIncludes(searchElement, fromIndex) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.includes");
var array = TO_OBJECT(this);
- var length = $toLength(array.length);
+ var length = TO_LENGTH(array.length);
return InnerArrayIncludes(searchElement, fromIndex, array, length);
}
diff --git a/deps/v8/src/harmony-array.js b/deps/v8/src/harmony-array.js
index 49176460ad..0867f7cd46 100644
--- a/deps/v8/src/harmony-array.js
+++ b/deps/v8/src/harmony-array.js
@@ -14,7 +14,7 @@
var GetIterator;
var GetMethod;
var GlobalArray = global.Array;
-var GlobalSymbol = global.Symbol;
+var iteratorSymbol = utils.ImportNow("iterator_symbol");
var MathMax;
var MathMin;
var ObjectIsFrozen;
@@ -85,27 +85,19 @@ function ArrayCopyWithin(target, start, end) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.copyWithin");
var array = TO_OBJECT(this);
- var length = $toLength(array.length);
+ var length = TO_LENGTH(array.length);
return InnerArrayCopyWithin(target, start, end, array, length);
}
function InnerArrayFind(predicate, thisArg, array, length) {
- if (!IS_SPEC_FUNCTION(predicate)) {
+ if (!IS_CALLABLE(predicate)) {
throw MakeTypeError(kCalledNonCallable, predicate);
}
- var needs_wrapper = false;
- if (IS_NULL(thisArg)) {
- if (%IsSloppyModeFunction(predicate)) thisArg = UNDEFINED;
- } else if (!IS_UNDEFINED(thisArg)) {
- needs_wrapper = SHOULD_CREATE_WRAPPER(predicate, thisArg);
- }
-
for (var i = 0; i < length; i++) {
var element = array[i];
- var newThisArg = needs_wrapper ? TO_OBJECT(thisArg) : thisArg;
- if (%_CallFunction(newThisArg, element, i, array, predicate)) {
+ if (%_Call(predicate, thisArg, element, i, array)) {
return element;
}
}
@@ -118,27 +110,19 @@ function ArrayFind(predicate, thisArg) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.find");
var array = TO_OBJECT(this);
- var length = $toInteger(array.length);
+ var length = TO_INTEGER(array.length);
return InnerArrayFind(predicate, thisArg, array, length);
}
function InnerArrayFindIndex(predicate, thisArg, array, length) {
- if (!IS_SPEC_FUNCTION(predicate)) {
+ if (!IS_CALLABLE(predicate)) {
throw MakeTypeError(kCalledNonCallable, predicate);
}
- var needs_wrapper = false;
- if (IS_NULL(thisArg)) {
- if (%IsSloppyModeFunction(predicate)) thisArg = UNDEFINED;
- } else if (!IS_UNDEFINED(thisArg)) {
- needs_wrapper = SHOULD_CREATE_WRAPPER(predicate, thisArg);
- }
-
for (var i = 0; i < length; i++) {
var element = array[i];
- var newThisArg = needs_wrapper ? TO_OBJECT(thisArg) : thisArg;
- if (%_CallFunction(newThisArg, element, i, array, predicate)) {
+ if (%_Call(predicate, thisArg, element, i, array)) {
return i;
}
}
@@ -151,7 +135,7 @@ function ArrayFindIndex(predicate, thisArg) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.findIndex");
var array = TO_OBJECT(this);
- var length = $toInteger(array.length);
+ var length = TO_INTEGER(array.length);
return InnerArrayFindIndex(predicate, thisArg, array, length);
}
@@ -189,7 +173,7 @@ function ArrayFill(value, start, end) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.fill");
var array = TO_OBJECT(this);
- var length = TO_UINT32(array.length);
+ var length = TO_LENGTH_OR_UINT32(array.length);
return InnerArrayFill(value, start, end, array, length);
}
@@ -210,18 +194,12 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
var mapping = !IS_UNDEFINED(mapfn);
if (mapping) {
- if (!IS_SPEC_FUNCTION(mapfn)) {
+ if (!IS_CALLABLE(mapfn)) {
throw MakeTypeError(kCalledNonCallable, mapfn);
- } else if (%IsSloppyModeFunction(mapfn)) {
- if (IS_NULL(receiver)) {
- receiver = UNDEFINED;
- } else if (!IS_UNDEFINED(receiver)) {
- receiver = TO_OBJECT(receiver);
- }
}
}
- var iterable = GetMethod(items, symbolIterator);
+ var iterable = GetMethod(items, iteratorSymbol);
var k;
var result;
var mappedValue;
@@ -247,7 +225,7 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
nextValue = next.value;
if (mapping) {
- mappedValue = %_CallFunction(receiver, nextValue, k, mapfn);
+ mappedValue = %_Call(mapfn, receiver, nextValue, k);
} else {
mappedValue = nextValue;
}
@@ -255,13 +233,13 @@ function ArrayFrom(arrayLike, mapfn, receiver) {
k++;
}
} else {
- var len = $toLength(items.length);
+ var len = TO_LENGTH(items.length);
result = %IsConstructor(this) ? new this(len) : new GlobalArray(len);
for (k = 0; k < len; ++k) {
nextValue = items[k];
if (mapping) {
- mappedValue = %_CallFunction(receiver, nextValue, k, mapfn);
+ mappedValue = %_Call(mapfn, receiver, nextValue, k);
} else {
mappedValue = nextValue;
}
diff --git a/deps/v8/src/harmony-atomics.js b/deps/v8/src/harmony-atomics.js
index d4e069641a..b1b529fe86 100644
--- a/deps/v8/src/harmony-atomics.js
+++ b/deps/v8/src/harmony-atomics.js
@@ -14,6 +14,7 @@
var GlobalObject = global.Object;
var MathMax;
var ToNumber;
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
MathMax = from.MathMax;
@@ -23,12 +24,6 @@ utils.Import(function(from) {
// -------------------------------------------------------------------
-function CheckSharedTypedArray(sta) {
- if (!%IsSharedTypedArray(sta)) {
- throw MakeTypeError(kNotSharedTypedArray, sta);
- }
-}
-
function CheckSharedIntegerTypedArray(ia) {
if (!%IsSharedIntegerTypedArray(ia)) {
throw MakeTypeError(kNotIntegerSharedTypedArray, ia);
@@ -45,8 +40,8 @@ function CheckSharedInteger32TypedArray(ia) {
//-------------------------------------------------------------------
function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
- CheckSharedTypedArray(sta);
- index = $toInteger(index);
+ CheckSharedIntegerTypedArray(sta);
+ index = TO_INTEGER(index);
if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
}
@@ -56,8 +51,8 @@ function AtomicsCompareExchangeJS(sta, index, oldValue, newValue) {
}
function AtomicsLoadJS(sta, index) {
- CheckSharedTypedArray(sta);
- index = $toInteger(index);
+ CheckSharedIntegerTypedArray(sta);
+ index = TO_INTEGER(index);
if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
}
@@ -65,8 +60,8 @@ function AtomicsLoadJS(sta, index) {
}
function AtomicsStoreJS(sta, index, value) {
- CheckSharedTypedArray(sta);
- index = $toInteger(index);
+ CheckSharedIntegerTypedArray(sta);
+ index = TO_INTEGER(index);
if (index < 0 || index >= %_TypedArrayGetLength(sta)) {
return UNDEFINED;
}
@@ -76,7 +71,7 @@ function AtomicsStoreJS(sta, index, value) {
function AtomicsAddJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
- index = $toInteger(index);
+ index = TO_INTEGER(index);
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
@@ -86,7 +81,7 @@ function AtomicsAddJS(ia, index, value) {
function AtomicsSubJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
- index = $toInteger(index);
+ index = TO_INTEGER(index);
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
@@ -96,7 +91,7 @@ function AtomicsSubJS(ia, index, value) {
function AtomicsAndJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
- index = $toInteger(index);
+ index = TO_INTEGER(index);
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
@@ -106,7 +101,7 @@ function AtomicsAndJS(ia, index, value) {
function AtomicsOrJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
- index = $toInteger(index);
+ index = TO_INTEGER(index);
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
@@ -116,7 +111,7 @@ function AtomicsOrJS(ia, index, value) {
function AtomicsXorJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
- index = $toInteger(index);
+ index = TO_INTEGER(index);
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
@@ -126,7 +121,7 @@ function AtomicsXorJS(ia, index, value) {
function AtomicsExchangeJS(ia, index, value) {
CheckSharedIntegerTypedArray(ia);
- index = $toInteger(index);
+ index = TO_INTEGER(index);
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
@@ -142,7 +137,7 @@ function AtomicsIsLockFreeJS(size) {
function AtomicsFutexWaitJS(ia, index, value, timeout) {
CheckSharedInteger32TypedArray(ia);
- index = $toInteger(index);
+ index = TO_INTEGER(index);
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
@@ -161,20 +156,20 @@ function AtomicsFutexWaitJS(ia, index, value, timeout) {
function AtomicsFutexWakeJS(ia, index, count) {
CheckSharedInteger32TypedArray(ia);
- index = $toInteger(index);
+ index = TO_INTEGER(index);
if (index < 0 || index >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
}
- count = MathMax(0, $toInteger(count));
+ count = MathMax(0, TO_INTEGER(count));
return %AtomicsFutexWake(ia, index, count);
}
function AtomicsFutexWakeOrRequeueJS(ia, index1, count, value, index2) {
CheckSharedInteger32TypedArray(ia);
- index1 = $toInteger(index1);
- count = MathMax(0, $toInteger(count));
+ index1 = TO_INTEGER(index1);
+ count = MathMax(0, TO_INTEGER(count));
value = TO_INT32(value);
- index2 = $toInteger(index2);
+ index2 = TO_INTEGER(index2);
if (index1 < 0 || index1 >= %_TypedArrayGetLength(ia) ||
index2 < 0 || index2 >= %_TypedArrayGetLength(ia)) {
return UNDEFINED;
@@ -192,7 +187,7 @@ var Atomics = new AtomicsConstructor();
%AddNamedProperty(global, "Atomics", Atomics, DONT_ENUM);
%FunctionSetInstanceClassName(AtomicsConstructor, 'Atomics');
-%AddNamedProperty(Atomics, symbolToStringTag, "Atomics", READ_ONLY | DONT_ENUM);
+%AddNamedProperty(Atomics, toStringTagSymbol, "Atomics", READ_ONLY | DONT_ENUM);
// These must match the values in src/futex-emulation.h
utils.InstallConstants(Atomics, [
diff --git a/deps/v8/src/harmony-concat-spreadable.js b/deps/v8/src/harmony-concat-spreadable.js
index 362701c123..c5d906a642 100644
--- a/deps/v8/src/harmony-concat-spreadable.js
+++ b/deps/v8/src/harmony-concat-spreadable.js
@@ -8,9 +8,12 @@
%CheckIsBootstrapping();
+var isConcatSpreadableSymbol =
+ utils.ImportNow("is_concat_spreadable_symbol");
+
utils.InstallConstants(global.Symbol, [
// TODO(littledan): Move to symbol.js when shipping
- "isConcatSpreadable", symbolIsConcatSpreadable
+ "isConcatSpreadable", isConcatSpreadableSymbol
]);
})
diff --git a/deps/v8/src/harmony-object.js b/deps/v8/src/harmony-object.js
deleted file mode 100644
index 12f25552e4..0000000000
--- a/deps/v8/src/harmony-object.js
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-
-(function(global, utils) {
-
-"use strict";
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalObject = global.Object;
-var OwnPropertyKeys;
-
-utils.Import(function(from) {
- OwnPropertyKeys = from.OwnPropertyKeys;
-});
-
-// -------------------------------------------------------------------
-
-// ES6, draft 04-03-15, section 19.1.2.1
-function ObjectAssign(target, sources) {
- var to = TO_OBJECT(target);
- var argsLen = %_ArgumentsLength();
- if (argsLen < 2) return to;
-
- for (var i = 1; i < argsLen; ++i) {
- var nextSource = %_Arguments(i);
- if (IS_NULL_OR_UNDEFINED(nextSource)) {
- continue;
- }
-
- var from = TO_OBJECT(nextSource);
- var keys = OwnPropertyKeys(from);
- var len = keys.length;
-
- for (var j = 0; j < len; ++j) {
- var key = keys[j];
- if (%IsPropertyEnumerable(from, key)) {
- var propValue = from[key];
- to[key] = propValue;
- }
- }
- }
- return to;
-}
-
-// Set up non-enumerable functions on the Object object.
-utils.InstallFunctions(GlobalObject, DONT_ENUM, [
- "assign", ObjectAssign
-]);
-
-})
diff --git a/deps/v8/src/harmony-reflect.js b/deps/v8/src/harmony-reflect.js
index 5ad63e1a5c..f1fe8605e5 100644
--- a/deps/v8/src/harmony-reflect.js
+++ b/deps/v8/src/harmony-reflect.js
@@ -9,10 +9,12 @@
%CheckIsBootstrapping();
var GlobalReflect = global.Reflect;
+var ReflectApply = utils.ImportNow("reflect_apply");
+var ReflectConstruct = utils.ImportNow("reflect_construct");
utils.InstallFunctions(GlobalReflect, DONT_ENUM, [
- "apply", $reflectApply,
- "construct", $reflectConstruct
+ "apply", ReflectApply,
+ "construct", ReflectConstruct
]);
})
diff --git a/deps/v8/src/harmony-regexp.js b/deps/v8/src/harmony-regexp.js
index 150716744d..1ab76fad4a 100644
--- a/deps/v8/src/harmony-regexp.js
+++ b/deps/v8/src/harmony-regexp.js
@@ -12,11 +12,6 @@
// Imports
var GlobalRegExp = global.RegExp;
-var ToString;
-
-utils.Import(function(from) {
- ToString = from.ToString;
-});
// -------------------------------------------------------------------
@@ -24,7 +19,7 @@ utils.Import(function(from) {
// + https://bugs.ecmascript.org/show_bug.cgi?id=3423
function RegExpGetFlags() {
if (!IS_SPEC_OBJECT(this)) {
- throw MakeTypeError(kFlagsGetterNonObject, ToString(this));
+ throw MakeTypeError(kFlagsGetterNonObject, TO_STRING(this));
}
var result = '';
if (this.global) result += 'g';
diff --git a/deps/v8/src/harmony-sharedarraybuffer.js b/deps/v8/src/harmony-sharedarraybuffer.js
index 4ebfaadb2a..3a72d6c353 100644
--- a/deps/v8/src/harmony-sharedarraybuffer.js
+++ b/deps/v8/src/harmony-sharedarraybuffer.js
@@ -10,6 +10,7 @@
var GlobalSharedArrayBuffer = global.SharedArrayBuffer;
var GlobalObject = global.Object;
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
// -------------------------------------------------------------------
@@ -44,7 +45,7 @@ function SharedArrayBufferIsViewJS(obj) {
GlobalSharedArrayBuffer, DONT_ENUM);
%AddNamedProperty(GlobalSharedArrayBuffer.prototype,
- symbolToStringTag, "SharedArrayBuffer", DONT_ENUM | READ_ONLY);
+ toStringTagSymbol, "SharedArrayBuffer", DONT_ENUM | READ_ONLY);
utils.InstallGetter(GlobalSharedArrayBuffer.prototype, "byteLength",
SharedArrayBufferGetByteLen);
diff --git a/deps/v8/src/harmony-simd.js b/deps/v8/src/harmony-simd.js
index 3cc18c2e90..ef3d9948b1 100644
--- a/deps/v8/src/harmony-simd.js
+++ b/deps/v8/src/harmony-simd.js
@@ -12,6 +12,7 @@
// Imports
var GlobalSIMD = global.SIMD;
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
macro SIMD_FLOAT_TYPES(FUNCTION)
FUNCTION(Float32x4, float32x4, 4)
@@ -23,6 +24,12 @@ FUNCTION(Int16x8, int16x8, 8)
FUNCTION(Int8x16, int8x16, 16)
endmacro
+macro SIMD_UINT_TYPES(FUNCTION)
+FUNCTION(Uint32x4, uint32x4, 4)
+FUNCTION(Uint16x8, uint16x8, 8)
+FUNCTION(Uint8x16, uint8x16, 16)
+endmacro
+
macro SIMD_BOOL_TYPES(FUNCTION)
FUNCTION(Bool32x4, bool32x4, 4)
FUNCTION(Bool16x8, bool16x8, 8)
@@ -32,6 +39,7 @@ endmacro
macro SIMD_ALL_TYPES(FUNCTION)
SIMD_FLOAT_TYPES(FUNCTION)
SIMD_INT_TYPES(FUNCTION)
+SIMD_UINT_TYPES(FUNCTION)
SIMD_BOOL_TYPES(FUNCTION)
endmacro
@@ -85,33 +93,50 @@ function NAMEValueOf() {
function NAMEExtractLaneJS(instance, lane) {
return %NAMEExtractLane(instance, lane);
}
+endmacro
-function NAMEEqualJS(a, b) {
- return %NAMEEqual(a, b);
+SIMD_ALL_TYPES(DECLARE_COMMON_FUNCTIONS)
+
+macro DECLARE_SHIFT_FUNCTIONS(NAME, TYPE, LANES)
+function NAMEShiftLeftByScalarJS(instance, shift) {
+ return %NAMEShiftLeftByScalar(instance, shift);
}
-function NAMENotEqualJS(a, b) {
- return %NAMENotEqual(a, b);
+function NAMEShiftRightByScalarJS(instance, shift) {
+ return %NAMEShiftRightByScalar(instance, shift);
}
endmacro
-SIMD_ALL_TYPES(DECLARE_COMMON_FUNCTIONS)
+SIMD_INT_TYPES(DECLARE_SHIFT_FUNCTIONS)
+SIMD_UINT_TYPES(DECLARE_SHIFT_FUNCTIONS)
-macro DECLARE_INT_FUNCTIONS(NAME, TYPE, LANES)
-function NAMEShiftLeftByScalarJS(instance, shift) {
- return %NAMEShiftLeftByScalar(instance, shift);
+macro SIMD_SMALL_INT_TYPES(FUNCTION)
+FUNCTION(Int16x8)
+FUNCTION(Int8x16)
+FUNCTION(Uint8x16)
+FUNCTION(Uint16x8)
+endmacro
+
+macro DECLARE_SMALL_INT_FUNCTIONS(NAME)
+function NAMEAddSaturateJS(a, b) {
+ return %NAMEAddSaturate(a, b);
}
-function NAMEShiftRightLogicalByScalarJS(instance, shift) {
- return %NAMEShiftRightLogicalByScalar(instance, shift);
+function NAMESubSaturateJS(a, b) {
+ return %NAMESubSaturate(a, b);
}
+endmacro
-function NAMEShiftRightArithmeticByScalarJS(instance, shift) {
- return %NAMEShiftRightArithmeticByScalar(instance, shift);
+SIMD_SMALL_INT_TYPES(DECLARE_SMALL_INT_FUNCTIONS)
+
+macro DECLARE_SIGNED_FUNCTIONS(NAME, TYPE, LANES)
+function NAMENegJS(a) {
+ return %NAMENeg(a);
}
endmacro
-SIMD_INT_TYPES(DECLARE_INT_FUNCTIONS)
+SIMD_FLOAT_TYPES(DECLARE_SIGNED_FUNCTIONS)
+SIMD_INT_TYPES(DECLARE_SIGNED_FUNCTIONS)
macro DECLARE_BOOL_FUNCTIONS(NAME, TYPE, LANES)
function NAMEReplaceLaneJS(instance, lane, value) {
@@ -129,22 +154,10 @@ endmacro
SIMD_BOOL_TYPES(DECLARE_BOOL_FUNCTIONS)
-macro SIMD_UNSIGNED_INT_TYPES(FUNCTION)
-FUNCTION(Int16x8)
-FUNCTION(Int8x16)
-endmacro
-
-macro DECLARE_UNSIGNED_INT_FUNCTIONS(NAME)
-function NAMEUnsignedExtractLaneJS(instance, lane) {
- return %NAMEUnsignedExtractLane(instance, lane);
-}
-endmacro
-
-SIMD_UNSIGNED_INT_TYPES(DECLARE_UNSIGNED_INT_FUNCTIONS)
-
macro SIMD_NUMERIC_TYPES(FUNCTION)
SIMD_FLOAT_TYPES(FUNCTION)
SIMD_INT_TYPES(FUNCTION)
+SIMD_UINT_TYPES(FUNCTION)
endmacro
macro DECLARE_NUMERIC_FUNCTIONS(NAME, TYPE, LANES)
@@ -156,10 +169,6 @@ function NAMESelectJS(selector, a, b) {
return %NAMESelect(selector, a, b);
}
-function NAMENegJS(a) {
- return %NAMENeg(a);
-}
-
function NAMEAddJS(a, b) {
return %NAMEAdd(a, b);
}
@@ -180,6 +189,14 @@ function NAMEMaxJS(a, b) {
return %NAMEMax(a, b);
}
+function NAMEEqualJS(a, b) {
+ return %NAMEEqual(a, b);
+}
+
+function NAMENotEqualJS(a, b) {
+ return %NAMENotEqual(a, b);
+}
+
function NAMELessThanJS(a, b) {
return %NAMELessThan(a, b);
}
@@ -195,12 +212,21 @@ function NAMEGreaterThanJS(a, b) {
function NAMEGreaterThanOrEqualJS(a, b) {
return %NAMEGreaterThanOrEqual(a, b);
}
+
+function NAMELoadJS(tarray, index) {
+ return %NAMELoad(tarray, index);
+}
+
+function NAMEStoreJS(tarray, index, a) {
+ return %NAMEStore(tarray, index, a);
+}
endmacro
SIMD_NUMERIC_TYPES(DECLARE_NUMERIC_FUNCTIONS)
macro SIMD_LOGICAL_TYPES(FUNCTION)
SIMD_INT_TYPES(FUNCTION)
+SIMD_UINT_TYPES(FUNCTION)
SIMD_BOOL_TYPES(FUNCTION)
endmacro
@@ -226,7 +252,15 @@ SIMD_LOGICAL_TYPES(DECLARE_LOGICAL_FUNCTIONS)
macro SIMD_FROM_TYPES(FUNCTION)
FUNCTION(Float32x4, Int32x4)
+FUNCTION(Float32x4, Uint32x4)
FUNCTION(Int32x4, Float32x4)
+FUNCTION(Int32x4, Uint32x4)
+FUNCTION(Uint32x4, Float32x4)
+FUNCTION(Uint32x4, Int32x4)
+FUNCTION(Int16x8, Uint16x8)
+FUNCTION(Uint16x8, Int16x8)
+FUNCTION(Int8x16, Uint8x16)
+FUNCTION(Uint8x16, Int8x16)
endmacro
macro DECLARE_FROM_FUNCTIONS(TO, FROM)
@@ -239,17 +273,47 @@ SIMD_FROM_TYPES(DECLARE_FROM_FUNCTIONS)
macro SIMD_FROM_BITS_TYPES(FUNCTION)
FUNCTION(Float32x4, Int32x4)
+FUNCTION(Float32x4, Uint32x4)
FUNCTION(Float32x4, Int16x8)
+FUNCTION(Float32x4, Uint16x8)
FUNCTION(Float32x4, Int8x16)
+FUNCTION(Float32x4, Uint8x16)
FUNCTION(Int32x4, Float32x4)
+FUNCTION(Int32x4, Uint32x4)
FUNCTION(Int32x4, Int16x8)
+FUNCTION(Int32x4, Uint16x8)
FUNCTION(Int32x4, Int8x16)
+FUNCTION(Int32x4, Uint8x16)
+FUNCTION(Uint32x4, Float32x4)
+FUNCTION(Uint32x4, Int32x4)
+FUNCTION(Uint32x4, Int16x8)
+FUNCTION(Uint32x4, Uint16x8)
+FUNCTION(Uint32x4, Int8x16)
+FUNCTION(Uint32x4, Uint8x16)
FUNCTION(Int16x8, Float32x4)
FUNCTION(Int16x8, Int32x4)
+FUNCTION(Int16x8, Uint32x4)
+FUNCTION(Int16x8, Uint16x8)
FUNCTION(Int16x8, Int8x16)
+FUNCTION(Int16x8, Uint8x16)
+FUNCTION(Uint16x8, Float32x4)
+FUNCTION(Uint16x8, Int32x4)
+FUNCTION(Uint16x8, Uint32x4)
+FUNCTION(Uint16x8, Int16x8)
+FUNCTION(Uint16x8, Int8x16)
+FUNCTION(Uint16x8, Uint8x16)
FUNCTION(Int8x16, Float32x4)
FUNCTION(Int8x16, Int32x4)
+FUNCTION(Int8x16, Uint32x4)
FUNCTION(Int8x16, Int16x8)
+FUNCTION(Int8x16, Uint16x8)
+FUNCTION(Int8x16, Uint8x16)
+FUNCTION(Uint8x16, Float32x4)
+FUNCTION(Uint8x16, Int32x4)
+FUNCTION(Uint8x16, Uint32x4)
+FUNCTION(Uint8x16, Int16x8)
+FUNCTION(Uint8x16, Uint16x8)
+FUNCTION(Uint8x16, Int8x16)
endmacro
macro DECLARE_FROM_BITS_FUNCTIONS(TO, FROM)
@@ -260,62 +324,110 @@ endmacro
SIMD_FROM_BITS_TYPES(DECLARE_FROM_BITS_FUNCTIONS)
-//-------------------------------------------------------------------
-
-function Float32x4Constructor(c0, c1, c2, c3) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Float32x4");
- return %CreateFloat32x4(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
- TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3));
-}
+macro SIMD_LOADN_STOREN_TYPES(FUNCTION)
+FUNCTION(Float32x4, 1)
+FUNCTION(Float32x4, 2)
+FUNCTION(Float32x4, 3)
+FUNCTION(Int32x4, 1)
+FUNCTION(Int32x4, 2)
+FUNCTION(Int32x4, 3)
+FUNCTION(Uint32x4, 1)
+FUNCTION(Uint32x4, 2)
+FUNCTION(Uint32x4, 3)
+endmacro
-function Float32x4Splat(s) {
- return %CreateFloat32x4(s, s, s, s);
+macro DECLARE_LOADN_STOREN_FUNCTIONS(NAME, COUNT)
+function NAMELoadCOUNTJS(tarray, index) {
+ return %NAMELoadCOUNT(tarray, index);
}
-
-function Float32x4AbsJS(a) {
- return %Float32x4Abs(a);
+function NAMEStoreCOUNTJS(tarray, index, a) {
+ return %NAMEStoreCOUNT(tarray, index, a);
}
+endmacro
+SIMD_LOADN_STOREN_TYPES(DECLARE_LOADN_STOREN_FUNCTIONS)
-function Float32x4SqrtJS(a) {
- return %Float32x4Sqrt(a);
-}
+//-------------------------------------------------------------------
+macro SIMD_X4_TYPES(FUNCTION)
+FUNCTION(Float32x4)
+FUNCTION(Int32x4)
+FUNCTION(Uint32x4)
+FUNCTION(Bool32x4)
+endmacro
-function Float32x4RecipApproxJS(a) {
- return %Float32x4RecipApprox(a);
+macro DECLARE_X4_FUNCTIONS(NAME)
+function NAMESplat(s) {
+ return %CreateNAME(s, s, s, s);
}
+function NAMESwizzleJS(a, c0, c1, c2, c3) {
+ return %NAMESwizzle(a, c0, c1, c2, c3);
+}
-function Float32x4RecipSqrtApproxJS(a) {
- return %Float32x4RecipSqrtApprox(a);
+function NAMEShuffleJS(a, b, c0, c1, c2, c3) {
+ return %NAMEShuffle(a, b, c0, c1, c2, c3);
}
+endmacro
+SIMD_X4_TYPES(DECLARE_X4_FUNCTIONS)
-function Float32x4DivJS(a, b) {
- return %Float32x4Div(a, b);
+macro SIMD_X8_TYPES(FUNCTION)
+FUNCTION(Int16x8)
+FUNCTION(Uint16x8)
+FUNCTION(Bool16x8)
+endmacro
+
+macro DECLARE_X8_FUNCTIONS(NAME)
+function NAMESplat(s) {
+ return %CreateNAME(s, s, s, s, s, s, s, s);
}
+function NAMESwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7) {
+ return %NAMESwizzle(a, c0, c1, c2, c3, c4, c5, c6, c7);
+}
-function Float32x4MinNumJS(a, b) {
- return %Float32x4MinNum(a, b);
+function NAMEShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7) {
+ return %NAMEShuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7);
}
+endmacro
+SIMD_X8_TYPES(DECLARE_X8_FUNCTIONS)
-function Float32x4MaxNumJS(a, b) {
- return %Float32x4MaxNum(a, b);
+macro SIMD_X16_TYPES(FUNCTION)
+FUNCTION(Int8x16)
+FUNCTION(Uint8x16)
+FUNCTION(Bool8x16)
+endmacro
+
+macro DECLARE_X16_FUNCTIONS(NAME)
+function NAMESplat(s) {
+ return %CreateNAME(s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s);
}
+function NAMESwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
+ c12, c13, c14, c15) {
+ return %NAMESwizzle(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
+ c12, c13, c14, c15);
+}
-function Float32x4SwizzleJS(a, c0, c1, c2, c3) {
- return %Float32x4Swizzle(a, c0, c1, c2, c3);
+function NAMEShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
+ c11, c12, c13, c14, c15) {
+ return %NAMEShuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
+ c11, c12, c13, c14, c15);
}
+endmacro
+
+SIMD_X16_TYPES(DECLARE_X16_FUNCTIONS)
+//-------------------------------------------------------------------
-function Float32x4ShuffleJS(a, b, c0, c1, c2, c3) {
- return %Float32x4Shuffle(a, b, c0, c1, c2, c3);
+function Float32x4Constructor(c0, c1, c2, c3) {
+ if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Float32x4");
+ return %CreateFloat32x4(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
+ TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3));
}
@@ -326,18 +438,10 @@ function Int32x4Constructor(c0, c1, c2, c3) {
}
-function Int32x4Splat(s) {
- return %CreateInt32x4(s, s, s, s);
-}
-
-
-function Int32x4SwizzleJS(a, c0, c1, c2, c3) {
- return %Int32x4Swizzle(a, c0, c1, c2, c3);
-}
-
-
-function Int32x4ShuffleJS(a, b, c0, c1, c2, c3) {
- return %Int32x4Shuffle(a, b, c0, c1, c2, c3);
+function Uint32x4Constructor(c0, c1, c2, c3) {
+ if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Uint32x4");
+ return %CreateUint32x4(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
+ TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3));
}
@@ -347,20 +451,6 @@ function Bool32x4Constructor(c0, c1, c2, c3) {
}
-function Bool32x4Splat(s) {
- return %CreateBool32x4(s, s, s, s);
-}
-
-function Bool32x4SwizzleJS(a, c0, c1, c2, c3) {
- return %Bool32x4Swizzle(a, c0, c1, c2, c3);
-}
-
-
-function Bool32x4ShuffleJS(a, b, c0, c1, c2, c3) {
- return %Bool32x4Shuffle(a, b, c0, c1, c2, c3);
-}
-
-
function Int16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int16x8");
return %CreateInt16x8(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
@@ -370,18 +460,12 @@ function Int16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
}
-function Int16x8Splat(s) {
- return %CreateInt16x8(s, s, s, s, s, s, s, s);
-}
-
-
-function Int16x8SwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7) {
- return %Int16x8Swizzle(a, c0, c1, c2, c3, c4, c5, c6, c7);
-}
-
-
-function Int16x8ShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7) {
- return %Int16x8Shuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7);
+function Uint16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
+ if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Uint16x8");
+ return %CreateUint16x8(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
+ TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3),
+ TO_NUMBER_INLINE(c4), TO_NUMBER_INLINE(c5),
+ TO_NUMBER_INLINE(c6), TO_NUMBER_INLINE(c7));
}
@@ -391,21 +475,6 @@ function Bool16x8Constructor(c0, c1, c2, c3, c4, c5, c6, c7) {
}
-function Bool16x8Splat(s) {
- return %CreateBool16x8(s, s, s, s, s, s, s, s);
-}
-
-
-function Bool16x8SwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7) {
- return %Bool16x8Swizzle(a, c0, c1, c2, c3, c4, c5, c6, c7);
-}
-
-
-function Bool16x8ShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7) {
- return %Bool16x8Shuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7);
-}
-
-
function Int8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
c12, c13, c14, c15) {
if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Int8x16");
@@ -420,60 +489,71 @@ function Int8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
}
-function Int8x16Splat(s) {
- return %CreateInt8x16(s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s);
+function Uint8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
+ c12, c13, c14, c15) {
+ if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Uint8x16");
+ return %CreateUint8x16(TO_NUMBER_INLINE(c0), TO_NUMBER_INLINE(c1),
+ TO_NUMBER_INLINE(c2), TO_NUMBER_INLINE(c3),
+ TO_NUMBER_INLINE(c4), TO_NUMBER_INLINE(c5),
+ TO_NUMBER_INLINE(c6), TO_NUMBER_INLINE(c7),
+ TO_NUMBER_INLINE(c8), TO_NUMBER_INLINE(c9),
+ TO_NUMBER_INLINE(c10), TO_NUMBER_INLINE(c11),
+ TO_NUMBER_INLINE(c12), TO_NUMBER_INLINE(c13),
+ TO_NUMBER_INLINE(c14), TO_NUMBER_INLINE(c15));
}
-function Int8x16SwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
- c12, c13, c14, c15) {
- return %Int8x16Swizzle(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
- c12, c13, c14, c15);
+function Bool8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
+ c12, c13, c14, c15) {
+ if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Bool8x16");
+ return %CreateBool8x16(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12,
+ c13, c14, c15);
}
-function Int8x16ShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
- c11, c12, c13, c14, c15) {
- return %Int8x16Shuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
- c11, c12, c13, c14, c15);
+function Float32x4AbsJS(a) {
+ return %Float32x4Abs(a);
}
-function Bool8x16Constructor(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
- c12, c13, c14, c15) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Bool8x16");
- return %CreateBool8x16(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12,
- c13, c14, c15);
+function Float32x4SqrtJS(a) {
+ return %Float32x4Sqrt(a);
+}
+
+
+function Float32x4RecipApproxJS(a) {
+ return %Float32x4RecipApprox(a);
+}
+
+
+function Float32x4RecipSqrtApproxJS(a) {
+ return %Float32x4RecipSqrtApprox(a);
}
-function Bool8x16Splat(s) {
- return %CreateBool8x16(s, s, s, s, s, s, s, s, s, s, s, s, s, s, s, s);
+function Float32x4DivJS(a, b) {
+ return %Float32x4Div(a, b);
}
-function Bool8x16SwizzleJS(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
- c12, c13, c14, c15) {
- return %Bool8x16Swizzle(a, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11,
- c12, c13, c14, c15);
+function Float32x4MinNumJS(a, b) {
+ return %Float32x4MinNum(a, b);
}
-function Bool8x16ShuffleJS(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
- c11, c12, c13, c14, c15) {
- return %Bool8x16Shuffle(a, b, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10,
- c11, c12, c13, c14, c15);
+function Float32x4MaxNumJS(a, b) {
+ return %Float32x4MaxNum(a, b);
}
-%AddNamedProperty(GlobalSIMD, symbolToStringTag, 'SIMD', READ_ONLY | DONT_ENUM);
+%AddNamedProperty(GlobalSIMD, toStringTagSymbol, 'SIMD', READ_ONLY | DONT_ENUM);
macro SETUP_SIMD_TYPE(NAME, TYPE, LANES)
%SetCode(GlobalNAME, NAMEConstructor);
%FunctionSetPrototype(GlobalNAME, {});
%AddNamedProperty(GlobalNAME.prototype, 'constructor', GlobalNAME,
DONT_ENUM);
-%AddNamedProperty(GlobalNAME.prototype, symbolToStringTag, 'NAME',
+%AddNamedProperty(GlobalNAME.prototype, toStringTagSymbol, 'NAME',
DONT_ENUM | READ_ONLY);
utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
'toLocaleString', NAMEToLocaleString,
@@ -514,9 +594,21 @@ utils.InstallFunctions(GlobalFloat32x4, DONT_ENUM, [
'swizzle', Float32x4SwizzleJS,
'shuffle', Float32x4ShuffleJS,
'fromInt32x4', Float32x4FromInt32x4JS,
+ 'fromUint32x4', Float32x4FromUint32x4JS,
'fromInt32x4Bits', Float32x4FromInt32x4BitsJS,
+ 'fromUint32x4Bits', Float32x4FromUint32x4BitsJS,
'fromInt16x8Bits', Float32x4FromInt16x8BitsJS,
+ 'fromUint16x8Bits', Float32x4FromUint16x8BitsJS,
'fromInt8x16Bits', Float32x4FromInt8x16BitsJS,
+ 'fromUint8x16Bits', Float32x4FromUint8x16BitsJS,
+ 'load', Float32x4LoadJS,
+ 'load1', Float32x4Load1JS,
+ 'load2', Float32x4Load2JS,
+ 'load3', Float32x4Load3JS,
+ 'store', Float32x4StoreJS,
+ 'store1', Float32x4Store1JS,
+ 'store2', Float32x4Store2JS,
+ 'store3', Float32x4Store3JS,
]);
utils.InstallFunctions(GlobalInt32x4, DONT_ENUM, [
@@ -535,8 +627,7 @@ utils.InstallFunctions(GlobalInt32x4, DONT_ENUM, [
'xor', Int32x4XorJS,
'not', Int32x4NotJS,
'shiftLeftByScalar', Int32x4ShiftLeftByScalarJS,
- 'shiftRightLogicalByScalar', Int32x4ShiftRightLogicalByScalarJS,
- 'shiftRightArithmeticByScalar', Int32x4ShiftRightArithmeticByScalarJS,
+ 'shiftRightByScalar', Int32x4ShiftRightByScalarJS,
'lessThan', Int32x4LessThanJS,
'lessThanOrEqual', Int32x4LessThanOrEqualJS,
'greaterThan', Int32x4GreaterThanJS,
@@ -547,9 +638,64 @@ utils.InstallFunctions(GlobalInt32x4, DONT_ENUM, [
'swizzle', Int32x4SwizzleJS,
'shuffle', Int32x4ShuffleJS,
'fromFloat32x4', Int32x4FromFloat32x4JS,
+ 'fromUint32x4', Int32x4FromUint32x4JS,
'fromFloat32x4Bits', Int32x4FromFloat32x4BitsJS,
+ 'fromUint32x4Bits', Int32x4FromUint32x4BitsJS,
'fromInt16x8Bits', Int32x4FromInt16x8BitsJS,
+ 'fromUint16x8Bits', Int32x4FromUint16x8BitsJS,
'fromInt8x16Bits', Int32x4FromInt8x16BitsJS,
+ 'fromUint8x16Bits', Int32x4FromUint8x16BitsJS,
+ 'load', Int32x4LoadJS,
+ 'load1', Int32x4Load1JS,
+ 'load2', Int32x4Load2JS,
+ 'load3', Int32x4Load3JS,
+ 'store', Int32x4StoreJS,
+ 'store1', Int32x4Store1JS,
+ 'store2', Int32x4Store2JS,
+ 'store3', Int32x4Store3JS,
+]);
+
+utils.InstallFunctions(GlobalUint32x4, DONT_ENUM, [
+ 'splat', Uint32x4Splat,
+ 'check', Uint32x4CheckJS,
+ 'extractLane', Uint32x4ExtractLaneJS,
+ 'replaceLane', Uint32x4ReplaceLaneJS,
+ 'add', Uint32x4AddJS,
+ 'sub', Uint32x4SubJS,
+ 'mul', Uint32x4MulJS,
+ 'min', Uint32x4MinJS,
+ 'max', Uint32x4MaxJS,
+ 'and', Uint32x4AndJS,
+ 'or', Uint32x4OrJS,
+ 'xor', Uint32x4XorJS,
+ 'not', Uint32x4NotJS,
+ 'shiftLeftByScalar', Uint32x4ShiftLeftByScalarJS,
+ 'shiftRightByScalar', Uint32x4ShiftRightByScalarJS,
+ 'lessThan', Uint32x4LessThanJS,
+ 'lessThanOrEqual', Uint32x4LessThanOrEqualJS,
+ 'greaterThan', Uint32x4GreaterThanJS,
+ 'greaterThanOrEqual', Uint32x4GreaterThanOrEqualJS,
+ 'equal', Uint32x4EqualJS,
+ 'notEqual', Uint32x4NotEqualJS,
+ 'select', Uint32x4SelectJS,
+ 'swizzle', Uint32x4SwizzleJS,
+ 'shuffle', Uint32x4ShuffleJS,
+ 'fromFloat32x4', Uint32x4FromFloat32x4JS,
+ 'fromInt32x4', Uint32x4FromInt32x4JS,
+ 'fromFloat32x4Bits', Uint32x4FromFloat32x4BitsJS,
+ 'fromInt32x4Bits', Uint32x4FromInt32x4BitsJS,
+ 'fromInt16x8Bits', Uint32x4FromInt16x8BitsJS,
+ 'fromUint16x8Bits', Uint32x4FromUint16x8BitsJS,
+ 'fromInt8x16Bits', Uint32x4FromInt8x16BitsJS,
+ 'fromUint8x16Bits', Uint32x4FromUint8x16BitsJS,
+ 'load', Uint32x4LoadJS,
+ 'load1', Uint32x4Load1JS,
+ 'load2', Uint32x4Load2JS,
+ 'load3', Uint32x4Load3JS,
+ 'store', Uint32x4StoreJS,
+ 'store1', Uint32x4Store1JS,
+ 'store2', Uint32x4Store2JS,
+ 'store3', Uint32x4Store3JS,
]);
utils.InstallFunctions(GlobalBool32x4, DONT_ENUM, [
@@ -563,8 +709,6 @@ utils.InstallFunctions(GlobalBool32x4, DONT_ENUM, [
'not', Bool32x4NotJS,
'anyTrue', Bool32x4AnyTrueJS,
'allTrue', Bool32x4AllTrueJS,
- 'equal', Bool32x4EqualJS,
- 'notEqual', Bool32x4NotEqualJS,
'swizzle', Bool32x4SwizzleJS,
'shuffle', Bool32x4ShuffleJS,
]);
@@ -573,11 +717,12 @@ utils.InstallFunctions(GlobalInt16x8, DONT_ENUM, [
'splat', Int16x8Splat,
'check', Int16x8CheckJS,
'extractLane', Int16x8ExtractLaneJS,
- 'unsignedExtractLane', Int16x8UnsignedExtractLaneJS,
'replaceLane', Int16x8ReplaceLaneJS,
'neg', Int16x8NegJS,
'add', Int16x8AddJS,
'sub', Int16x8SubJS,
+ 'addSaturate', Int16x8AddSaturateJS,
+ 'subSaturate', Int16x8SubSaturateJS,
'mul', Int16x8MulJS,
'min', Int16x8MinJS,
'max', Int16x8MaxJS,
@@ -586,8 +731,7 @@ utils.InstallFunctions(GlobalInt16x8, DONT_ENUM, [
'xor', Int16x8XorJS,
'not', Int16x8NotJS,
'shiftLeftByScalar', Int16x8ShiftLeftByScalarJS,
- 'shiftRightLogicalByScalar', Int16x8ShiftRightLogicalByScalarJS,
- 'shiftRightArithmeticByScalar', Int16x8ShiftRightArithmeticByScalarJS,
+ 'shiftRightByScalar', Int16x8ShiftRightByScalarJS,
'lessThan', Int16x8LessThanJS,
'lessThanOrEqual', Int16x8LessThanOrEqualJS,
'greaterThan', Int16x8GreaterThanJS,
@@ -597,9 +741,53 @@ utils.InstallFunctions(GlobalInt16x8, DONT_ENUM, [
'select', Int16x8SelectJS,
'swizzle', Int16x8SwizzleJS,
'shuffle', Int16x8ShuffleJS,
+ 'fromUint16x8', Int16x8FromUint16x8JS,
'fromFloat32x4Bits', Int16x8FromFloat32x4BitsJS,
'fromInt32x4Bits', Int16x8FromInt32x4BitsJS,
+ 'fromUint32x4Bits', Int16x8FromUint32x4BitsJS,
+ 'fromUint16x8Bits', Int16x8FromUint16x8BitsJS,
'fromInt8x16Bits', Int16x8FromInt8x16BitsJS,
+ 'fromUint8x16Bits', Int16x8FromUint8x16BitsJS,
+ 'load', Int16x8LoadJS,
+ 'store', Int16x8StoreJS,
+]);
+
+utils.InstallFunctions(GlobalUint16x8, DONT_ENUM, [
+ 'splat', Uint16x8Splat,
+ 'check', Uint16x8CheckJS,
+ 'extractLane', Uint16x8ExtractLaneJS,
+ 'replaceLane', Uint16x8ReplaceLaneJS,
+ 'add', Uint16x8AddJS,
+ 'sub', Uint16x8SubJS,
+ 'addSaturate', Uint16x8AddSaturateJS,
+ 'subSaturate', Uint16x8SubSaturateJS,
+ 'mul', Uint16x8MulJS,
+ 'min', Uint16x8MinJS,
+ 'max', Uint16x8MaxJS,
+ 'and', Uint16x8AndJS,
+ 'or', Uint16x8OrJS,
+ 'xor', Uint16x8XorJS,
+ 'not', Uint16x8NotJS,
+ 'shiftLeftByScalar', Uint16x8ShiftLeftByScalarJS,
+ 'shiftRightByScalar', Uint16x8ShiftRightByScalarJS,
+ 'lessThan', Uint16x8LessThanJS,
+ 'lessThanOrEqual', Uint16x8LessThanOrEqualJS,
+ 'greaterThan', Uint16x8GreaterThanJS,
+ 'greaterThanOrEqual', Uint16x8GreaterThanOrEqualJS,
+ 'equal', Uint16x8EqualJS,
+ 'notEqual', Uint16x8NotEqualJS,
+ 'select', Uint16x8SelectJS,
+ 'swizzle', Uint16x8SwizzleJS,
+ 'shuffle', Uint16x8ShuffleJS,
+ 'fromInt16x8', Uint16x8FromInt16x8JS,
+ 'fromFloat32x4Bits', Uint16x8FromFloat32x4BitsJS,
+ 'fromInt32x4Bits', Uint16x8FromInt32x4BitsJS,
+ 'fromUint32x4Bits', Uint16x8FromUint32x4BitsJS,
+ 'fromInt16x8Bits', Uint16x8FromInt16x8BitsJS,
+ 'fromInt8x16Bits', Uint16x8FromInt8x16BitsJS,
+ 'fromUint8x16Bits', Uint16x8FromUint8x16BitsJS,
+ 'load', Uint16x8LoadJS,
+ 'store', Uint16x8StoreJS,
]);
utils.InstallFunctions(GlobalBool16x8, DONT_ENUM, [
@@ -613,8 +801,6 @@ utils.InstallFunctions(GlobalBool16x8, DONT_ENUM, [
'not', Bool16x8NotJS,
'anyTrue', Bool16x8AnyTrueJS,
'allTrue', Bool16x8AllTrueJS,
- 'equal', Bool16x8EqualJS,
- 'notEqual', Bool16x8NotEqualJS,
'swizzle', Bool16x8SwizzleJS,
'shuffle', Bool16x8ShuffleJS,
]);
@@ -623,11 +809,12 @@ utils.InstallFunctions(GlobalInt8x16, DONT_ENUM, [
'splat', Int8x16Splat,
'check', Int8x16CheckJS,
'extractLane', Int8x16ExtractLaneJS,
- 'unsignedExtractLane', Int8x16UnsignedExtractLaneJS,
'replaceLane', Int8x16ReplaceLaneJS,
'neg', Int8x16NegJS,
'add', Int8x16AddJS,
'sub', Int8x16SubJS,
+ 'addSaturate', Int8x16AddSaturateJS,
+ 'subSaturate', Int8x16SubSaturateJS,
'mul', Int8x16MulJS,
'min', Int8x16MinJS,
'max', Int8x16MaxJS,
@@ -636,8 +823,7 @@ utils.InstallFunctions(GlobalInt8x16, DONT_ENUM, [
'xor', Int8x16XorJS,
'not', Int8x16NotJS,
'shiftLeftByScalar', Int8x16ShiftLeftByScalarJS,
- 'shiftRightLogicalByScalar', Int8x16ShiftRightLogicalByScalarJS,
- 'shiftRightArithmeticByScalar', Int8x16ShiftRightArithmeticByScalarJS,
+ 'shiftRightByScalar', Int8x16ShiftRightByScalarJS,
'lessThan', Int8x16LessThanJS,
'lessThanOrEqual', Int8x16LessThanOrEqualJS,
'greaterThan', Int8x16GreaterThanJS,
@@ -647,9 +833,53 @@ utils.InstallFunctions(GlobalInt8x16, DONT_ENUM, [
'select', Int8x16SelectJS,
'swizzle', Int8x16SwizzleJS,
'shuffle', Int8x16ShuffleJS,
+ 'fromUint8x16', Int8x16FromUint8x16JS,
'fromFloat32x4Bits', Int8x16FromFloat32x4BitsJS,
'fromInt32x4Bits', Int8x16FromInt32x4BitsJS,
+ 'fromUint32x4Bits', Int8x16FromUint32x4BitsJS,
'fromInt16x8Bits', Int8x16FromInt16x8BitsJS,
+ 'fromUint16x8Bits', Int8x16FromUint16x8BitsJS,
+ 'fromUint8x16Bits', Int8x16FromUint8x16BitsJS,
+ 'load', Int8x16LoadJS,
+ 'store', Int8x16StoreJS,
+]);
+
+utils.InstallFunctions(GlobalUint8x16, DONT_ENUM, [
+ 'splat', Uint8x16Splat,
+ 'check', Uint8x16CheckJS,
+ 'extractLane', Uint8x16ExtractLaneJS,
+ 'replaceLane', Uint8x16ReplaceLaneJS,
+ 'add', Uint8x16AddJS,
+ 'sub', Uint8x16SubJS,
+ 'addSaturate', Uint8x16AddSaturateJS,
+ 'subSaturate', Uint8x16SubSaturateJS,
+ 'mul', Uint8x16MulJS,
+ 'min', Uint8x16MinJS,
+ 'max', Uint8x16MaxJS,
+ 'and', Uint8x16AndJS,
+ 'or', Uint8x16OrJS,
+ 'xor', Uint8x16XorJS,
+ 'not', Uint8x16NotJS,
+ 'shiftLeftByScalar', Uint8x16ShiftLeftByScalarJS,
+ 'shiftRightByScalar', Uint8x16ShiftRightByScalarJS,
+ 'lessThan', Uint8x16LessThanJS,
+ 'lessThanOrEqual', Uint8x16LessThanOrEqualJS,
+ 'greaterThan', Uint8x16GreaterThanJS,
+ 'greaterThanOrEqual', Uint8x16GreaterThanOrEqualJS,
+ 'equal', Uint8x16EqualJS,
+ 'notEqual', Uint8x16NotEqualJS,
+ 'select', Uint8x16SelectJS,
+ 'swizzle', Uint8x16SwizzleJS,
+ 'shuffle', Uint8x16ShuffleJS,
+ 'fromInt8x16', Uint8x16FromInt8x16JS,
+ 'fromFloat32x4Bits', Uint8x16FromFloat32x4BitsJS,
+ 'fromInt32x4Bits', Uint8x16FromInt32x4BitsJS,
+ 'fromUint32x4Bits', Uint8x16FromUint32x4BitsJS,
+ 'fromInt16x8Bits', Uint8x16FromInt16x8BitsJS,
+ 'fromUint16x8Bits', Uint8x16FromUint16x8BitsJS,
+ 'fromInt8x16Bits', Uint8x16FromInt8x16BitsJS,
+ 'load', Uint8x16LoadJS,
+ 'store', Uint8x16StoreJS,
]);
utils.InstallFunctions(GlobalBool8x16, DONT_ENUM, [
@@ -663,8 +893,6 @@ utils.InstallFunctions(GlobalBool8x16, DONT_ENUM, [
'not', Bool8x16NotJS,
'anyTrue', Bool8x16AnyTrueJS,
'allTrue', Bool8x16AllTrueJS,
- 'equal', Bool8x16EqualJS,
- 'notEqual', Bool8x16NotEqualJS,
'swizzle', Bool8x16SwizzleJS,
'shuffle', Bool8x16ShuffleJS,
]);
@@ -672,10 +900,13 @@ utils.InstallFunctions(GlobalBool8x16, DONT_ENUM, [
utils.Export(function(to) {
to.Float32x4ToString = Float32x4ToString;
to.Int32x4ToString = Int32x4ToString;
+ to.Uint32x4ToString = Uint32x4ToString;
to.Bool32x4ToString = Bool32x4ToString;
to.Int16x8ToString = Int16x8ToString;
+ to.Uint16x8ToString = Uint16x8ToString;
to.Bool16x8ToString = Bool16x8ToString;
to.Int8x16ToString = Int8x16ToString;
+ to.Uint8x16ToString = Uint8x16ToString;
to.Bool8x16ToString = Bool8x16ToString;
});
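
The SIMD.js hunks above fold the per-type boilerplate into type-list macros, add the unsigned Uint32x4/Uint16x8/Uint8x16 types with full conversion coverage, collapse the two shift-right variants into a single shiftRightByScalar (signedness now follows the lane type), and install load/store plus the partial load1/load2/load3 and store1/store2/store3 on the 4-lane types. A minimal usage sketch against this era's flagged SIMD.js API (run with --harmony_simd; the API was experimental and later withdrawn):

    // Constructors are called without `new`; the code above throws on construct calls.
    var u = SIMD.Uint32x4(1, 2, 3, 4);
    var sum = SIMD.Uint32x4.add(u, SIMD.Uint32x4.splat(5));
    SIMD.Uint32x4.extractLane(sum, 0);                 // 6

    // One shift-right entry point; the lane type picks arithmetic vs. logical.
    SIMD.Int32x4.shiftRightByScalar(SIMD.Int32x4(-8, 8, -16, 16), 2);  // -2, 2, -4, 4

    // Partial loads/stores move only the first COUNT lanes of a 4-lane value.
    var ta = new Float32Array(8);
    SIMD.Float32x4.store2(ta, 0, SIMD.Float32x4(1.5, 2.5, 3.5, 4.5));  // writes ta[0..1]
    var two = SIMD.Float32x4.load2(ta, 0);             // lanes 2 and 3 come back as 0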
diff --git a/deps/v8/src/harmony-spread.js b/deps/v8/src/harmony-spread.js
index bfd6acb3a1..b271c7efe5 100644
--- a/deps/v8/src/harmony-spread.js
+++ b/deps/v8/src/harmony-spread.js
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $spreadArguments;
-var $spreadIterable;
-
(function(global, utils) {
'use strict';
@@ -43,7 +40,12 @@ function SpreadIterable(collection) {
return args;
}
-$spreadArguments = SpreadArguments;
-$spreadIterable = SpreadIterable;
+// ----------------------------------------------------------------------------
+// Exports
+
+%InstallToContext([
+ "spread_arguments", SpreadArguments,
+ "spread_iterable", SpreadIterable,
+]);
})
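
With this change the spread helpers stop leaking through $-prefixed globals and are handed to the C++ runtime by name via %InstallToContext. What SpreadIterable implements is the lowering of an argument spread; in user-level terms (plain JS, no internals):

    function sum(a, b, c) { return a + b + c; }
    sum(...[1, 2, 3]);                 // 6: the engine collects the iterable into
                                       // an argument list via spread_iterable
    Math.max(...new Set([3, 1, 4]));   // 4: any iterable works, not just arrays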
diff --git a/deps/v8/src/harmony-tostring.js b/deps/v8/src/harmony-tostring.js
index e234781da8..8e76c3a5bb 100644
--- a/deps/v8/src/harmony-tostring.js
+++ b/deps/v8/src/harmony-tostring.js
@@ -9,10 +9,11 @@
%CheckIsBootstrapping();
var GlobalSymbol = global.Symbol;
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.InstallConstants(GlobalSymbol, [
// TODO(dslomov, caitp): Move to symbol.js when shipping
- "toStringTag", symbolToStringTag
+ "toStringTag", toStringTagSymbol
]);
})
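
The to-string-tag setup now imports the symbol by name through utils.ImportNow rather than relying on a symbolToStringTag binding being in scope; the observable feature is unchanged. A quick sketch of what Symbol.toStringTag controls, in an engine where the (then-flagged) support is enabled:

    var obj = {};
    obj[Symbol.toStringTag] = 'Custom';
    Object.prototype.toString.call(obj);   // "[object Custom]"
    String(obj);                           // same: "[object Custom]"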
diff --git a/deps/v8/src/harmony-typedarray.js b/deps/v8/src/harmony-typedarray.js
index cd220dae83..9d66e211e9 100644
--- a/deps/v8/src/harmony-typedarray.js
+++ b/deps/v8/src/harmony-typedarray.js
@@ -215,7 +215,7 @@ function TypedArraySort(comparefn) {
comparefn = TypedArrayComparefn;
}
- return %_CallFunction(this, length, comparefn, InnerArraySort);
+ return InnerArraySort(this, length, comparefn);
}
@@ -224,8 +224,7 @@ function TypedArrayIndexOf(element, index) {
if (!%_IsTypedArray(this)) throw MakeTypeError(kNotTypedArray);
var length = %_TypedArrayGetLength(this);
-
- return %_CallFunction(this, element, index, length, InnerArrayIndexOf);
+ return InnerArrayIndexOf(this, element, index, length);
}
%FunctionSetLength(TypedArrayIndexOf, 1);
@@ -236,8 +235,8 @@ function TypedArrayLastIndexOf(element, index) {
var length = %_TypedArrayGetLength(this);
- return %_CallFunction(this, element, index, length,
- %_ArgumentsLength(), InnerArrayLastIndexOf);
+ return InnerArrayLastIndexOf(this, element, index, length,
+ %_ArgumentsLength());
}
%FunctionSetLength(TypedArrayLastIndexOf, 1);
@@ -278,7 +277,7 @@ function TypedArrayToLocaleString() {
// ES6 section 22.2.3.28
function TypedArrayToString() {
- return %_CallFunction(this, ArrayToString);
+ return %_Call(ArrayToString, this);
}
@@ -373,7 +372,7 @@ function TypedArrayOf() {
function TypedArrayFrom(source, mapfn, thisArg) {
// TODO(littledan): Investigate if there is a receiver which could be
// faster to accumulate on than Array, e.g., a TypedVector.
- var array = %_CallFunction(GlobalArray, source, mapfn, thisArg, ArrayFrom);
+ var array = %_Call(ArrayFrom, GlobalArray, source, mapfn, thisArg);
return ConstructTypedArray(this, array);
}
%FunctionSetLength(TypedArrayFrom, 1);
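
These hunks retire the %_CallFunction intrinsic in typed-array code, either by invoking the shared Inner* helpers directly or by switching to %_Call, which takes the callee first and the receiver second. A plain-JS analogue of the new TypedArrayToString, runnable in any ES2015 engine (the intrinsic's argument order mirrors Reflect.apply):

    // %_Call(ArrayToString, this)  ~  Reflect.apply(Array.prototype.toString, this, [])
    var ta = new Uint8Array([1, 2, 3]);
    Reflect.apply(Array.prototype.toString, ta, []);   // "1,2,3"
    Array.prototype.toString.call(ta);                 // same result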
diff --git a/deps/v8/src/heap/OWNERS b/deps/v8/src/heap/OWNERS
index a8533293e1..32da1ecead 100644
--- a/deps/v8/src/heap/OWNERS
+++ b/deps/v8/src/heap/OWNERS
@@ -1,6 +1,7 @@
set noparent
hpayer@chromium.org
+jochen@chromium.org
mlippautz@chromium.org
mstarzinger@chromium.org
ulan@chromium.org
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
new file mode 100644
index 0000000000..bbe3c6b936
--- /dev/null
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -0,0 +1,138 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "src/objects-inl.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+ArrayBufferTracker::~ArrayBufferTracker() {
+ Isolate* isolate = heap()->isolate();
+ size_t freed_memory = 0;
+ for (auto& buffer : live_array_buffers_) {
+ isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
+ freed_memory += buffer.second;
+ }
+ for (auto& buffer : live_array_buffers_for_scavenge_) {
+ isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
+ freed_memory += buffer.second;
+ }
+ live_array_buffers_.clear();
+ live_array_buffers_for_scavenge_.clear();
+ not_yet_discovered_array_buffers_.clear();
+ not_yet_discovered_array_buffers_for_scavenge_.clear();
+
+ if (freed_memory > 0) {
+ heap()->update_amount_of_external_allocated_memory(
+ -static_cast<int64_t>(freed_memory));
+ }
+}
+
+
+void ArrayBufferTracker::RegisterNew(JSArrayBuffer* buffer) {
+ void* data = buffer->backing_store();
+ if (!data) return;
+
+ bool in_new_space = heap()->InNewSpace(buffer);
+ size_t length = NumberToSize(heap()->isolate(), buffer->byte_length());
+ if (in_new_space) {
+ live_array_buffers_for_scavenge_[data] = length;
+ } else {
+ live_array_buffers_[data] = length;
+ }
+
+ // We may go over the limit of externally allocated memory here. We call the
+ // api function to trigger a GC in this case.
+ reinterpret_cast<v8::Isolate*>(heap()->isolate())
+ ->AdjustAmountOfExternalAllocatedMemory(length);
+}
+
+
+void ArrayBufferTracker::Unregister(JSArrayBuffer* buffer) {
+ void* data = buffer->backing_store();
+ if (!data) return;
+
+ bool in_new_space = heap()->InNewSpace(buffer);
+ std::map<void*, size_t>* live_buffers =
+ in_new_space ? &live_array_buffers_for_scavenge_ : &live_array_buffers_;
+ std::map<void*, size_t>* not_yet_discovered_buffers =
+ in_new_space ? &not_yet_discovered_array_buffers_for_scavenge_
+ : &not_yet_discovered_array_buffers_;
+
+ DCHECK(live_buffers->count(data) > 0);
+
+ size_t length = (*live_buffers)[data];
+ live_buffers->erase(data);
+ not_yet_discovered_buffers->erase(data);
+
+ heap()->update_amount_of_external_allocated_memory(
+ -static_cast<int64_t>(length));
+}
+
+
+void ArrayBufferTracker::MarkLive(JSArrayBuffer* buffer) {
+ void* data = buffer->backing_store();
+
+ // ArrayBuffer might be in the middle of being constructed.
+ if (data == heap()->undefined_value()) return;
+ if (heap()->InNewSpace(buffer)) {
+ not_yet_discovered_array_buffers_for_scavenge_.erase(data);
+ } else {
+ not_yet_discovered_array_buffers_.erase(data);
+ }
+}
+
+
+void ArrayBufferTracker::FreeDead(bool from_scavenge) {
+ size_t freed_memory = 0;
+ Isolate* isolate = heap()->isolate();
+ for (auto& buffer : not_yet_discovered_array_buffers_for_scavenge_) {
+ isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
+ freed_memory += buffer.second;
+ live_array_buffers_for_scavenge_.erase(buffer.first);
+ }
+
+ if (!from_scavenge) {
+ for (auto& buffer : not_yet_discovered_array_buffers_) {
+ isolate->array_buffer_allocator()->Free(buffer.first, buffer.second);
+ freed_memory += buffer.second;
+ live_array_buffers_.erase(buffer.first);
+ }
+ }
+
+ not_yet_discovered_array_buffers_for_scavenge_ =
+ live_array_buffers_for_scavenge_;
+ if (!from_scavenge) not_yet_discovered_array_buffers_ = live_array_buffers_;
+
+ // Do not call through the api as this code is triggered while doing a GC.
+ heap()->update_amount_of_external_allocated_memory(
+ -static_cast<int64_t>(freed_memory));
+}
+
+
+void ArrayBufferTracker::PrepareDiscoveryInNewSpace() {
+ not_yet_discovered_array_buffers_for_scavenge_ =
+ live_array_buffers_for_scavenge_;
+}
+
+
+void ArrayBufferTracker::Promote(JSArrayBuffer* buffer) {
+ if (buffer->is_external()) return;
+ void* data = buffer->backing_store();
+ if (!data) return;
+ // ArrayBuffer might be in the middle of being constructed.
+ if (data == heap()->undefined_value()) return;
+ DCHECK(live_array_buffers_for_scavenge_.count(data) > 0);
+ live_array_buffers_[data] = live_array_buffers_for_scavenge_[data];
+ live_array_buffers_for_scavenge_.erase(data);
+ not_yet_discovered_array_buffers_for_scavenge_.erase(data);
+}
+
+} // namespace internal
+} // namespace v8
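
Note that RegisterNew and Unregister above also keep the isolate's external-memory counter in sync, which is how large off-heap backing stores get to pressure the GC. This is observable from script in later Node releases; process.memoryUsage().external postdates this commit, so treat the sketch as illustrative only:

    const before = process.memoryUsage().external;
    let buffers = Array.from({ length: 64 }, () => new ArrayBuffer(1 << 20));
    const grown = process.memoryUsage().external - before;
    console.log((grown / (1 << 20)).toFixed(0) + ' MB of backing stores tracked');
    buffers = null;   // once unreachable, FreeDead() can release the stores after GC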
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
new file mode 100644
index 0000000000..c12557a9fc
--- /dev/null
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -0,0 +1,73 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_ARRAY_BUFFER_TRACKER_H_
+#define V8_HEAP_ARRAY_BUFFER_TRACKER_H_
+
+#include <map>
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class Heap;
+class JSArrayBuffer;
+
+class ArrayBufferTracker {
+ public:
+ explicit ArrayBufferTracker(Heap* heap) : heap_(heap) {}
+ ~ArrayBufferTracker();
+
+ inline Heap* heap() { return heap_; }
+
+ // The following methods are used to track raw C++ pointers to externally
+ // allocated memory used as backing store in live array buffers.
+
+ // A new ArrayBuffer was created with |data| as backing store.
+ void RegisterNew(JSArrayBuffer* buffer);
+
+ // The backing store |data| is no longer owned by V8.
+ void Unregister(JSArrayBuffer* buffer);
+
+ // A live ArrayBuffer was discovered during marking/scavenge.
+ void MarkLive(JSArrayBuffer* buffer);
+
+ // Frees all backing store pointers that weren't discovered in the previous
+ // marking or scavenge phase.
+ void FreeDead(bool from_scavenge);
+
+ // Prepare for a new scavenge phase. A new marking phase is implicitly
+ // prepared by finishing the previous one.
+ void PrepareDiscoveryInNewSpace();
+
+ // An ArrayBuffer moved from new space to old space.
+ void Promote(JSArrayBuffer* buffer);
+
+ private:
+ Heap* heap_;
+
+ // |live_array_buffers_| maps externally allocated memory used as backing
+ // store for ArrayBuffers to the length of the respective memory blocks.
+ //
+ // At the beginning of mark/compact, |not_yet_discovered_array_buffers_| is
+ // a copy of |live_array_buffers_| and we remove pointers as we discover live
+ // ArrayBuffer objects during marking. At the end of mark/compact, the
+ // remaining memory blocks can be freed.
+ std::map<void*, size_t> live_array_buffers_;
+ std::map<void*, size_t> not_yet_discovered_array_buffers_;
+
+ // To be able to free memory held by ArrayBuffers during scavenge as well, we
+ // have a separate list of allocated memory held by ArrayBuffers in new space.
+ //
+ // Since mark/compact also evacuates the new space, all pointers in the
+ // |live_array_buffers_for_scavenge_| list are also in the
+ // |live_array_buffers_| list.
+ std::map<void*, size_t> live_array_buffers_for_scavenge_;
+ std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_;
+};
+}
+} // namespace v8::internal
+#endif // V8_HEAP_ARRAY_BUFFER_TRACKER_H_
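
Taken together, the tracker is a small mark/sweep protocol over raw backing-store pointers, kept per generation exactly as the header comments describe. A conceptual model in plain JS (class and method names are illustrative, not V8's):

    class BufferTracker {
      constructor(freeFn) {
        this.freeFn = freeFn;         // releases one backing store: freeFn(data, length)
        this.liveOld = new Map();     // data -> byteLength for old-generation buffers
        this.liveNew = new Map();     // same, for buffers still in new space
        this.undiscoveredOld = new Map();
        this.undiscoveredNew = new Map();
      }
      registerNew(data, length, inNewSpace) {
        (inNewSpace ? this.liveNew : this.liveOld).set(data, length);
      }
      prepareScavenge() {             // every new-space buffer starts undiscovered
        this.undiscoveredNew = new Map(this.liveNew);
      }
      markLive(data, inNewSpace) {    // a GC visitor reached the buffer: keep it
        (inNewSpace ? this.undiscoveredNew : this.undiscoveredOld).delete(data);
      }
      promote(data) {                 // buffer survived scavenge into old space
        this.liveOld.set(data, this.liveNew.get(data));
        this.liveNew.delete(data);
        this.undiscoveredNew.delete(data);
      }
      freeDead(fromScavenge) {        // whatever marking never reached is dead
        for (const [data, length] of this.undiscoveredNew) {
          this.freeFn(data, length);
          this.liveNew.delete(data);
        }
        if (!fromScavenge) {
          for (const [data, length] of this.undiscoveredOld) {
            this.freeFn(data, length);
            this.liveOld.delete(data);
          }
          this.undiscoveredOld = new Map(this.liveOld);
        }
        this.undiscoveredNew = new Map(this.liveNew);
      }
    }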
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index 096412d578..e1f9ef43e7 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -26,42 +26,24 @@ void GCIdleTimeAction::Print() {
case DO_NOTHING:
PrintF("no action");
break;
- case DO_INCREMENTAL_MARKING:
- PrintF("incremental marking with step %" V8_PTR_PREFIX "d / ms",
- parameter);
+ case DO_INCREMENTAL_STEP:
+ PrintF("incremental step");
if (additional_work) {
PrintF("; finalized marking");
}
break;
- case DO_SCAVENGE:
- PrintF("scavenge");
- break;
case DO_FULL_GC:
PrintF("full GC");
break;
- case DO_FINALIZE_SWEEPING:
- PrintF("finalize sweeping");
- break;
}
}
-void GCIdleTimeHandler::HeapState::Print() {
+void GCIdleTimeHeapState::Print() {
PrintF("contexts_disposed=%d ", contexts_disposed);
PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
PrintF("size_of_objects=%" V8_PTR_PREFIX "d ", size_of_objects);
PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
- PrintF("sweeping_in_progress=%d ", sweeping_in_progress);
- PrintF("has_low_allocation_rate=%d", has_low_allocation_rate);
- PrintF("mark_compact_speed=%" V8_PTR_PREFIX "d ",
- mark_compact_speed_in_bytes_per_ms);
- PrintF("incremental_marking_speed=%" V8_PTR_PREFIX "d ",
- incremental_marking_speed_in_bytes_per_ms);
- PrintF("scavenge_speed=%" V8_PTR_PREFIX "d ", scavenge_speed_in_bytes_per_ms);
- PrintF("new_space_size=%" V8_PTR_PREFIX "d ", used_new_space_size);
- PrintF("new_space_capacity=%" V8_PTR_PREFIX "d ", new_space_capacity);
- PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
- new_space_allocation_throughput_in_bytes_per_ms);
}
@@ -111,63 +93,6 @@ size_t GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
}
-bool GCIdleTimeHandler::ShouldDoScavenge(
- size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
- size_t scavenge_speed_in_bytes_per_ms,
- size_t new_space_allocation_throughput_in_bytes_per_ms) {
- if (idle_time_in_ms >= kMinBackgroundIdleTime) {
- // It is better to do full GC for the background tab.
- return false;
- }
- size_t new_space_allocation_limit =
- kMaxScheduledIdleTime * scavenge_speed_in_bytes_per_ms;
-
- // If the limit is larger than the new space size, then scavenging used to be
- // really fast. We can take advantage of the whole new space.
- if (new_space_allocation_limit > new_space_size) {
- new_space_allocation_limit = new_space_size;
- }
-
- // We do not know the allocation throughput before the first scavenge.
- // TODO(hpayer): Estimate allocation throughput before the first scavenge.
- if (new_space_allocation_throughput_in_bytes_per_ms > 0) {
- // We have to trigger scavenge before we reach the end of new space.
- size_t adjust_limit = new_space_allocation_throughput_in_bytes_per_ms *
- kTimeUntilNextIdleEvent;
- if (adjust_limit > new_space_allocation_limit) {
- new_space_allocation_limit = 0;
- } else {
- new_space_allocation_limit -= adjust_limit;
- }
- }
-
- if (new_space_allocation_throughput_in_bytes_per_ms <
- kLowAllocationThroughput) {
- new_space_allocation_limit =
- Min(new_space_allocation_limit,
- static_cast<size_t>(new_space_size * kConservativeTimeRatio));
- }
-
- // The allocated new space limit to trigger a scavange has to be at least
- // kMinimumNewSpaceSizeToPerformScavenge.
- if (new_space_allocation_limit < kMinimumNewSpaceSizeToPerformScavenge) {
- new_space_allocation_limit = kMinimumNewSpaceSizeToPerformScavenge;
- }
-
- if (scavenge_speed_in_bytes_per_ms == 0) {
- scavenge_speed_in_bytes_per_ms = kInitialConservativeScavengeSpeed;
- }
-
- if (new_space_allocation_limit <= used_new_space_size) {
- if (used_new_space_size / scavenge_speed_in_bytes_per_ms <=
- idle_time_in_ms) {
- return true;
- }
- }
- return false;
-}
-
-
bool GCIdleTimeHandler::ShouldDoMarkCompact(
size_t idle_time_in_ms, size_t size_of_objects,
size_t mark_compact_speed_in_bytes_per_ms) {
@@ -228,7 +153,7 @@ GCIdleTimeAction GCIdleTimeHandler::NothingOrDone(double idle_time_in_ms) {
// (5) If incremental marking is in progress, we perform a marking step. Note,
// that this currently may trigger a full garbage collection.
GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
- HeapState heap_state) {
+ GCIdleTimeHeapState heap_state) {
if (static_cast<int>(idle_time_in_ms) <= 0) {
if (heap_state.incremental_marking_stopped) {
if (ShouldDoContextDisposalMarkCompact(
@@ -247,30 +172,11 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
return NothingOrDone(idle_time_in_ms);
}
- if (ShouldDoScavenge(
- static_cast<size_t>(idle_time_in_ms), heap_state.new_space_capacity,
- heap_state.used_new_space_size,
- heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms)) {
- return GCIdleTimeAction::Scavenge();
- }
-
- if (heap_state.sweeping_in_progress) {
- if (heap_state.sweeping_completed) {
- return GCIdleTimeAction::FinalizeSweeping();
- } else {
- return NothingOrDone(idle_time_in_ms);
- }
- }
-
if (!FLAG_incremental_marking || heap_state.incremental_marking_stopped) {
return GCIdleTimeAction::Done();
}
- size_t step_size = EstimateMarkingStepSize(
- static_cast<size_t>(kIncrementalMarkingStepTimeInMs),
- heap_state.incremental_marking_speed_in_bytes_per_ms);
- return GCIdleTimeAction::IncrementalMarking(step_size);
+ return GCIdleTimeAction::IncrementalStep();
}
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index ebd132e752..74ef1b1e87 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -13,10 +13,8 @@ namespace internal {
enum GCIdleTimeActionType {
DONE,
DO_NOTHING,
- DO_INCREMENTAL_MARKING,
- DO_SCAVENGE,
+ DO_INCREMENTAL_STEP,
DO_FULL_GC,
- DO_FINALIZE_SWEEPING
};
@@ -25,7 +23,6 @@ class GCIdleTimeAction {
static GCIdleTimeAction Done() {
GCIdleTimeAction result;
result.type = DONE;
- result.parameter = 0;
result.additional_work = false;
return result;
}
@@ -33,23 +30,13 @@ class GCIdleTimeAction {
static GCIdleTimeAction Nothing() {
GCIdleTimeAction result;
result.type = DO_NOTHING;
- result.parameter = 0;
result.additional_work = false;
return result;
}
- static GCIdleTimeAction IncrementalMarking(intptr_t step_size) {
+ static GCIdleTimeAction IncrementalStep() {
GCIdleTimeAction result;
- result.type = DO_INCREMENTAL_MARKING;
- result.parameter = step_size;
- result.additional_work = false;
- return result;
- }
-
- static GCIdleTimeAction Scavenge() {
- GCIdleTimeAction result;
- result.type = DO_SCAVENGE;
- result.parameter = 0;
+ result.type = DO_INCREMENTAL_STEP;
result.additional_work = false;
return result;
}
@@ -57,15 +44,6 @@ class GCIdleTimeAction {
static GCIdleTimeAction FullGC() {
GCIdleTimeAction result;
result.type = DO_FULL_GC;
- result.parameter = 0;
- result.additional_work = false;
- return result;
- }
-
- static GCIdleTimeAction FinalizeSweeping() {
- GCIdleTimeAction result;
- result.type = DO_FINALIZE_SWEEPING;
- result.parameter = 0;
result.additional_work = false;
return result;
}
@@ -73,12 +51,20 @@ class GCIdleTimeAction {
void Print();
GCIdleTimeActionType type;
- intptr_t parameter;
bool additional_work;
};
-class GCTracer;
+class GCIdleTimeHeapState {
+ public:
+ void Print();
+
+ int contexts_disposed;
+ double contexts_disposal_rate;
+ size_t size_of_objects;
+ bool incremental_marking_stopped;
+};
+
// The idle time handler makes decisions about which garbage collection
// operations are executing during IdleNotification.
@@ -120,21 +106,10 @@ class GCIdleTimeHandler {
static const int kMinBackgroundIdleTime = 900;
- // We conservatively assume that in the next kTimeUntilNextIdleEvent ms
- // no idle notification happens.
- static const size_t kTimeUntilNextIdleEvent = 100;
-
// An allocation throughput below kLowAllocationThroughput bytes/ms is
// considered low
static const size_t kLowAllocationThroughput = 1000;
- // If we haven't recorded any scavenger events yet, we use a conservative
- // lower bound for the scavenger speed.
- static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
-
- // The minimum size of allocated new space objects to trigger a scavenge.
- static const size_t kMinimumNewSpaceSizeToPerformScavenge = MB / 2;
-
// If contexts are disposed at a higher rate a full gc is triggered.
static const double kHighContextDisposalRate;
@@ -148,29 +123,10 @@ class GCIdleTimeHandler {
// ensure we don't keep scheduling idle tasks and making no progress.
static const int kMaxNoProgressIdleTimes = 10;
- class HeapState {
- public:
- void Print();
-
- int contexts_disposed;
- double contexts_disposal_rate;
- size_t size_of_objects;
- bool incremental_marking_stopped;
- bool sweeping_in_progress;
- bool sweeping_completed;
- bool has_low_allocation_rate;
- size_t mark_compact_speed_in_bytes_per_ms;
- size_t incremental_marking_speed_in_bytes_per_ms;
- size_t final_incremental_mark_compact_speed_in_bytes_per_ms;
- size_t scavenge_speed_in_bytes_per_ms;
- size_t used_new_space_size;
- size_t new_space_capacity;
- size_t new_space_allocation_throughput_in_bytes_per_ms;
- };
-
GCIdleTimeHandler() : idle_times_which_made_no_progress_(0) {}
- GCIdleTimeAction Compute(double idle_time_in_ms, HeapState heap_state);
+ GCIdleTimeAction Compute(double idle_time_in_ms,
+ GCIdleTimeHeapState heap_state);
void ResetNoProgressCounter() { idle_times_which_made_no_progress_ = 0; }
@@ -196,11 +152,6 @@ class GCIdleTimeHandler {
static bool ShouldDoOverApproximateWeakClosure(size_t idle_time_in_ms);
- static bool ShouldDoScavenge(
- size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
- size_t scavenger_speed_in_bytes_per_ms,
- size_t new_space_allocation_throughput_in_bytes_per_ms);
-
private:
GCIdleTimeAction NothingOrDone(double idle_time_in_ms);
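
The handler's surface shrinks accordingly: per-action parameters are gone, scavenge and finalize-sweeping are no longer idle-time actions, and the heap state is down to four fields. The decision now lands on one of DONE, DO_NOTHING, DO_INCREMENTAL_STEP or DO_FULL_GC; an illustrative sketch of that shape (not the exact control flow, and the context-disposal rate check is abstracted into one boolean):

    function computeIdleAction(idleTimeMs, state) {
      // state mirrors GCIdleTimeHeapState; contextDisposalDemandsGC stands in
      // for the contexts_disposed / contexts_disposal_rate test.
      if (idleTimeMs <= 0) {
        return state.incrementalMarkingStopped && state.contextDisposalDemandsGC
            ? 'DO_FULL_GC' : 'DO_NOTHING';
      }
      if (state.incrementalMarkingStopped) return 'DONE';
      return 'DO_INCREMENTAL_STEP';   // the only incremental action left
    }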
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 3b8e24b474..e27fa27d83 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -5,9 +5,8 @@
#include "src/heap/gc-tracer.h"
#include "src/counters.h"
-#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
#include "src/isolate.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
namespace v8 {
namespace internal {
@@ -429,8 +428,6 @@ void GCTracer::PrintNVP() const {
PrintF("sweepcode=%.2f ", current_.scopes[Scope::MC_SWEEP_CODE]);
PrintF("sweepcell=%.2f ", current_.scopes[Scope::MC_SWEEP_CELL]);
PrintF("sweepmap=%.2f ", current_.scopes[Scope::MC_SWEEP_MAP]);
- PrintF("rescan_lo=%.2f ",
- current_.scopes[Scope::MC_RESCAN_LARGE_OBJECTS]);
PrintF("evacuate=%.1f ", current_.scopes[Scope::MC_EVACUATE_PAGES]);
PrintF("new_new=%.1f ",
current_.scopes[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
@@ -453,6 +450,9 @@ void GCTracer::PrintNVP() const {
current_.scopes[Scope::MC_WEAKCOLLECTION_CLEAR]);
PrintF("weakcollection_abort=%.1f ",
current_.scopes[Scope::MC_WEAKCOLLECTION_ABORT]);
+ PrintF("weakcells=%.1f ", current_.scopes[Scope::MC_WEAKCELL]);
+ PrintF("nonlive_refs=%.1f ",
+ current_.scopes[Scope::MC_NONLIVEREFERENCES]);
PrintF("steps_count=%d ", current_.incremental_marking_steps);
PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
@@ -474,9 +474,9 @@ void GCTracer::PrintNVP() const {
intptr_t allocated_since_last_gc =
current_.start_object_size - previous_.end_object_size;
PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc);
- PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size_);
+ PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size());
PrintF("semi_space_copied=%" V8_PTR_PREFIX "d ",
- heap_->semi_space_copied_object_size_);
+ heap_->semi_space_copied_object_size());
PrintF("nodes_died_in_new=%d ", heap_->nodes_died_in_new_space_);
PrintF("nodes_copied_in_new=%d ", heap_->nodes_copied_in_new_space_);
PrintF("nodes_promoted=%d ", heap_->nodes_promoted_);
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index 7572059dc9..de48d23997 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -105,7 +105,6 @@ class GCTracer {
MC_SWEEP_CODE,
MC_SWEEP_CELL,
MC_SWEEP_MAP,
- MC_RESCAN_LARGE_OBJECTS,
MC_EVACUATE_PAGES,
MC_UPDATE_NEW_TO_NEW_POINTERS,
MC_UPDATE_ROOT_TO_NEW_POINTERS,
@@ -118,6 +117,8 @@ class GCTracer {
MC_WEAKCOLLECTION_PROCESS,
MC_WEAKCOLLECTION_CLEAR,
MC_WEAKCOLLECTION_ABORT,
+ MC_WEAKCELL,
+ MC_NONLIVEREFERENCES,
MC_FLUSH_CODE,
SCAVENGER_CODE_FLUSH_CANDIDATES,
SCAVENGER_OBJECT_GROUPS,
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index aecdd40988..cff69b1e17 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -14,12 +14,11 @@
#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
#include "src/heap/store-buffer-inl.h"
-#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/list-inl.h"
#include "src/log.h"
#include "src/msan.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -66,7 +65,7 @@ INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
-#define SYMBOL_ACCESSOR(name, varname, description) \
+#define SYMBOL_ACCESSOR(name, description) \
Symbol* Heap::name() { return Symbol::cast(roots_[k##name##RootIndex]); }
PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
#undef SYMBOL_ACCESSOR
@@ -123,12 +122,11 @@ AllocationResult Heap::AllocateOneByteInternalizedString(
// Compute map and object size.
Map* map = one_byte_internalized_string_map();
int size = SeqOneByteString::SizeFor(str.length());
- AllocationSpace space = SelectSpace(size, TENURED);
// Allocate string.
- HeapObject* result;
+ HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
@@ -155,12 +153,11 @@ AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
// Compute map and object size.
Map* map = internalized_string_map();
int size = SeqTwoByteString::SizeFor(str.length());
- AllocationSpace space = SelectSpace(size, TENURED);
// Allocate string.
- HeapObject* result;
+ HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
@@ -192,13 +189,12 @@ AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
- AllocationSpace retry_space,
AllocationAlignment alignment) {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
DCHECK(gc_state_ == NOT_IN_GC);
#ifdef DEBUG
- if (FLAG_gc_interval >= 0 && AllowAllocationFailure::IsAllowed(isolate_) &&
+ if (FLAG_gc_interval >= 0 && !always_allocate() &&
Heap::allocation_timeout_-- <= 0) {
return AllocationResult::Retry(space);
}
@@ -206,13 +202,14 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
isolate_->counters()->objs_since_last_young()->Increment();
#endif
- HeapObject* object;
+ bool large_object = size_in_bytes > Page::kMaxRegularHeapObjectSize;
+ HeapObject* object = nullptr;
AllocationResult allocation;
if (NEW_SPACE == space) {
- allocation = new_space_.AllocateRaw(size_in_bytes, alignment);
- if (always_allocate() && allocation.IsRetry() && retry_space != NEW_SPACE) {
- space = retry_space;
+ if (large_object) {
+ space = LO_SPACE;
} else {
+ allocation = new_space_.AllocateRaw(size_in_bytes, alignment);
if (allocation.To(&object)) {
OnAllocationEvent(object, size_in_bytes);
}
@@ -220,20 +217,27 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
}
}
+ // Here we only allocate in the old generation.
if (OLD_SPACE == space) {
- allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
+ if (large_object) {
+ allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+ } else {
+ allocation = old_space_->AllocateRaw(size_in_bytes, alignment);
+ }
} else if (CODE_SPACE == space) {
if (size_in_bytes <= code_space()->AreaSize()) {
allocation = code_space_->AllocateRawUnaligned(size_in_bytes);
} else {
- // Large code objects are allocated in large object space.
allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
}
} else if (LO_SPACE == space) {
+ DCHECK(large_object);
allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
- } else {
- DCHECK(MAP_SPACE == space);
+ } else if (MAP_SPACE == space) {
allocation = map_space_->AllocateRawUnaligned(size_in_bytes);
+ } else {
+ // NEW_SPACE is not allowed here.
+ UNREACHABLE();
}
if (allocation.To(&object)) {
OnAllocationEvent(object, size_in_bytes);
@@ -323,9 +327,8 @@ void Heap::UpdateAllocationsHash(uint32_t value) {
}
-void Heap::PrintAlloctionsHash() {
- uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
- PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count_, hash);
+void Heap::RegisterExternalString(String* string) {
+ external_string_table_.AddString(string);
}
@@ -461,9 +464,6 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
}
-void Heap::ScavengePointer(HeapObject** p) { ScavengeObject(p, *p); }
-
-
AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
// Check if there is potentially a memento behind the object. If
// the last word of the memento is on another page we return
@@ -521,33 +521,6 @@ void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
}
-void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
- DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
-
- // We use the first word (where the map pointer usually is) of a heap
- // object to record the forwarding pointer. A forwarding pointer can
- // point to an old space, the code space, or the to space of the new
- // generation.
- MapWord first_word = object->map_word();
-
- // If the first word is a forwarding address, the object has already been
- // copied.
- if (first_word.IsForwardingAddress()) {
- HeapObject* dest = first_word.ToForwardingAddress();
- DCHECK(object->GetIsolate()->heap()->InFromSpace(*p));
- *p = dest;
- return;
- }
-
- UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);
-
- // AllocationMementos are unrooted and shouldn't survive a scavenge
- DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
- // Call the slow part of scavenge object.
- return ScavengeObjectSlow(p, object);
-}
-
-
bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
const v8::GCCallbackFlags callbackFlags) {
const char* collector_reason = NULL;
@@ -563,58 +536,7 @@ Isolate* Heap::isolate() {
}
-// Calls the FUNCTION_CALL function and retries it up to three times
-// to guarantee that any allocations performed during the call will
-// succeed if there's enough memory.
-
-// Warning: Do not use the identifiers __object__, __maybe_object__ or
-// __scope__ in a call to this macro.
-
-#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
- if (__allocation__.To(&__object__)) { \
- DCHECK(__object__ != (ISOLATE)->heap()->exception()); \
- RETURN_VALUE; \
- }
-
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY) \
- do { \
- AllocationResult __allocation__ = FUNCTION_CALL; \
- Object* __object__ = NULL; \
- RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
- /* Two GCs before panicking. In newspace will almost always succeed. */ \
- for (int __i__ = 0; __i__ < 2; __i__++) { \
- (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(), \
- "allocation failure"); \
- __allocation__ = FUNCTION_CALL; \
- RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
- } \
- (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment(); \
- (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc"); \
- { \
- AlwaysAllocateScope __scope__(ISOLATE); \
- __allocation__ = FUNCTION_CALL; \
- } \
- RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
- /* TODO(1181417): Fix this. */ \
- v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
- RETURN_EMPTY; \
- } while (false)
-
-#define CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, RETURN_VALUE, \
- RETURN_EMPTY) \
- CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)
-
-#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE) \
- CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, \
- return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
- return Handle<TYPE>())
-
-
-#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
- CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return)
-
-
-void ExternalStringTable::AddString(String* string) {
+void Heap::ExternalStringTable::AddString(String* string) {
DCHECK(string->IsExternalString());
if (heap_->InNewSpace(string)) {
new_space_strings_.Add(string);
@@ -624,7 +546,7 @@ void ExternalStringTable::AddString(String* string) {
}
-void ExternalStringTable::Iterate(ObjectVisitor* v) {
+void Heap::ExternalStringTable::Iterate(ObjectVisitor* v) {
if (!new_space_strings_.is_empty()) {
Object** start = &new_space_strings_[0];
v->VisitPointers(start, start + new_space_strings_.length());
@@ -638,7 +560,7 @@ void ExternalStringTable::Iterate(ObjectVisitor* v) {
// Verify() is inline to avoid ifdef-s around its calls in release
// mode.
-void ExternalStringTable::Verify() {
+void Heap::ExternalStringTable::Verify() {
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
Object* obj = Object::cast(new_space_strings_[i]);
@@ -654,14 +576,14 @@ void ExternalStringTable::Verify() {
}
-void ExternalStringTable::AddOldString(String* string) {
+void Heap::ExternalStringTable::AddOldString(String* string) {
DCHECK(string->IsExternalString());
DCHECK(!heap_->InNewSpace(string));
old_space_strings_.Add(string);
}
-void ExternalStringTable::ShrinkNewStrings(int position) {
+void Heap::ExternalStringTable::ShrinkNewStrings(int position) {
new_space_strings_.Rewind(position);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -715,12 +637,15 @@ uint32_t Heap::HashSeed() {
}
-Smi* Heap::NextScriptId() {
- int next_id = last_script_id()->value() + 1;
- if (!Smi::IsValid(next_id) || next_id < 0) next_id = 1;
- Smi* next_id_smi = Smi::FromInt(next_id);
- set_last_script_id(next_id_smi);
- return next_id_smi;
+int Heap::NextScriptId() {
+ int last_id = last_script_id()->value();
+ if (last_id == Smi::kMaxValue) {
+ last_id = 1;
+ } else {
+ last_id++;
+ }
+ set_last_script_id(Smi::FromInt(last_id));
+ return last_id;
}
@@ -749,26 +674,13 @@ void Heap::SetSetterStubDeoptPCOffset(int pc_offset) {
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
- : heap_(isolate->heap()), daf_(isolate) {
- heap_->always_allocate_scope_depth_++;
+ : heap_(isolate->heap()) {
+ heap_->always_allocate_scope_count_.Increment(1);
}
AlwaysAllocateScope::~AlwaysAllocateScope() {
- heap_->always_allocate_scope_depth_--;
-}
-
-
-GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
- heap_->gc_callbacks_depth_++;
-}
-
-
-GCCallbacksScope::~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
-
-
-bool GCCallbacksScope::CheckReenter() {
- return heap_->gc_callbacks_depth_ == 1;
+ heap_->always_allocate_scope_count_.Increment(-1);
}
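
Two behavioral shifts in heap-inl.h are worth calling out: AllocateRaw loses its retry_space parameter and instead routes any request above Page::kMaxRegularHeapObjectSize straight to the large-object space, and NextScriptId returns a plain int that wraps from Smi::kMaxValue back to 1 instead of round-tripping through a Smi. An illustrative sketch of both rules (the constants are placeholders, not V8's real values):

    const kMaxRegularHeapObjectSize = 512 * 1024;  // placeholder threshold
    const kSmiMaxValue = 2 ** 30 - 1;              // placeholder; smi width varies

    function routeAllocation(requestedSpace, sizeInBytes) {
      const large = sizeInBytes > kMaxRegularHeapObjectSize;
      if (large && (requestedSpace === 'NEW_SPACE' || requestedSpace === 'OLD_SPACE')) {
        return 'LO_SPACE';            // no retry_space fallback anymore
      }
      return requestedSpace;          // CODE_SPACE still special-cases big code
    }

    function nextScriptId(lastId) {
      return lastId === kSmiMaxValue ? 1 : lastId + 1;  // wrap before Smi overflow
    }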
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index edf9dea31d..e04f99ff7e 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -13,25 +13,30 @@
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
+#include "src/heap/array-buffer-tracker.h"
#include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-reducer.h"
+#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/scavenge-job.h"
+#include "src/heap/scavenger-inl.h"
#include "src/heap/store-buffer.h"
-#include "src/heap-profiler.h"
#include "src/interpreter/interpreter.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/runtime-profiler.h"
#include "src/scopeinfo.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serialize.h"
#include "src/snapshot/snapshot.h"
+#include "src/type-feedback-vector.h"
#include "src/utils.h"
#include "src/v8.h"
#include "src/v8threads.h"
@@ -71,7 +76,7 @@ Heap::Heap()
maximum_committed_(0),
survived_since_last_expansion_(0),
survived_last_scavenge_(0),
- always_allocate_scope_depth_(0),
+ always_allocate_scope_count_(0),
contexts_disposed_(0),
global_ic_age_(0),
scan_on_scavenge_pages_(0),
@@ -88,7 +93,6 @@ Heap::Heap()
ms_count_(0),
gc_count_(0),
remembered_unmapped_pages_index_(0),
- unflattened_strings_length_(0),
#ifdef DEBUG
allocation_timeout_(0),
#endif // DEBUG
@@ -97,9 +101,8 @@ Heap::Heap()
optimize_for_memory_usage_(false),
inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
- hidden_string_(NULL),
total_regexp_code_generated_(0),
- tracer_(this),
+ tracer_(nullptr),
high_survival_rate_period_length_(0),
promoted_objects_size_(0),
promotion_ratio_(0),
@@ -118,10 +121,14 @@ Heap::Heap()
sweeping_time_(0.0),
last_idle_notification_time_(0.0),
last_gc_time_(0.0),
- mark_compact_collector_(this),
+ scavenge_collector_(nullptr),
+ mark_compact_collector_(nullptr),
store_buffer_(this),
- incremental_marking_(this),
- memory_reducer_(this),
+ incremental_marking_(nullptr),
+ gc_idle_time_handler_(nullptr),
+ memory_reducer_(nullptr),
+ object_stats_(nullptr),
+ scavenge_job_(nullptr),
full_codegen_bytes_generated_(0),
crankshaft_codegen_bytes_generated_(0),
new_space_allocation_counter_(0),
@@ -134,12 +141,16 @@ Heap::Heap()
promotion_queue_(this),
configured_(false),
current_gc_flags_(Heap::kNoGCFlags),
+ current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags),
external_string_table_(this),
chunks_queued_for_free_(NULL),
+ concurrent_unmapping_tasks_active_(0),
+ pending_unmapping_tasks_semaphore_(0),
gc_callbacks_depth_(0),
deserialization_complete_(false),
concurrent_sweeping_enabled_(false),
- strong_roots_list_(NULL) {
+ strong_roots_list_(NULL),
+ array_buffer_tracker_(NULL) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -158,8 +169,6 @@ Heap::Heap()
  // Put a dummy entry in the remembered pages so we can find the list in
  // the minidump even if there are no real unmapped pages.
RememberUnmappedPage(NULL, false);
-
- ClearObjectStats(true);
}
@@ -402,7 +411,6 @@ void Heap::GarbageCollectionPrologue() {
{
AllowHeapAllocation for_the_first_part_of_prologue;
gc_count_++;
- unflattened_strings_length_ = 0;
if (FLAG_flush_code) {
mark_compact_collector()->EnableCodeFlushing(true);
@@ -478,8 +486,14 @@ const char* Heap::GetSpaceName(int idx) {
}
-void Heap::ClearAllICsByKind(Code::Kind kind) {
- // TODO(mvstanton): Do not iterate the heap.
+void Heap::ClearAllKeyedStoreICs() {
+ if (FLAG_vector_stores) {
+ TypeFeedbackVector::ClearAllKeyedStoreICs(isolate_);
+ return;
+ }
+
+ // TODO(mvstanton): Remove this function when FLAG_vector_stores is turned on
+ // permanently, and divert all callers to KeyedStoreIC::ClearAllKeyedStoreICs.
HeapObjectIterator it(code_space());
for (Object* object = it.Next(); object != NULL; object = it.Next()) {
@@ -487,7 +501,7 @@ void Heap::ClearAllICsByKind(Code::Kind kind) {
Code::Kind current_kind = code->kind();
if (current_kind == Code::FUNCTION ||
current_kind == Code::OPTIMIZED_FUNCTION) {
- code->ClearInlineCaches(kind);
+ code->ClearInlineCaches(Code::KEYED_STORE_IC);
}
}
}
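+
+// In the function above, the fast path clears keyed-store ICs through the
+// TypeFeedbackVector when --vector-stores is enabled; the fallback still
+// walks code space and clears inline caches of kind Code::KEYED_STORE_IC
+// in FUNCTION and OPTIMIZED_FUNCTION code objects.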
@@ -717,12 +731,9 @@ void Heap::GarbageCollectionEpilogue() {
void Heap::PreprocessStackTraces() {
- if (!weak_stack_trace_list()->IsWeakFixedArray()) return;
- WeakFixedArray* array = WeakFixedArray::cast(weak_stack_trace_list());
- int length = array->Length();
- for (int i = 0; i < length; i++) {
- if (array->IsEmptySlot(i)) continue;
- FixedArray* elements = FixedArray::cast(array->Get(i));
+ WeakFixedArray::Iterator iterator(weak_stack_trace_list());
+ FixedArray* elements;
+ while ((elements = iterator.Next<FixedArray>())) {
for (int j = 1; j < elements->length(); j += 4) {
Object* maybe_code = elements->get(j + 2);
// If GC happens while adding a stack trace to the weak fixed array,
@@ -742,11 +753,25 @@ void Heap::PreprocessStackTraces() {
}
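+
+// A minimal sketch of the WeakFixedArray::Iterator pattern used above,
+// which replaces the manual IsEmptySlot() loop (usage as in this hunk;
+// Next<T>() presumably skips cleared slots and returns NULL at the end):
+//
+//   WeakFixedArray::Iterator iterator(weak_stack_trace_list());
+//   FixedArray* elements;
+//   while ((elements = iterator.Next<FixedArray>())) {
+//     // ... each surviving entry is visited exactly once ...
+//   }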
+class GCCallbacksScope {
+ public:
+ explicit GCCallbacksScope(Heap* heap) : heap_(heap) {
+ heap_->gc_callbacks_depth_++;
+ }
+ ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
+
+ bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; }
+
+ private:
+ Heap* heap_;
+};
+
+
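+// A minimal usage sketch for the GCCallbacksScope above (hypothetical
+// caller):
+//
+//   GCCallbacksScope scope(heap);
+//   if (scope.CheckReenter()) {
+//     // Depth is exactly 1: we are not nested inside another GC
+//     // callback, so it is safe to invoke embedder callbacks here.
+//   }
+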
void Heap::HandleGCRequest() {
if (incremental_marking()->request_type() ==
IncrementalMarking::COMPLETE_MARKING) {
- CollectAllGarbage(current_gc_flags(), "GC interrupt",
- incremental_marking()->CallbackFlags());
+ CollectAllGarbage(current_gc_flags_, "GC interrupt",
+ current_gc_callback_flags_);
return;
}
DCHECK(FLAG_overapproximate_weak_closure);
@@ -756,6 +781,11 @@ void Heap::HandleGCRequest() {
}
+void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) {
+ scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated);
+}
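+// The helper above forwards to the new ScavengeJob (see the
+// src/heap/scavenge-job.h include added in this patch); presumably the
+// job posts an idle task once enough allocation has accumulated.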
+
+
void Heap::OverApproximateWeakClosure(const char* gc_reason) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Overapproximate weak closure (%s).\n",
@@ -772,9 +802,7 @@ void Heap::OverApproximateWeakClosure(const char* gc_reason) {
GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
- // TODO(mlippautz): Report kGCTypeIncremental once blink updates its
- // filtering.
- CallGCPrologueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
+ CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
}
}
incremental_marking()->MarkObjectGroups();
@@ -785,9 +813,7 @@ void Heap::OverApproximateWeakClosure(const char* gc_reason) {
GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
VMState<EXTERNAL> state(isolate_);
HandleScope handle_scope(isolate_);
- // TODO(mlippautz): Report kGCTypeIncremental once blink updates its
- // filtering.
- CallGCEpilogueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
+ CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags);
}
}
}
@@ -839,6 +865,32 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
}
+void Heap::ReportExternalMemoryPressure(const char* gc_reason) {
+ if (incremental_marking()->IsStopped()) {
+ if (incremental_marking()->CanBeActivated()) {
+ StartIncrementalMarking(
+ i::Heap::kNoGCFlags,
+ kGCCallbackFlagSynchronousPhantomCallbackProcessing, gc_reason);
+ } else {
+ CollectAllGarbage(i::Heap::kNoGCFlags, gc_reason,
+ kGCCallbackFlagSynchronousPhantomCallbackProcessing);
+ }
+ } else {
+ // Incremental marking is turned on and has already been started.
+
+ // TODO(mlippautz): Compute the time slice for incremental marking based on
+ // memory pressure.
+ double deadline = MonotonicallyIncreasingTimeInMs() +
+ FLAG_external_allocation_limit_incremental_time;
+ incremental_marking()->AdvanceIncrementalMarking(
+ 0, deadline,
+ IncrementalMarking::StepActions(IncrementalMarking::GC_VIA_STACK_GUARD,
+ IncrementalMarking::FORCE_MARKING,
+ IncrementalMarking::FORCE_COMPLETION));
+ }
+}
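+
+// A minimal sketch of the decision tree above: with marking stopped and
+// startable, start incremental marking with synchronous phantom-callback
+// processing; if it cannot be activated, fall back to a full
+// CollectAllGarbage with the same callback flag; if marking is already
+// running, advance it up to a deadline derived from
+// FLAG_external_allocation_limit_incremental_time.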
+
+
void Heap::EnsureFillerObjectAtTop() {
// There may be an allocation memento behind every object in new space.
// If we evacuate a not full new space or if we are on the last page of
@@ -889,7 +941,8 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
IncrementalMarking::NO_GC_VIA_STACK_GUARD);
if (!incremental_marking()->IsComplete() &&
- !mark_compact_collector_.marking_deque_.IsEmpty() && !FLAG_gc_global) {
+ !mark_compact_collector()->marking_deque_.IsEmpty() &&
+ !FLAG_gc_global) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
}
@@ -939,7 +992,7 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
HasHighFragmentation(used_memory_after, committed_memory_after) ||
(detached_contexts()->length() > 0);
if (deserialization_complete_) {
- memory_reducer_.NotifyMarkCompact(event);
+ memory_reducer_->NotifyMarkCompact(event);
}
}
@@ -955,7 +1008,7 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
// generator needs incremental marking to stay off after it aborted.
if (!ShouldAbortIncrementalMarking() && incremental_marking()->IsStopped() &&
incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
- incremental_marking()->Start(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
+ StartIncrementalMarking(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
}
return next_gc_likely_to_collect_more;
@@ -977,7 +1030,7 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
MemoryReducer::Event event;
event.type = MemoryReducer::kContextDisposed;
event.time_ms = MonotonicallyIncreasingTimeInMs();
- memory_reducer_.NotifyContextDisposed(event);
+ memory_reducer_->NotifyContextDisposed(event);
return ++contexts_disposed_;
}
@@ -986,12 +1039,14 @@ void Heap::StartIncrementalMarking(int gc_flags,
const GCCallbackFlags gc_callback_flags,
const char* reason) {
DCHECK(incremental_marking()->IsStopped());
- incremental_marking()->Start(gc_flags, gc_callback_flags, reason);
+ set_current_gc_flags(gc_flags);
+ current_gc_callback_flags_ = gc_callback_flags;
+ incremental_marking()->Start(reason);
}
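+
+// Note the ordering above: the GC flags and callback flags are cached on
+// the Heap before IncrementalMarking::Start(reason) runs, which is what
+// lets HandleGCRequest() earlier in this file reuse current_gc_flags_ and
+// current_gc_callback_flags_ when the marking interrupt fires.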
void Heap::StartIdleIncrementalMarking() {
- gc_idle_time_handler_.ResetNoProgressCounter();
+ gc_idle_time_handler_->ResetNoProgressCounter();
StartIncrementalMarking(kReduceMemoryFootprintMask, kNoGCCallbackFlags,
"idle");
}
@@ -1065,7 +1120,7 @@ bool Heap::ReserveSpace(Reservation* reservations) {
} else {
allocation = paged_space(space)->AllocateRawUnaligned(size);
}
- HeapObject* free_space;
+ HeapObject* free_space = nullptr;
if (allocation.To(&free_space)) {
// Mark with a free list node, in case we have a GC before
// deserializing.
@@ -1228,8 +1283,8 @@ bool Heap::PerformGarbageCollection(
// We finished a marking cycle. We can uncommit the marking deque until
// we start marking again.
- mark_compact_collector_.marking_deque()->Uninitialize();
- mark_compact_collector_.EnsureMarkingDequeIsCommitted(
+ mark_compact_collector()->marking_deque()->Uninitialize();
+ mark_compact_collector()->EnsureMarkingDequeIsCommitted(
MarkCompactCollector::kMinMarkingDequeSize);
}
@@ -1323,13 +1378,13 @@ void Heap::MarkCompact() {
uint64_t size_of_objects_before_gc = SizeOfObjects();
- mark_compact_collector_.Prepare();
+ mark_compact_collector()->Prepare();
ms_count_++;
MarkCompactPrologue();
- mark_compact_collector_.CollectGarbage();
+ mark_compact_collector()->CollectGarbage();
LOG(isolate_, ResourceEvent("markcompact", "end"));
@@ -1374,30 +1429,6 @@ void Heap::MarkCompactPrologue() {
}
-// Helper class for copying HeapObjects
-class ScavengeVisitor : public ObjectVisitor {
- public:
- explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
-
- void VisitPointer(Object** p) { ScavengePointer(p); }
-
- void VisitPointers(Object** start, Object** end) {
- // Copy all HeapObject pointers in [start, end)
- for (Object** p = start; p < end; p++) ScavengePointer(p);
- }
-
- private:
- void ScavengePointer(Object** p) {
- Object* object = *p;
- if (!heap_->InNewSpace(object)) return;
- Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
- reinterpret_cast<HeapObject*>(object));
- }
-
- Heap* heap_;
-};
-
-
#ifdef VERIFY_HEAP
// Visitor class to verify pointers in code or data space do not point into
// new space.
@@ -1460,56 +1491,6 @@ void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
}
-void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
- if (event == kStoreBufferStartScanningPagesEvent) {
- start_of_current_page_ = NULL;
- current_page_ = NULL;
- } else if (event == kStoreBufferScanningPageEvent) {
- if (current_page_ != NULL) {
- // If this page already overflowed the store buffer during this iteration.
- if (current_page_->scan_on_scavenge()) {
- // Then we should wipe out the entries that have been added for it.
- store_buffer_->SetTop(start_of_current_page_);
- } else if (store_buffer_->Top() - start_of_current_page_ >=
- (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
- // Did we find too many pointers in the previous page? The heuristic is
- // that no page can take more then 1/5 the remaining slots in the store
- // buffer.
- current_page_->set_scan_on_scavenge(true);
- store_buffer_->SetTop(start_of_current_page_);
- } else {
- // In this case the page we scanned took a reasonable number of slots in
- // the store buffer. It has now been rehabilitated and is no longer
- // marked scan_on_scavenge.
- DCHECK(!current_page_->scan_on_scavenge());
- }
- }
- start_of_current_page_ = store_buffer_->Top();
- current_page_ = page;
- } else if (event == kStoreBufferFullEvent) {
- // The current page overflowed the store buffer again. Wipe out its entries
- // in the store buffer and mark it scan-on-scavenge again. This may happen
- // several times while scanning.
- if (current_page_ == NULL) {
- // Store Buffer overflowed while scanning promoted objects. These are not
- // in any particular page, though they are likely to be clustered by the
- // allocation routines.
- store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
- } else {
- // Store Buffer overflowed while scanning a particular old space page for
- // pointers to new space.
- DCHECK(current_page_ == page);
- DCHECK(page != NULL);
- current_page_->set_scan_on_scavenge(true);
- DCHECK(start_of_current_page_ != store_buffer_->Top());
- store_buffer_->SetTop(start_of_current_page_);
- }
- } else {
- UNREACHABLE();
- }
-}
-
-
void PromotionQueue::Initialize() {
// The last to-space page may be used for promotion queue. On promotion
// conflict, we use the emergency stack.
@@ -1592,9 +1573,9 @@ void Heap::Scavenge() {
// Used for updating survived_since_last_expansion_ at function end.
intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
- SelectScavengingVisitorsTable();
+ scavenge_collector_->SelectScavengingVisitorsTable();
- PrepareArrayBufferDiscoveryInNewSpace();
+ array_buffer_tracker()->PrepareDiscoveryInNewSpace();
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
@@ -1634,7 +1615,7 @@ void Heap::Scavenge() {
GCTracer::Scope::SCAVENGER_OLD_TO_NEW_POINTERS);
StoreBufferRebuildScope scope(this, store_buffer(),
&ScavengeStoreBufferCallback);
- store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
+ store_buffer()->IteratePointersToNewSpace(&Scavenger::ScavengeObject);
}
{
@@ -1696,7 +1677,7 @@ void Heap::Scavenge() {
new_space_.LowerInlineAllocationLimit(
new_space_.inline_allocation_limit_step());
- FreeDeadArrayBuffers(true);
+ array_buffer_tracker()->FreeDead(true);
// Update how much has survived scavenge.
IncrementYoungSurvivorsCounter(static_cast<int>(
@@ -1791,116 +1772,6 @@ void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
}
-void Heap::RegisterNewArrayBuffer(bool in_new_space, void* data,
- size_t length) {
- if (!data) return;
- if (in_new_space) {
- live_array_buffers_for_scavenge_[data] = length;
- } else {
- live_array_buffers_[data] = length;
- }
-
- // We may go over the limit of externally allocated memory here. We call the
- // api function to trigger a GC in this case.
- reinterpret_cast<v8::Isolate*>(isolate_)
- ->AdjustAmountOfExternalAllocatedMemory(length);
-}
-
-
-void Heap::UnregisterArrayBuffer(bool in_new_space, void* data) {
- if (!data) return;
-
- std::map<void*, size_t>* live_buffers =
- in_new_space ? &live_array_buffers_for_scavenge_ : &live_array_buffers_;
- std::map<void*, size_t>* not_yet_discovered_buffers =
- in_new_space ? &not_yet_discovered_array_buffers_for_scavenge_
- : &not_yet_discovered_array_buffers_;
-
- DCHECK(live_buffers->count(data) > 0);
- live_buffers->erase(data);
- not_yet_discovered_buffers->erase(data);
-}
-
-
-void Heap::RegisterLiveArrayBuffer(bool from_scavenge, void* data) {
- // ArrayBuffer might be in the middle of being constructed.
- if (data == undefined_value()) return;
- if (from_scavenge) {
- not_yet_discovered_array_buffers_for_scavenge_.erase(data);
- } else if (!not_yet_discovered_array_buffers_.erase(data)) {
- not_yet_discovered_array_buffers_for_scavenge_.erase(data);
- }
-}
-
-
-void Heap::FreeDeadArrayBuffers(bool from_scavenge) {
- size_t freed_memory = 0;
- for (auto& buffer : not_yet_discovered_array_buffers_for_scavenge_) {
- isolate()->array_buffer_allocator()->Free(buffer.first, buffer.second);
- freed_memory += buffer.second;
- live_array_buffers_for_scavenge_.erase(buffer.first);
- }
-
- if (!from_scavenge) {
- for (auto& buffer : not_yet_discovered_array_buffers_) {
- isolate()->array_buffer_allocator()->Free(buffer.first, buffer.second);
- freed_memory += buffer.second;
- live_array_buffers_.erase(buffer.first);
- }
- }
-
- not_yet_discovered_array_buffers_for_scavenge_ =
- live_array_buffers_for_scavenge_;
- if (!from_scavenge) not_yet_discovered_array_buffers_ = live_array_buffers_;
-
- // Do not call through the api as this code is triggered while doing a GC.
- amount_of_external_allocated_memory_ -= freed_memory;
-}
-
-
-void Heap::TearDownArrayBuffers() {
- size_t freed_memory = 0;
- for (auto& buffer : live_array_buffers_) {
- isolate()->array_buffer_allocator()->Free(buffer.first, buffer.second);
- freed_memory += buffer.second;
- }
- for (auto& buffer : live_array_buffers_for_scavenge_) {
- isolate()->array_buffer_allocator()->Free(buffer.first, buffer.second);
- freed_memory += buffer.second;
- }
- live_array_buffers_.clear();
- live_array_buffers_for_scavenge_.clear();
- not_yet_discovered_array_buffers_.clear();
- not_yet_discovered_array_buffers_for_scavenge_.clear();
-
- if (freed_memory > 0) {
- reinterpret_cast<v8::Isolate*>(isolate_)
- ->AdjustAmountOfExternalAllocatedMemory(
- -static_cast<int64_t>(freed_memory));
- }
-}
-
-
-void Heap::PrepareArrayBufferDiscoveryInNewSpace() {
- not_yet_discovered_array_buffers_for_scavenge_ =
- live_array_buffers_for_scavenge_;
-}
-
-
-void Heap::PromoteArrayBuffer(Object* obj) {
- JSArrayBuffer* buffer = JSArrayBuffer::cast(obj);
- if (buffer->is_external()) return;
- void* data = buffer->backing_store();
- if (!data) return;
- // ArrayBuffer might be in the middle of being constructed.
- if (data == undefined_value()) return;
- DCHECK(live_array_buffers_for_scavenge_.count(data) > 0);
- live_array_buffers_[data] = live_array_buffers_for_scavenge_[data];
- live_array_buffers_for_scavenge_.erase(data);
- not_yet_discovered_array_buffers_for_scavenge_.erase(data);
-}
-
-
void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
Object* allocation_site_obj =
VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
@@ -1973,17 +1844,6 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
}
-class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
- public:
- static inline void VisitPointer(Heap* heap, Object** p) {
- Object* object = *p;
- if (!heap->InNewSpace(object)) return;
- Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
- reinterpret_cast<HeapObject*>(object));
- }
-};
-
-
Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
Address new_space_front) {
do {
@@ -1995,7 +1855,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
if (!NewSpacePage::IsAtEnd(new_space_front)) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
new_space_front +=
- NewSpaceScavenger::IterateBody(object->map(), object);
+ StaticScavengeVisitor::IterateBody(object->map(), object);
} else {
new_space_front =
NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
@@ -2016,8 +1876,42 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// for pointers to from semispace instead of looking for pointers
// to new space.
DCHECK(!target->IsMap());
-
- IteratePointersToFromSpace(target, size, &ScavengeObject);
+ Address obj_address = target->address();
+
+ // We are not collecting slots on new space objects during mutation,
+ // thus we have to scan for pointers to evacuation candidates when we
+ // promote objects. But we should not record any slots in non-black
+ // objects. Grey objects' slots would be rescanned anyway. A white
+ // object might not survive until the end of the collection, so it
+ // would be a violation of the invariant to record its slots.
+ bool record_slots = false;
+ if (incremental_marking()->IsCompacting()) {
+ MarkBit mark_bit = Marking::MarkBitFrom(target);
+ record_slots = Marking::IsBlack(mark_bit);
+ }
+#if V8_DOUBLE_FIELDS_UNBOXING
+ LayoutDescriptorHelper helper(target->map());
+ bool has_only_tagged_fields = helper.all_fields_tagged();
+
+ if (!has_only_tagged_fields) {
+ for (int offset = 0; offset < size;) {
+ int end_of_region_offset;
+ if (helper.IsTagged(offset, size, &end_of_region_offset)) {
+ IterateAndMarkPointersToFromSpace(
+ target, obj_address + offset,
+ obj_address + end_of_region_offset, record_slots,
+ &Scavenger::ScavengeObject);
+ }
+ offset = end_of_region_offset;
+ }
+ } else {
+#endif
+ IterateAndMarkPointersToFromSpace(target, obj_address,
+ obj_address + size, record_slots,
+ &Scavenger::ScavengeObject);
+#if V8_DOUBLE_FIELDS_UNBOXING
+ }
+#endif
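+ // A minimal sketch of the walk above: with V8_DOUBLE_FIELDS_UNBOXING,
+ // only the tagged regions of the promoted object are scanned. For a
+ // hypothetical layout [tagged | raw double | tagged], the
+ // LayoutDescriptorHelper yields the two tagged ranges and
+ // IterateAndMarkPointersToFromSpace runs once per range.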
}
}
@@ -2096,452 +1990,13 @@ HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
}
-enum LoggingAndProfiling {
- LOGGING_AND_PROFILING_ENABLED,
- LOGGING_AND_PROFILING_DISABLED
-};
-
-
-enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
-
-
-template <MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
-class ScavengingVisitor : public StaticVisitorBase {
- public:
- static void Initialize() {
- table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
- table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
- table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
- table_.Register(kVisitByteArray, &EvacuateByteArray);
- table_.Register(kVisitFixedArray, &EvacuateFixedArray);
- table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
- table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
- table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
- table_.Register(kVisitJSArrayBuffer, &EvacuateJSArrayBuffer);
-
- table_.Register(
- kVisitNativeContext,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- Context::kSize>);
-
- table_.Register(
- kVisitConsString,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- ConsString::kSize>);
-
- table_.Register(
- kVisitSlicedString,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- SlicedString::kSize>);
-
- table_.Register(
- kVisitSymbol,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- Symbol::kSize>);
-
- table_.Register(
- kVisitSharedFunctionInfo,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- SharedFunctionInfo::kSize>);
-
- table_.Register(kVisitJSWeakCollection,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitJSTypedArray,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitJSDataView,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- table_.Register(kVisitJSRegExp,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
-
- if (marks_handling == IGNORE_MARKS) {
- table_.Register(
- kVisitJSFunction,
- &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- JSFunction::kSize>);
- } else {
- table_.Register(kVisitJSFunction, &EvacuateJSFunction);
- }
-
- table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
- kVisitDataObject, kVisitDataObjectGeneric>();
-
- table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
- kVisitJSObject, kVisitJSObjectGeneric>();
-
- table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
- kVisitStruct, kVisitStructGeneric>();
- }
-
- static VisitorDispatchTable<ScavengingCallback>* GetTable() {
- return &table_;
- }
-
- private:
- enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
-
- static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
- bool should_record = false;
-#ifdef DEBUG
- should_record = FLAG_heap_stats;
-#endif
- should_record = should_record || FLAG_log_gc;
- if (should_record) {
- if (heap->new_space()->Contains(obj)) {
- heap->new_space()->RecordAllocation(obj);
- } else {
- heap->new_space()->RecordPromotion(obj);
- }
- }
- }
-
- // Helper function used by CopyObject to copy a source object to an
- // allocated target object and update the forwarding pointer in the source
- // object. Returns the target object.
- INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
- HeapObject* target, int size)) {
- // If we migrate into to-space, then the to-space top pointer should be
- // right after the target object. Incorporate double alignment
- // over-allocation.
- DCHECK(!heap->InToSpace(target) ||
- target->address() + size == heap->new_space()->top() ||
- target->address() + size + kPointerSize == heap->new_space()->top());
-
- // Make sure that we do not overwrite the promotion queue which is at
- // the end of to-space.
- DCHECK(!heap->InToSpace(target) ||
- heap->promotion_queue()->IsBelowPromotionQueue(
- heap->new_space()->top()));
-
- // Copy the content of source to target.
- heap->CopyBlock(target->address(), source->address(), size);
-
- // Set the forwarding address.
- source->set_map_word(MapWord::FromForwardingAddress(target));
-
- if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
- // Update NewSpace stats if necessary.
- RecordCopiedObject(heap, target);
- heap->OnMoveEvent(target, source, size);
- }
-
- if (marks_handling == TRANSFER_MARKS) {
- if (Marking::TransferColor(source, target)) {
- MemoryChunk::IncrementLiveBytesFromGC(target, size);
- }
- }
- }
-
- template <AllocationAlignment alignment>
- static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
- HeapObject* object, int object_size) {
- Heap* heap = map->GetHeap();
-
- DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
- AllocationResult allocation =
- heap->new_space()->AllocateRaw(object_size, alignment);
-
- HeapObject* target = NULL; // Initialization to please compiler.
- if (allocation.To(&target)) {
- // Order is important here: Set the promotion limit before storing a
- // filler for double alignment or migrating the object. Otherwise we
- // may end up overwriting promotion queue entries when we migrate the
- // object.
- heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
-
- MigrateObject(heap, object, target, object_size);
-
- // Update slot to new target.
- *slot = target;
-
- heap->IncrementSemiSpaceCopiedObjectSize(object_size);
- return true;
- }
- return false;
- }
-
-
- template <ObjectContents object_contents, AllocationAlignment alignment>
- static inline bool PromoteObject(Map* map, HeapObject** slot,
- HeapObject* object, int object_size) {
- Heap* heap = map->GetHeap();
-
- AllocationResult allocation =
- heap->old_space()->AllocateRaw(object_size, alignment);
-
- HeapObject* target = NULL; // Initialization to please compiler.
- if (allocation.To(&target)) {
- MigrateObject(heap, object, target, object_size);
-
- // Update slot to new target.
- *slot = target;
-
- if (object_contents == POINTER_OBJECT) {
- if (map->instance_type() == JS_FUNCTION_TYPE) {
- heap->promotion_queue()->insert(target,
- JSFunction::kNonWeakFieldsEndOffset);
- } else {
- heap->promotion_queue()->insert(target, object_size);
- }
- }
- heap->IncrementPromotedObjectsSize(object_size);
- return true;
- }
- return false;
- }
-
-
- template <ObjectContents object_contents, AllocationAlignment alignment>
- static inline void EvacuateObject(Map* map, HeapObject** slot,
- HeapObject* object, int object_size) {
- SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
- SLOW_DCHECK(object->Size() == object_size);
- Heap* heap = map->GetHeap();
-
- if (!heap->ShouldBePromoted(object->address(), object_size)) {
- // A semi-space copy may fail due to fragmentation. In that case, we
- // try to promote the object.
- if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
- return;
- }
- }
-
- if (PromoteObject<object_contents, alignment>(map, slot, object,
- object_size)) {
- return;
- }
-
- // If promotion failed, we try to copy the object to the other semi-space
- if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
-
- UNREACHABLE();
- }
-
-
- static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
- HeapObject* object) {
- ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
- JSFunction::kSize>(map, slot, object);
-
- MapWord map_word = object->map_word();
- DCHECK(map_word.IsForwardingAddress());
- HeapObject* target = map_word.ToForwardingAddress();
-
- MarkBit mark_bit = Marking::MarkBitFrom(target);
- if (Marking::IsBlack(mark_bit)) {
- // This object is black and it might not be rescanned by marker.
- // We should explicitly record code entry slot for compaction because
- // promotion queue processing (IterateAndMarkPointersToFromSpace) will
- // miss it as it is not HeapObject-tagged.
- Address code_entry_slot =
- target->address() + JSFunction::kCodeEntryOffset;
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
- map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
- target, code_entry_slot, code);
- }
- }
-
-
- static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
- EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
- object_size);
- }
-
-
- static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
- HeapObject* object) {
- int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
- int object_size = FixedDoubleArray::SizeFor(length);
- EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
- }
-
-
- static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
- EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
-
- MapWord map_word = object->map_word();
- DCHECK(map_word.IsForwardingAddress());
- FixedTypedArrayBase* target =
- reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
- if (target->base_pointer() != Smi::FromInt(0))
- target->set_base_pointer(target, SKIP_WRITE_BARRIER);
- }
-
-
- static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
- EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
-
- MapWord map_word = object->map_word();
- DCHECK(map_word.IsForwardingAddress());
- FixedTypedArrayBase* target =
- reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
- if (target->base_pointer() != Smi::FromInt(0))
- target->set_base_pointer(target, SKIP_WRITE_BARRIER);
- }
-
-
- static inline void EvacuateJSArrayBuffer(Map* map, HeapObject** slot,
- HeapObject* object) {
- ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
-
- Heap* heap = map->GetHeap();
- MapWord map_word = object->map_word();
- DCHECK(map_word.IsForwardingAddress());
- HeapObject* target = map_word.ToForwardingAddress();
- if (!heap->InNewSpace(target)) heap->PromoteArrayBuffer(target);
- }
-
-
- static inline void EvacuateByteArray(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
- EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
- }
-
-
- static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = SeqOneByteString::cast(object)
- ->SeqOneByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
- }
-
-
- static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
- HeapObject* object) {
- int object_size = SeqTwoByteString::cast(object)
- ->SeqTwoByteStringSize(map->instance_type());
- EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
- }
-
-
- static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
- HeapObject* object) {
- DCHECK(IsShortcutCandidate(map->instance_type()));
-
- Heap* heap = map->GetHeap();
-
- if (marks_handling == IGNORE_MARKS &&
- ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
- HeapObject* first =
- HeapObject::cast(ConsString::cast(object)->unchecked_first());
-
- *slot = first;
-
- if (!heap->InNewSpace(first)) {
- object->set_map_word(MapWord::FromForwardingAddress(first));
- return;
- }
-
- MapWord first_word = first->map_word();
- if (first_word.IsForwardingAddress()) {
- HeapObject* target = first_word.ToForwardingAddress();
-
- *slot = target;
- object->set_map_word(MapWord::FromForwardingAddress(target));
- return;
- }
-
- Heap::ScavengeObjectSlow(slot, first);
- object->set_map_word(MapWord::FromForwardingAddress(*slot));
- return;
- }
-
- int object_size = ConsString::kSize;
- EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
- object_size);
- }
-
- template <ObjectContents object_contents>
- class ObjectEvacuationStrategy {
- public:
- template <int object_size>
- static inline void VisitSpecialized(Map* map, HeapObject** slot,
- HeapObject* object) {
- EvacuateObject<object_contents, kWordAligned>(map, slot, object,
- object_size);
- }
-
- static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
- int object_size = map->instance_size();
- EvacuateObject<object_contents, kWordAligned>(map, slot, object,
- object_size);
- }
- };
-
- static VisitorDispatchTable<ScavengingCallback> table_;
-};
-
-
-template <MarksHandling marks_handling,
- LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback>
- ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
-
-
-static void InitializeScavengingVisitorsTables() {
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
- ScavengingVisitor<TRANSFER_MARKS,
- LOGGING_AND_PROFILING_ENABLED>::Initialize();
- ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
-}
-
-
-void Heap::SelectScavengingVisitorsTable() {
- bool logging_and_profiling =
- FLAG_verify_predictable || isolate()->logger()->is_logging() ||
- isolate()->cpu_profiler()->is_profiling() ||
- (isolate()->heap_profiler() != NULL &&
- isolate()->heap_profiler()->is_tracking_object_moves());
-
- if (!incremental_marking()->IsMarking()) {
- if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
- IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
- } else {
- scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
- IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
- }
- } else {
- if (!logging_and_profiling) {
- scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
- TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
- } else {
- scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
- TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
- }
-
- if (incremental_marking()->IsCompacting()) {
- // When compacting forbid short-circuiting of cons-strings.
- // Scavenging code relies on the fact that new space object
- // can't be evacuated into evacuation candidate but
- // short-circuiting violates this assumption.
- scavenging_visitors_table_.Register(
- StaticVisitorBase::kVisitShortcutCandidate,
- scavenging_visitors_table_.GetVisitorById(
- StaticVisitorBase::kVisitConsString));
- }
- }
+void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) {
+ return array_buffer_tracker()->RegisterNew(buffer);
}
-void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
- MapWord first_word = object->map_word();
- SLOW_DCHECK(!first_word.IsForwardingAddress());
- Map* map = first_word.ToMap();
- map->GetHeap()->scavenging_visitors_table_.GetVisitor(map)(map, p, object);
+void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
+ return array_buffer_tracker()->Unregister(buffer);
}
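+
+// Array buffer bookkeeping now lives behind ArrayBufferTracker (declared
+// in the newly included src/heap/array-buffer-tracker.h); a hypothetical
+// call site:
+//
+//   heap->RegisterNewArrayBuffer(*buffer);  // when a backing store appears
+//   heap->UnregisterArrayBuffer(*buffer);   // when it is detached or freed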
@@ -2559,11 +2014,12 @@ void Heap::ConfigureInitialOldGenerationSize() {
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result = nullptr;
- AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
+ AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
- reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
+ reinterpret_cast<Map*>(result)->set_map(
+ reinterpret_cast<Map*>(root(kMetaMapRootIndex)));
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
// Initialize to only containing tagged fields.
@@ -2591,8 +2047,8 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
AllocationResult Heap::AllocateMap(InstanceType instance_type,
int instance_size,
ElementsKind elements_kind) {
- HeapObject* result;
- AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
+ HeapObject* result = nullptr;
+ AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
result->set_map_no_write_barrier(meta_map());
@@ -2615,7 +2071,7 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
}
// Must be called only after |instance_type|, |instance_size| and
// |layout_descriptor| are set.
- map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
+ map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
map->set_bit_field(0);
map->set_bit_field2(1 << Map::kIsExtensible);
int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
@@ -2630,10 +2086,10 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
AllocationSpace space) {
- HeapObject* obj;
+ HeapObject* obj = nullptr;
{
AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
- AllocationResult allocation = AllocateRaw(size, space, space, align);
+ AllocationResult allocation = AllocateRaw(size, space, align);
if (!allocation.To(&obj)) return allocation;
}
#ifdef DEBUG
@@ -2674,7 +2130,7 @@ const Heap::StructTable Heap::struct_table[] = {
bool Heap::CreateInitialMaps() {
- HeapObject* obj;
+ HeapObject* obj = nullptr;
{
AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
if (!allocation.To(&obj)) return false;
@@ -2910,9 +2366,9 @@ bool Heap::CreateInitialMaps() {
if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
set_empty_byte_array(byte_array);
- BytecodeArray* bytecode_array;
+ BytecodeArray* bytecode_array = nullptr;
AllocationResult allocation =
- AllocateBytecodeArray(0, nullptr, kPointerSize);
+ AllocateBytecodeArray(0, nullptr, 0, 0, empty_fixed_array());
if (!allocation.To(&bytecode_array)) {
return false;
}
@@ -2942,12 +2398,11 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
int size = HeapNumber::kSize;
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
- AllocationSpace space = SelectSpace(size, pretenure);
+ AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result;
+ HeapObject* result = nullptr;
{
- AllocationResult allocation =
- AllocateRaw(size, space, OLD_SPACE, kDoubleUnaligned);
+ AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
if (!allocation.To(&result)) return allocation;
}
@@ -2963,12 +2418,12 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
int size = Type::kSize; \
STATIC_ASSERT(Type::kSize <= Page::kMaxRegularHeapObjectSize); \
\
- AllocationSpace space = SelectSpace(size, pretenure); \
+ AllocationSpace space = SelectSpace(pretenure); \
\
- HeapObject* result; \
+ HeapObject* result = nullptr; \
{ \
AllocationResult allocation = \
- AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned); \
+ AllocateRaw(size, space, kSimd128Unaligned); \
if (!allocation.To(&result)) return allocation; \
} \
\
@@ -2987,9 +2442,9 @@ AllocationResult Heap::AllocateCell(Object* value) {
int size = Cell::kSize;
STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
- HeapObject* result;
+ HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
result->set_map_no_write_barrier(cell_map());
@@ -3002,8 +2457,8 @@ AllocationResult Heap::AllocatePropertyCell() {
int size = PropertyCell::kSize;
STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
- HeapObject* result;
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
+ HeapObject* result = nullptr;
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
result->set_map_no_write_barrier(global_property_cell_map());
@@ -3019,9 +2474,9 @@ AllocationResult Heap::AllocatePropertyCell() {
AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
int size = WeakCell::kSize;
STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
- HeapObject* result = NULL;
+ HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
result->set_map_no_write_barrier(weak_cell_map());
@@ -3165,14 +2620,16 @@ void Heap::CreateInitialObjects() {
roots_[constant_string_table[i].index] = *str;
}
+ // The {hidden_string} is special because it is an empty string, but does not
+ // match any string (even the {empty_string}) when looked up in properties.
// Allocate the hidden string which is used to identify the hidden properties
// in JSObjects. The hash code has a special value so that it will not match
// the empty string when searching for the property. It cannot be part of the
// loop above because it needs to be allocated manually with the special
// hash code in place. The hash code for the hidden_string is zero to ensure
// that it will always be at the first entry in property descriptors.
- hidden_string_ = *factory->NewOneByteInternalizedString(
- OneByteVector("", 0), String::kEmptyStringHash);
+ set_hidden_string(*factory->NewOneByteInternalizedString(
+ OneByteVector("", 0), String::kEmptyStringHash));
// Create the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
@@ -3203,7 +2660,7 @@ void Heap::CreateInitialObjects() {
{
HandleScope scope(isolate());
-#define SYMBOL_INIT(name, varname, description) \
+#define SYMBOL_INIT(name, description) \
Handle<Symbol> name = factory->NewSymbol(); \
Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
name->set_name(*name##d); \
@@ -3243,6 +2700,9 @@ void Heap::CreateInitialObjects() {
set_extra_natives_source_cache(
*factory->NewFixedArray(ExtraNatives::GetBuiltinsCount()));
+ set_experimental_extra_natives_source_cache(
+ *factory->NewFixedArray(ExperimentalExtraNatives::GetBuiltinsCount()));
+
set_code_stub_natives_source_cache(
*factory->NewFixedArray(CodeStubNatives::GetBuiltinsCount()));
@@ -3260,9 +2720,11 @@ void Heap::CreateInitialObjects() {
set_microtask_queue(empty_fixed_array());
{
- Code::Kind kinds[] = {Code::LOAD_IC, Code::KEYED_LOAD_IC, Code::STORE_IC,
- Code::KEYED_STORE_IC};
- FeedbackVectorSpec spec(0, 4, kinds);
+ FeedbackVectorSlotKind kinds[] = {FeedbackVectorSlotKind::LOAD_IC,
+ FeedbackVectorSlotKind::KEYED_LOAD_IC,
+ FeedbackVectorSlotKind::STORE_IC,
+ FeedbackVectorSlotKind::KEYED_STORE_IC};
+ StaticFeedbackVectorSpec spec(0, 4, kinds);
Handle<TypeFeedbackVector> dummy_vector =
factory->NewTypeFeedbackVector(&spec);
for (int i = 0; i < 4; i++) {
@@ -3280,6 +2742,8 @@ void Heap::CreateInitialObjects() {
*WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
TENURED));
+ set_script_list(Smi::FromInt(0));
+
Handle<SeededNumberDictionary> slow_element_dictionary =
SeededNumberDictionary::New(isolate(), 0, TENURED);
slow_element_dictionary->set_requires_slow_elements();
@@ -3287,9 +2751,14 @@ void Heap::CreateInitialObjects() {
set_materialized_objects(*factory->NewFixedArray(0, TENURED));
- // Handling of script id generation is in Factory::NewScript.
+ // Handling of script id generation is in Heap::NextScriptId().
set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
+ // Allocate the empty script.
+ Handle<Script> script = factory->NewScript(factory->empty_string());
+ script->set_type(Script::TYPE_NATIVE);
+ set_empty_script(*script);
+
Handle<PropertyCell> cell = factory->NewPropertyCell();
cell->set_value(Smi::FromInt(Isolate::kArrayProtectorValid));
set_array_protector(*cell);
@@ -3323,19 +2792,6 @@ void Heap::CreateInitialObjects() {
}
-void Heap::AddPrivateGlobalSymbols(Handle<Object> private_intern_table) {
-#define ADD_SYMBOL_TO_PRIVATE_INTERN_TABLE(name_arg) \
- { \
- Handle<Symbol> symbol(Symbol::cast(roots_[k##name_arg##RootIndex])); \
- Handle<String> name_arg##d(String::cast(symbol->name())); \
- JSObject::AddProperty(Handle<JSObject>::cast(private_intern_table), \
- name_arg##d, symbol, NONE); \
- }
- PRIVATE_SYMBOL_LIST(ADD_SYMBOL_TO_PRIVATE_INTERN_TABLE)
-#undef ADD_SYMBOL_TO_PRIVATE_INTERN_TABLE
-}
-
-
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
switch (root_index) {
case kStoreBufferTopRootIndex:
@@ -3348,6 +2804,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kPolymorphicCodeCacheRootIndex:
case kEmptyScriptRootIndex:
case kSymbolRegistryRootIndex:
+ case kScriptListRootIndex:
case kMaterializedObjectsRootIndex:
case kAllocationSitesScratchpadRootIndex:
case kMicrotaskQueueRootIndex:
@@ -3371,99 +2828,7 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
return !RootCanBeWrittenAfterInitialization(root_index) &&
- !InNewSpace(roots_array_start()[root_index]);
-}
-
-
-Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
- Object* key_pattern, ResultsCacheType type) {
- FixedArray* cache;
- if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
- if (type == STRING_SPLIT_SUBSTRINGS) {
- DCHECK(key_pattern->IsString());
- if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
- cache = heap->string_split_cache();
- } else {
- DCHECK(type == REGEXP_MULTIPLE_INDICES);
- DCHECK(key_pattern->IsFixedArray());
- cache = heap->regexp_multiple_cache();
- }
-
- uint32_t hash = key_string->Hash();
- uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
- ~(kArrayEntriesPerCacheEntry - 1));
- if (cache->get(index + kStringOffset) == key_string &&
- cache->get(index + kPatternOffset) == key_pattern) {
- return cache->get(index + kArrayOffset);
- }
- index =
- ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
- if (cache->get(index + kStringOffset) == key_string &&
- cache->get(index + kPatternOffset) == key_pattern) {
- return cache->get(index + kArrayOffset);
- }
- return Smi::FromInt(0);
-}
-
-
-void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
- Handle<Object> key_pattern,
- Handle<FixedArray> value_array,
- ResultsCacheType type) {
- Factory* factory = isolate->factory();
- Handle<FixedArray> cache;
- if (!key_string->IsInternalizedString()) return;
- if (type == STRING_SPLIT_SUBSTRINGS) {
- DCHECK(key_pattern->IsString());
- if (!key_pattern->IsInternalizedString()) return;
- cache = factory->string_split_cache();
- } else {
- DCHECK(type == REGEXP_MULTIPLE_INDICES);
- DCHECK(key_pattern->IsFixedArray());
- cache = factory->regexp_multiple_cache();
- }
-
- uint32_t hash = key_string->Hash();
- uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
- ~(kArrayEntriesPerCacheEntry - 1));
- if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
- cache->set(index + kStringOffset, *key_string);
- cache->set(index + kPatternOffset, *key_pattern);
- cache->set(index + kArrayOffset, *value_array);
- } else {
- uint32_t index2 =
- ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
- if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
- cache->set(index2 + kStringOffset, *key_string);
- cache->set(index2 + kPatternOffset, *key_pattern);
- cache->set(index2 + kArrayOffset, *value_array);
- } else {
- cache->set(index2 + kStringOffset, Smi::FromInt(0));
- cache->set(index2 + kPatternOffset, Smi::FromInt(0));
- cache->set(index2 + kArrayOffset, Smi::FromInt(0));
- cache->set(index + kStringOffset, *key_string);
- cache->set(index + kPatternOffset, *key_pattern);
- cache->set(index + kArrayOffset, *value_array);
- }
- }
- // If the array is a reasonably short list of substrings, convert it into a
- // list of internalized strings.
- if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
- for (int i = 0; i < value_array->length(); i++) {
- Handle<String> str(String::cast(value_array->get(i)), isolate);
- Handle<String> internalized_str = factory->InternalizeString(str);
- value_array->set(i, *internalized_str);
- }
- }
- // Convert backing store to a copy-on-write array.
- value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
-}
-
-
-void RegExpResultsCache::Clear(FixedArray* cache) {
- for (int i = 0; i < kRegExpResultsCacheSize; i++) {
- cache->set(i, Smi::FromInt(0));
- }
+ !InNewSpace(root(root_index));
}
@@ -3522,8 +2887,8 @@ void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
// candidates are not part of the global list of old space pages and
// releasing an evacuation candidate due to a slots buffer overflow
// results in lost pages.
- mark_compact_collector()->RecordSlot(allocation_sites_scratchpad(), slot,
- *slot, SlotsBuffer::IGNORE_OVERFLOW);
+ mark_compact_collector()->ForceRecordSlot(allocation_sites_scratchpad(),
+ slot, *slot);
}
allocation_sites_scratchpad_length_++;
}
@@ -3580,7 +2945,7 @@ AllocationResult Heap::AllocateForeign(Address address,
// Statically ensure that it is safe to allocate foreigns in paged spaces.
STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
- Foreign* result;
+ Foreign* result = nullptr;
AllocationResult allocation = Allocate(foreign_map(), space);
if (!allocation.To(&result)) return allocation;
result->set_foreign_address(address);
@@ -3593,10 +2958,10 @@ AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
}
int size = ByteArray::SizeFor(length);
- AllocationSpace space = SelectSpace(size, pretenure);
- HeapObject* result;
+ AllocationSpace space = SelectSpace(pretenure);
+ HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, space);
if (!allocation.To(&result)) return allocation;
}
@@ -3608,15 +2973,19 @@ AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
AllocationResult Heap::AllocateBytecodeArray(int length,
const byte* const raw_bytecodes,
- int frame_size) {
+ int frame_size,
+ int parameter_count,
+ FixedArray* constant_pool) {
if (length < 0 || length > BytecodeArray::kMaxLength) {
v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
}
+ // Bytecode array is pretenured, so the constant pool array should be too.
+ DCHECK(!InNewSpace(constant_pool));
int size = BytecodeArray::SizeFor(length);
- HeapObject* result;
+ HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
@@ -3624,6 +2993,8 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
BytecodeArray* instance = BytecodeArray::cast(result);
instance->set_length(length);
instance->set_frame_size(frame_size);
+ instance->set_parameter_count(parameter_count);
+ instance->set_constant_pool(constant_pool);
CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);
return result;
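+
+// The widened signature above now also records the parameter count and a
+// (pretenured) constant pool; the bootstrap call earlier in this patch is
+// a concrete example:
+//
+//   AllocateBytecodeArray(0, nullptr, 0, 0, empty_fixed_array());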
@@ -3634,11 +3005,14 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
- filler->set_map_no_write_barrier(raw_unchecked_one_pointer_filler_map());
+ filler->set_map_no_write_barrier(
+ reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)));
} else if (size == 2 * kPointerSize) {
- filler->set_map_no_write_barrier(raw_unchecked_two_pointer_filler_map());
+ filler->set_map_no_write_barrier(
+ reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)));
} else {
- filler->set_map_no_write_barrier(raw_unchecked_free_space_map());
+ filler->set_map_no_write_barrier(
+ reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
FreeSpace::cast(filler)->nobarrier_set_size(size);
}
// At this point, we may be deserializing the heap from a snapshot, and
@@ -3649,6 +3023,8 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
bool Heap::CanMoveObjectStart(HeapObject* object) {
+ if (!FLAG_move_object_start) return false;
+
Address address = object->address();
if (lo_space()->Contains(object)) return false;
@@ -3793,10 +3169,10 @@ AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
int length, ExternalArrayType array_type, void* external_pointer,
PretenureFlag pretenure) {
int size = FixedTypedArrayBase::kHeaderSize;
- AllocationSpace space = SelectSpace(size, pretenure);
- HeapObject* result;
+ AllocationSpace space = SelectSpace(pretenure);
+ HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, space);
if (!allocation.To(&result)) return allocation;
}
@@ -3837,11 +3213,11 @@ AllocationResult Heap::AllocateFixedTypedArray(int length,
ForFixedTypedArray(array_type, &element_size, &elements_kind);
int size = OBJECT_POINTER_ALIGN(length * element_size +
FixedTypedArrayBase::kDataOffset);
- AllocationSpace space = SelectSpace(size, pretenure);
+ AllocationSpace space = SelectSpace(pretenure);
- HeapObject* object;
+ HeapObject* object = nullptr;
AllocationResult allocation = AllocateRaw(
- size, space, OLD_SPACE,
+ size, space,
array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
if (!allocation.To(&object)) return allocation;
@@ -3859,10 +3235,9 @@ AllocationResult Heap::AllocateFixedTypedArray(int length,
AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
- AllocationResult allocation =
- AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
+ AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
- HeapObject* result;
+ HeapObject* result = nullptr;
if (!allocation.To(&result)) return allocation;
if (immovable) {
@@ -3896,10 +3271,10 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
AllocationResult Heap::CopyCode(Code* code) {
AllocationResult allocation;
- HeapObject* result = NULL;
+ HeapObject* result = nullptr;
// Allocate an object the same size as the code object.
int obj_size = code->Size();
- allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
+ allocation = AllocateRaw(obj_size, CODE_SPACE);
if (!allocation.To(&result)) return allocation;
// Copy code object.
@@ -3921,7 +3296,7 @@ AllocationResult Heap::CopyCode(Code* code) {
AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
// Allocate ByteArray before the Code object, so that we do not risk
// leaving uninitialized Code object (and breaking the heap).
- ByteArray* reloc_info_array;
+ ByteArray* reloc_info_array = nullptr;
{
AllocationResult allocation =
AllocateByteArray(reloc_info.length(), TENURED);
@@ -3937,9 +3312,8 @@ AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
size_t relocation_offset =
static_cast<size_t>(code->instruction_end() - old_addr);
- HeapObject* result;
- AllocationResult allocation =
- AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
+ HeapObject* result = nullptr;
+ AllocationResult allocation = AllocateRaw(new_obj_size, CODE_SPACE);
if (!allocation.To(&result)) return allocation;
// Copy code object.
@@ -3985,15 +3359,12 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
AllocationSite* allocation_site) {
DCHECK(gc_state_ == NOT_IN_GC);
DCHECK(map->instance_type() != MAP_TYPE);
- // If allocation failures are disallowed, we may allocate in a different
- // space when new space is full and the object is not a large object.
- AllocationSpace retry_space = (space != NEW_SPACE) ? space : OLD_SPACE;
int size = map->instance_size();
if (allocation_site != NULL) {
size += AllocationMemento::kSize;
}
- HeapObject* result;
- AllocationResult allocation = AllocateRaw(size, space, retry_space);
+ HeapObject* result = nullptr;
+ AllocationResult allocation = AllocateRaw(size, space);
if (!allocation.To(&result)) return allocation;
// No need for write barrier since object is white and map is in old space.
result->set_map_no_write_barrier(map);
@@ -4052,9 +3423,8 @@ AllocationResult Heap::AllocateJSObjectFromMap(
FixedArray* properties = empty_fixed_array();
// Allocate the JSObject.
- int size = map->instance_size();
- AllocationSpace space = SelectSpace(size, pretenure);
- JSObject* js_obj;
+ AllocationSpace space = SelectSpace(pretenure);
+ JSObject* js_obj = nullptr;
AllocationResult allocation = Allocate(map, space, allocation_site);
if (!allocation.To(&js_obj)) return allocation;
@@ -4075,7 +3445,7 @@ AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
constructor->initial_map(), pretenure, allocation_site);
#ifdef DEBUG
// Make sure result is NOT a global object if valid.
- HeapObject* obj;
+ HeapObject* obj = nullptr;
DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
#endif
return allocation;
@@ -4092,69 +3462,24 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
map->instance_type() == JS_ARRAY_TYPE);
int object_size = map->instance_size();
- HeapObject* clone;
+ HeapObject* clone = nullptr;
DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
- WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+ int adjusted_object_size =
+ site != NULL ? object_size + AllocationMemento::kSize : object_size;
+ AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
+ if (!allocation.To(&clone)) return allocation;
- // If we're forced to always allocate, we use the general allocation
- // functions which may leave us with an object in old space.
- if (always_allocate()) {
- {
- AllocationResult allocation =
- AllocateRaw(object_size, NEW_SPACE, OLD_SPACE);
- if (!allocation.To(&clone)) return allocation;
- }
- Address clone_address = clone->address();
- CopyBlock(clone_address, source->address(), object_size);
-
- // Update write barrier for all tagged fields that lie beyond the header.
- const int start_offset = JSObject::kHeaderSize;
- const int end_offset = object_size;
-
-#if V8_DOUBLE_FIELDS_UNBOXING
- LayoutDescriptorHelper helper(map);
- bool has_only_tagged_fields = helper.all_fields_tagged();
-
- if (!has_only_tagged_fields) {
- for (int offset = start_offset; offset < end_offset;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
- RecordWrites(clone_address, offset,
- (end_of_region_offset - offset) / kPointerSize);
- }
- offset = end_of_region_offset;
- }
- } else {
-#endif
- // Object has only tagged fields.
- RecordWrites(clone_address, start_offset,
- (end_offset - start_offset) / kPointerSize);
-#if V8_DOUBLE_FIELDS_UNBOXING
- }
-#endif
-
- } else {
- wb_mode = SKIP_WRITE_BARRIER;
+ SLOW_DCHECK(InNewSpace(clone));
+ // Since we know the clone is allocated in new space, we can copy
+ // the contents without worrying about updating the write barrier.
+ CopyBlock(clone->address(), source->address(), object_size);
- {
- int adjusted_object_size =
- site != NULL ? object_size + AllocationMemento::kSize : object_size;
- AllocationResult allocation =
- AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
- if (!allocation.To(&clone)) return allocation;
- }
- SLOW_DCHECK(InNewSpace(clone));
- // Since we know the clone is allocated in new space, we can copy
- // the contents without worrying about updating the write barrier.
- CopyBlock(clone->address(), source->address(), object_size);
-
- if (site != NULL) {
- AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
- reinterpret_cast<Address>(clone) + object_size);
- InitializeAllocationMemento(alloc_memento, site);
- }
+ if (site != NULL) {
+ AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+ reinterpret_cast<Address>(clone) + object_size);
+ InitializeAllocationMemento(alloc_memento, site);
}
SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
@@ -4163,7 +3488,7 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
if (elements->length() > 0) {
- FixedArrayBase* elem;
+ FixedArrayBase* elem = nullptr;
{
AllocationResult allocation;
if (elements->map() == fixed_cow_array_map()) {
@@ -4175,16 +3500,16 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
}
if (!allocation.To(&elem)) return allocation;
}
- JSObject::cast(clone)->set_elements(elem, wb_mode);
+ JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
}
// Update properties if necessary.
if (properties->length() > 0) {
- FixedArray* prop;
+ FixedArray* prop = nullptr;
{
AllocationResult allocation = CopyFixedArray(properties);
if (!allocation.To(&prop)) return allocation;
}
- JSObject::cast(clone)->set_properties(prop, wb_mode);
+ JSObject::cast(clone)->set_properties(prop, SKIP_WRITE_BARRIER);
}
// Return the new clone.
return clone;
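
The rewritten CopyJSObject drops the always_allocate() old-space path entirely: the clone is now always placed in new space, so the RecordWrites bookkeeping disappears. A toy illustration of why that is safe (hypothetical simplified heap, not V8's implementation):

    #include <cstring>

    struct Obj { unsigned char payload[64]; };

    // In a generational collector only old->new pointers must be recorded;
    // the scavenger scans all of new space anyway. A clone known to live in
    // new space therefore cannot require a write-barrier entry, and a raw
    // byte copy of the source object is enough.
    Obj* CloneIntoNewSpace(const Obj* source, Obj* new_space_slot) {
      std::memcpy(new_space_slot, source, sizeof(Obj));  // CopyBlock stand-in
      return new_space_slot;
    }

    int main() {
      Obj source{}, slot{};
      CloneIntoNewSpace(&source, &slot);
      return 0;
    }

With the old-space path gone, the wb_mode variable has no remaining use, which is why set_elements and set_properties above now pass SKIP_WRITE_BARRIER directly.
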
@@ -4254,12 +3579,11 @@ AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
map = internalized_string_map();
size = SeqTwoByteString::SizeFor(chars);
}
- AllocationSpace space = SelectSpace(size, TENURED);
// Allocate string.
- HeapObject* result;
+ HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
@@ -4297,11 +3621,11 @@ AllocationResult Heap::AllocateRawOneByteString(int length,
DCHECK_GE(String::kMaxLength, length);
int size = SeqOneByteString::SizeFor(length);
DCHECK(size <= SeqOneByteString::kMaxSize);
- AllocationSpace space = SelectSpace(size, pretenure);
+ AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result;
+ HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, space);
if (!allocation.To(&result)) return allocation;
}
@@ -4321,11 +3645,11 @@ AllocationResult Heap::AllocateRawTwoByteString(int length,
DCHECK_GE(String::kMaxLength, length);
int size = SeqTwoByteString::SizeFor(length);
DCHECK(size <= SeqTwoByteString::kMaxSize);
- AllocationSpace space = SelectSpace(size, pretenure);
+ AllocationSpace space = SelectSpace(pretenure);
- HeapObject* result;
+ HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, space);
if (!allocation.To(&result)) return allocation;
}
@@ -4340,9 +3664,9 @@ AllocationResult Heap::AllocateRawTwoByteString(int length,
AllocationResult Heap::AllocateEmptyFixedArray() {
int size = FixedArray::SizeFor(0);
- HeapObject* result;
+ HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
// Initialize the object.
@@ -4358,7 +3682,7 @@ AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
}
int len = src->length();
- HeapObject* obj;
+ HeapObject* obj = nullptr;
{
AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
if (!allocation.To(&obj)) return allocation;
@@ -4391,7 +3715,7 @@ AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
int old_len = src->length();
int new_len = old_len + grow_by;
DCHECK(new_len >= old_len);
- HeapObject* obj;
+ HeapObject* obj = nullptr;
{
AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
if (!allocation.To(&obj)) return allocation;
@@ -4411,7 +3735,7 @@ AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
int len = src->length();
- HeapObject* obj;
+ HeapObject* obj = nullptr;
{
AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
if (!allocation.To(&obj)) return allocation;
@@ -4437,7 +3761,7 @@ AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
Map* map) {
int len = src->length();
- HeapObject* obj;
+ HeapObject* obj = nullptr;
{
AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
if (!allocation.To(&obj)) return allocation;
@@ -4456,9 +3780,9 @@ AllocationResult Heap::AllocateRawFixedArray(int length,
v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
}
int size = FixedArray::SizeFor(length);
- AllocationSpace space = SelectSpace(size, pretenure);
+ AllocationSpace space = SelectSpace(pretenure);
- return AllocateRaw(size, space, OLD_SPACE);
+ return AllocateRaw(size, space);
}
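
SelectSpace now consults only the pretenure flag; the size parameter is gone. Presumably the large-object cutoff moved into AllocateRaw itself (an inference from the dropped parameter, not visible in this hunk). A sketch of the reduced helper:

    enum PretenureFlag { NOT_TENURED, TENURED };
    enum AllocationSpace { NEW_SPACE, OLD_SPACE };

    // With the size check gone, the mapping is a one-liner: tenured
    // allocations go straight to old space, everything else starts young.
    static AllocationSpace SelectSpace(PretenureFlag pretenure) {
      return pretenure == TENURED ? OLD_SPACE : NEW_SPACE;
    }

    int main() { return SelectSpace(TENURED) == OLD_SPACE ? 0 : 1; }
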
@@ -4492,7 +3816,7 @@ AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
if (length == 0) return empty_fixed_array();
- HeapObject* obj;
+ HeapObject* obj = nullptr;
{
AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
if (!allocation.To(&obj)) return allocation;
@@ -4508,7 +3832,7 @@ AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
int length, PretenureFlag pretenure) {
if (length == 0) return empty_fixed_array();
- HeapObject* elements;
+ HeapObject* elements = nullptr;
AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
if (!allocation.To(&elements)) return allocation;
@@ -4525,12 +3849,11 @@ AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
kDoubleAligned);
}
int size = FixedDoubleArray::SizeFor(length);
- AllocationSpace space = SelectSpace(size, pretenure);
+ AllocationSpace space = SelectSpace(pretenure);
- HeapObject* object;
+ HeapObject* object = nullptr;
{
- AllocationResult allocation =
- AllocateRaw(size, space, OLD_SPACE, kDoubleAligned);
+ AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
if (!allocation.To(&object)) return allocation;
}
@@ -4542,9 +3865,8 @@ AllocationResult Heap::AllocateSymbol() {
// Statically ensure that it is safe to allocate symbols in paged spaces.
STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
- HeapObject* result = NULL;
- AllocationResult allocation =
- AllocateRaw(Symbol::kSize, OLD_SPACE, OLD_SPACE);
+ HeapObject* result = nullptr;
+ AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
result->set_map_no_write_barrier(symbol_map());
@@ -4561,7 +3883,7 @@ AllocationResult Heap::AllocateSymbol() {
Symbol::cast(result)
->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
Symbol::cast(result)->set_name(undefined_value());
- Symbol::cast(result)->set_flags(Smi::FromInt(0));
+ Symbol::cast(result)->set_flags(0);
DCHECK(!Symbol::cast(result)->is_private());
return result;
@@ -4582,10 +3904,9 @@ AllocationResult Heap::AllocateStruct(InstanceType type) {
return exception();
}
int size = map->instance_size();
- AllocationSpace space = SelectSpace(size, TENURED);
- Struct* result;
+ Struct* result = nullptr;
{
- AllocationResult allocation = Allocate(map, space);
+ AllocationResult allocation = Allocate(map, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
result->InitializeBody(size);
@@ -4711,24 +4032,39 @@ void Heap::ReduceNewSpaceSize() {
}
-bool Heap::TryFinalizeIdleIncrementalMarking(
- double idle_time_in_ms, size_t size_of_objects,
- size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
+void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
+ if (FLAG_overapproximate_weak_closure && incremental_marking()->IsMarking() &&
+ (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
+ (!incremental_marking()->weak_closure_was_overapproximated() &&
+ mark_compact_collector()->marking_deque()->IsEmpty()))) {
+ OverApproximateWeakClosure(comment);
+ } else if (incremental_marking()->IsComplete() ||
+ (mark_compact_collector()->marking_deque()->IsEmpty())) {
+ CollectAllGarbage(current_gc_flags_, comment);
+ }
+}
+
+
+bool Heap::TryFinalizeIdleIncrementalMarking(double idle_time_in_ms) {
+ size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
+ size_t final_incremental_mark_compact_speed_in_bytes_per_ms =
+ static_cast<size_t>(
+ tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
if (FLAG_overapproximate_weak_closure &&
(incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
(!incremental_marking()->weak_closure_was_overapproximated() &&
- mark_compact_collector_.marking_deque()->IsEmpty() &&
- gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure(
+ mark_compact_collector()->marking_deque()->IsEmpty() &&
+ gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
static_cast<size_t>(idle_time_in_ms))))) {
OverApproximateWeakClosure(
"Idle notification: overapproximate weak closure");
return true;
} else if (incremental_marking()->IsComplete() ||
- (mark_compact_collector_.marking_deque()->IsEmpty() &&
- gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
+ (mark_compact_collector()->marking_deque()->IsEmpty() &&
+ gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
static_cast<size_t>(idle_time_in_ms), size_of_objects,
final_incremental_mark_compact_speed_in_bytes_per_ms))) {
- CollectAllGarbage(current_gc_flags(),
+ CollectAllGarbage(current_gc_flags_,
"idle notification: finalize incremental");
return true;
}
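
TryFinalizeIdleIncrementalMarking now computes its own inputs from the tracer, and the new FinalizeIncrementalMarkingIfComplete variant applies the same two-step decision without the idle-time checks. A rough model of the shared decision structure (illustrative names; only the shape mirrors the code above):

    struct MarkingState {
      bool is_complete;
      bool deque_empty;
      bool weak_closure_overapproximated;
      bool ready_to_overapproximate;
    };

    enum class IdleAction { kOverApproximateWeakClosure, kFinalizeFullGC, kNothing };

    // First try to over-approximate the weak closure, a cheaper step that
    // brings marking closer to completion; only then pay for a full
    // finalizing collection.
    IdleAction DecideFinalization(const MarkingState& s, bool enough_idle_time) {
      if (s.ready_to_overapproximate ||
          (!s.weak_closure_overapproximated && s.deque_empty && enough_idle_time)) {
        return IdleAction::kOverApproximateWeakClosure;
      }
      if (s.is_complete || (s.deque_empty && enough_idle_time)) {
        return IdleAction::kFinalizeFullGC;
      }
      return IdleAction::kNothing;
    }

    int main() {
      MarkingState s{false, true, false, false};
      return DecideFinalization(s, true) ==
                     IdleAction::kOverApproximateWeakClosure
                 ? 0
                 : 1;
    }
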
@@ -4736,76 +4072,34 @@ bool Heap::TryFinalizeIdleIncrementalMarking(
}
-GCIdleTimeHandler::HeapState Heap::ComputeHeapState() {
- GCIdleTimeHandler::HeapState heap_state;
+GCIdleTimeHeapState Heap::ComputeHeapState() {
+ GCIdleTimeHeapState heap_state;
heap_state.contexts_disposed = contexts_disposed_;
heap_state.contexts_disposal_rate =
tracer()->ContextDisposalRateInMilliseconds();
heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
- heap_state.sweeping_in_progress =
- mark_compact_collector()->sweeping_in_progress();
- heap_state.sweeping_completed =
- mark_compact_collector()->IsSweepingCompleted();
- heap_state.mark_compact_speed_in_bytes_per_ms =
- static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
- heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
- tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
- heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms =
- static_cast<size_t>(
- tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
- heap_state.scavenge_speed_in_bytes_per_ms =
- static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
- heap_state.used_new_space_size = new_space_.Size();
- heap_state.new_space_capacity = new_space_.Capacity();
- heap_state.new_space_allocation_throughput_in_bytes_per_ms =
- tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond();
return heap_state;
}
-double Heap::AdvanceIncrementalMarking(
- intptr_t step_size_in_bytes, double deadline_in_ms,
- IncrementalMarking::StepActions step_actions) {
- DCHECK(!incremental_marking()->IsStopped());
-
- if (step_size_in_bytes == 0) {
- step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
- static_cast<size_t>(GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs),
- static_cast<size_t>(
- tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()));
- }
-
- double remaining_time_in_ms = 0.0;
- do {
- incremental_marking()->Step(
- step_size_in_bytes, step_actions.completion_action,
- step_actions.force_marking, step_actions.force_completion);
- remaining_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
- } while (remaining_time_in_ms >=
- 2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
- !incremental_marking()->IsComplete() &&
- !mark_compact_collector_.marking_deque()->IsEmpty());
- return remaining_time_in_ms;
-}
-
-
bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
- GCIdleTimeHandler::HeapState heap_state,
+ GCIdleTimeHeapState heap_state,
double deadline_in_ms) {
bool result = false;
switch (action.type) {
case DONE:
result = true;
break;
- case DO_INCREMENTAL_MARKING: {
- const double remaining_idle_time_in_ms =
- AdvanceIncrementalMarking(action.parameter, deadline_in_ms,
- IncrementalMarking::IdleStepActions());
- if (remaining_idle_time_in_ms > 0.0) {
- action.additional_work = TryFinalizeIdleIncrementalMarking(
- remaining_idle_time_in_ms, heap_state.size_of_objects,
- heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms);
+ case DO_INCREMENTAL_STEP: {
+ if (incremental_marking()->incremental_marking_job()->IdleTaskPending()) {
+ result = true;
+ } else {
+ incremental_marking()
+ ->incremental_marking_job()
+ ->NotifyIdleTaskProgress();
+ result = IncrementalMarkingJob::IdleTask::Step(this, deadline_in_ms) ==
+ IncrementalMarkingJob::IdleTask::kDone;
}
break;
}
@@ -4815,12 +4109,6 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
break;
}
- case DO_SCAVENGE:
- CollectGarbage(NEW_SPACE, "idle notification: scavenge");
- break;
- case DO_FINALIZE_SWEEPING:
- mark_compact_collector()->EnsureSweepingCompleted();
- break;
case DO_NOTHING:
break;
}
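
DO_INCREMENTAL_MARKING with its explicit stepping loop becomes DO_INCREMENTAL_STEP, which defers to the incremental marking job: if an idle task is already queued the action counts as handled, otherwise one bounded step runs against the deadline. A simplified model of that branch (hypothetical names, not the real IncrementalMarkingJob API):

    class MarkingJob {
     public:
      bool IdleTaskPending() const { return idle_task_pending_; }
      void NotifyIdleTaskProgress() { idle_task_pending_ = false; }
      // Runs bounded work units until done or the deadline is hit.
      bool Step(double now_ms, double deadline_ms) {
        while (remaining_ > 0 && now_ms < deadline_ms) {
          --remaining_;   // one unit of marking work
          now_ms += 1.0;  // pretend each unit costs 1ms
        }
        return remaining_ == 0;
      }
     private:
      bool idle_task_pending_ = false;
      int remaining_ = 2;
    };

    bool PerformIncrementalStep(MarkingJob* job, double now_ms, double deadline_ms) {
      if (job->IdleTaskPending()) return true;  // a queued task will do the work
      job->NotifyIdleTaskProgress();
      return job->Step(now_ms, deadline_ms);
    }

    int main() {
      MarkingJob job;
      return PerformIncrementalStep(&job, 0.0, 10.0) ? 0 : 1;
    }
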
@@ -4830,7 +4118,7 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
void Heap::IdleNotificationEpilogue(GCIdleTimeAction action,
- GCIdleTimeHandler::HeapState heap_state,
+ GCIdleTimeHeapState heap_state,
double start_ms, double deadline_in_ms) {
double idle_time_in_ms = deadline_in_ms - start_ms;
double current_time = MonotonicallyIncreasingTimeInMs();
@@ -4890,7 +4178,7 @@ void Heap::CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms,
event.time_ms = now_ms;
event.can_start_incremental_gc = incremental_marking()->IsStopped() &&
incremental_marking()->CanBeActivated();
- memory_reducer_.NotifyBackgroundIdleNotification(event);
+ memory_reducer_->NotifyBackgroundIdleNotification(event);
optimize_for_memory_usage_ = true;
} else {
optimize_for_memory_usage_ = false;
@@ -4927,10 +4215,10 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
tracer()->SampleAllocation(start_ms, NewSpaceAllocationCounter(),
OldGenerationAllocationCounter());
- GCIdleTimeHandler::HeapState heap_state = ComputeHeapState();
+ GCIdleTimeHeapState heap_state = ComputeHeapState();
GCIdleTimeAction action =
- gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
+ gc_idle_time_handler_->Compute(idle_time_in_ms, heap_state);
bool result = PerformIdleTimeAction(action, heap_state, deadline_in_ms);
@@ -5094,9 +4382,9 @@ void Heap::Verify() {
lo_space_->Verify();
- mark_compact_collector_.VerifyWeakEmbeddedObjectsInCode();
+ mark_compact_collector()->VerifyWeakEmbeddedObjectsInCode();
if (FLAG_omit_map_checks_for_leaf_maps) {
- mark_compact_collector_.VerifyOmittedMapChecks();
+ mark_compact_collector()->VerifyOmittedMapChecks();
}
}
#endif
@@ -5150,68 +4438,6 @@ void Heap::IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
}
-void Heap::IteratePointersToFromSpace(HeapObject* target, int size,
- ObjectSlotCallback callback) {
- Address obj_address = target->address();
-
- // We are not collecting slots on new space objects during mutation
- // thus we have to scan for pointers to evacuation candidates when we
- // promote objects. But we should not record any slots in non-black
- // objects. Grey object's slots would be rescanned.
- // White object might not survive until the end of collection
- // it would be a violation of the invariant to record it's slots.
- bool record_slots = false;
- if (incremental_marking()->IsCompacting()) {
- MarkBit mark_bit = Marking::MarkBitFrom(target);
- record_slots = Marking::IsBlack(mark_bit);
- }
-
- // Do not scavenge JSArrayBuffer's contents
- switch (target->ContentType()) {
- case HeapObjectContents::kTaggedValues: {
- IterateAndMarkPointersToFromSpace(target, obj_address, obj_address + size,
- record_slots, callback);
- break;
- }
- case HeapObjectContents::kMixedValues: {
- if (target->IsFixedTypedArrayBase()) {
- IterateAndMarkPointersToFromSpace(
- target, obj_address + FixedTypedArrayBase::kBasePointerOffset,
- obj_address + FixedTypedArrayBase::kHeaderSize, record_slots,
- callback);
- } else if (target->IsJSArrayBuffer()) {
- IterateAndMarkPointersToFromSpace(
- target, obj_address,
- obj_address + JSArrayBuffer::kByteLengthOffset + kPointerSize,
- record_slots, callback);
- IterateAndMarkPointersToFromSpace(
- target, obj_address + JSArrayBuffer::kSize, obj_address + size,
- record_slots, callback);
-#if V8_DOUBLE_FIELDS_UNBOXING
- } else if (FLAG_unbox_double_fields) {
- LayoutDescriptorHelper helper(target->map());
- DCHECK(!helper.all_fields_tagged());
-
- for (int offset = 0; offset < size;) {
- int end_of_region_offset;
- if (helper.IsTagged(offset, size, &end_of_region_offset)) {
- IterateAndMarkPointersToFromSpace(
- target, obj_address + offset,
- obj_address + end_of_region_offset, record_slots, callback);
- }
- offset = end_of_region_offset;
- }
-#endif
- }
- break;
- }
- case HeapObjectContents::kRawValues: {
- break;
- }
- }
-}
-
-
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
IterateWeakRoots(v, mode);
@@ -5241,9 +4467,6 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
- v->VisitPointer(bit_cast<Object**>(&hidden_string_));
- v->Synchronize(VisitorSynchronization::kInternalizedString);
-
isolate_->bootstrapper()->Iterate(v);
v->Synchronize(VisitorSynchronization::kBootstrapper);
isolate_->Iterate(v);
@@ -5529,7 +4752,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
GetFromRingBuffer(stats->last_few_messages);
if (stats->js_stacktrace != NULL) {
FixedStringAllocator fixed(stats->js_stacktrace, kStacktraceBufferSize - 1);
- StringStream accumulator(&fixed);
+ StringStream accumulator(&fixed, StringStream::kPrintObjectConcise);
isolate()->PrintStack(&accumulator, Isolate::kPrintStackVerbose);
}
}
@@ -5648,7 +4871,7 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
factor = Min(factor, kMaxHeapGrowingFactorMemoryConstrained);
}
- if (memory_reducer_.ShouldGrowHeapSlowly() || optimize_for_memory_usage_) {
+ if (memory_reducer_->ShouldGrowHeapSlowly() || optimize_for_memory_usage_) {
factor = Min(factor, kConservativeHeapGrowingFactor);
}
@@ -5712,11 +4935,22 @@ void Heap::DisableInlineAllocation() {
}
+void Heap::LowerInlineAllocationLimit(intptr_t step) {
+ new_space()->LowerInlineAllocationLimit(step);
+}
+
+
+void Heap::ResetInlineAllocationLimit() {
+ new_space()->LowerInlineAllocationLimit(
+ ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
+}
+
+
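
LowerInlineAllocationLimit is the hook behind the new ScavengeJob: keeping the inline allocation limit a fixed step below the real end of the space forces the generated-code fast path into the runtime every step bytes, giving the heap a periodic opportunity to schedule idle work. A sketch of the mechanism (illustrative, not V8's NewSpace):

    #include <cstdint>

    class NewSpaceModel {
     public:
      explicit NewSpaceModel(intptr_t capacity) : limit_(capacity) {}
      void LowerInlineAllocationLimit(intptr_t step) {
        // The next allocation crossing this artificial limit takes the slow
        // path even though the space is not actually full.
        limit_ = top_ + step;
      }
      bool AllocateFast(intptr_t bytes) {  // false -> slow path runs
        if (top_ + bytes > limit_) return false;
        top_ += bytes;
        return true;
      }
     private:
      intptr_t top_ = 0;
      intptr_t limit_;
    };

    int main() {
      NewSpaceModel space(1 << 20);
      space.LowerInlineAllocationLimit(1024);
      int slow_path_hits = 0;
      for (int i = 0; i < 64; i++) {
        if (!space.AllocateFast(64)) {
          ++slow_path_hits;                        // idle task scheduled here
          space.LowerInlineAllocationLimit(1024);  // re-arm for the next step
        }
      }
      return slow_path_hits > 0 ? 0 : 1;
    }
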
V8_DECLARE_ONCE(initialize_gc_once);
static void InitializeGCOnce() {
- InitializeScavengingVisitorsTables();
- NewSpaceScavenger::Initialize();
+ Scavenger::Initialize();
+ StaticScavengeVisitor::Initialize();
MarkCompactCollector::Initialize();
}
@@ -5746,6 +4980,9 @@ bool Heap::SetUp() {
if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
return false;
+ // Initialize incremental marking.
+ incremental_marking_ = new IncrementalMarking(this);
+
// Set up new space.
if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
return false;
@@ -5793,6 +5030,22 @@ bool Heap::SetUp() {
deferred_counters_[i] = 0;
}
+ tracer_ = new GCTracer(this);
+
+ scavenge_collector_ = new Scavenger(this);
+
+ mark_compact_collector_ = new MarkCompactCollector(this);
+
+ gc_idle_time_handler_ = new GCIdleTimeHandler();
+
+ memory_reducer_ = new MemoryReducer(this);
+
+ object_stats_ = new ObjectStats(this);
+ object_stats_->ClearObjectStats(true);
+
+ scavenge_job_ = new ScavengeJob();
+
+ array_buffer_tracker_ = new ArrayBufferTracker(this);
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -5801,6 +5054,8 @@ bool Heap::SetUp() {
mark_compact_collector()->SetUp();
+ ResetInlineAllocationLimit();
+
return true;
}
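
SetUp now heap-allocates the tracer, collectors, idle-time handler, memory reducer, object stats, scavenge job and array-buffer tracker instead of holding them by value. Together with the forward declarations added to heap.h further down, this lets heap.h stop including most of src/heap. A minimal sketch of the pattern (illustrative names, not the real classes):

    // heap.h side: forward declarations keep the header light.
    class GCTracer;
    class MemoryReducer;

    class HeapLike {
     public:
      bool SetUp();
      void TearDown();
     private:
      GCTracer* tracer_ = nullptr;  // was a by-value member
      MemoryReducer* memory_reducer_ = nullptr;
    };

    // heap.cc side: full definitions are visible here only.
    class GCTracer {};
    class MemoryReducer {
     public:
      void TearDown() {}
    };

    bool HeapLike::SetUp() {
      tracer_ = new GCTracer();
      memory_reducer_ = new MemoryReducer();
      return true;
    }

    void HeapLike::TearDown() {
      if (memory_reducer_ != nullptr) {
        memory_reducer_->TearDown();  // mirrors the null-checked TearDown below
        delete memory_reducer_;
        memory_reducer_ = nullptr;
      }
      delete tracer_;
      tracer_ = nullptr;
    }

    int main() {
      HeapLike heap;
      heap.SetUp();
      heap.TearDown();
      return 0;
    }
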
@@ -5835,6 +5090,12 @@ void Heap::SetStackLimits() {
}
+void Heap::PrintAlloctionsHash() {
+ uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
+ PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count(), hash);
+}
+
+
void Heap::NotifyDeserializationComplete() {
deserialization_complete_ = true;
#ifdef DEBUG
@@ -5865,8 +5126,9 @@ void Heap::TearDown() {
PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
- PrintF("total_marking_time=%.1f ", tracer_.cumulative_marking_duration());
- PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
+ PrintF("total_marking_time=%.1f ", tracer()->cumulative_marking_duration());
+ PrintF("total_sweeping_time=%.1f ",
+ tracer()->cumulative_sweeping_duration());
PrintF("\n\n");
}
@@ -5891,32 +5153,58 @@ void Heap::TearDown() {
PrintAlloctionsHash();
}
- memory_reducer_.TearDown();
+ delete scavenge_collector_;
+ scavenge_collector_ = nullptr;
+
+ if (mark_compact_collector_ != nullptr) {
+ mark_compact_collector_->TearDown();
+ delete mark_compact_collector_;
+ mark_compact_collector_ = nullptr;
+ }
+
+ delete incremental_marking_;
+ incremental_marking_ = nullptr;
+
+ delete gc_idle_time_handler_;
+ gc_idle_time_handler_ = nullptr;
+
+ if (memory_reducer_ != nullptr) {
+ memory_reducer_->TearDown();
+ delete memory_reducer_;
+ memory_reducer_ = nullptr;
+ }
+
+ delete object_stats_;
+ object_stats_ = nullptr;
+
+ delete scavenge_job_;
+ scavenge_job_ = nullptr;
+
+ WaitUntilUnmappingOfFreeChunksCompleted();
- TearDownArrayBuffers();
+ delete array_buffer_tracker_;
+ array_buffer_tracker_ = nullptr;
isolate_->global_handles()->TearDown();
external_string_table_.TearDown();
- mark_compact_collector()->TearDown();
+ delete tracer_;
+ tracer_ = nullptr;
new_space_.TearDown();
if (old_space_ != NULL) {
- old_space_->TearDown();
delete old_space_;
old_space_ = NULL;
}
if (code_space_ != NULL) {
- code_space_->TearDown();
delete code_space_;
code_space_ = NULL;
}
if (map_space_ != NULL) {
- map_space_->TearDown();
delete map_space_;
map_space_ = NULL;
}
@@ -6226,31 +5514,15 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
};
-HeapIterator::HeapIterator(Heap* heap)
- : make_heap_iterable_helper_(heap),
- no_heap_allocation_(),
- heap_(heap),
- filtering_(HeapIterator::kNoFiltering),
- filter_(NULL) {
- Init();
-}
-
-
HeapIterator::HeapIterator(Heap* heap,
HeapIterator::HeapObjectsFiltering filtering)
: make_heap_iterable_helper_(heap),
no_heap_allocation_(),
heap_(heap),
filtering_(filtering),
- filter_(NULL) {
- Init();
-}
-
-
-HeapIterator::~HeapIterator() { Shutdown(); }
-
-
-void HeapIterator::Init() {
+ filter_(nullptr),
+ space_iterator_(nullptr),
+ object_iterator_(nullptr) {
// Start the iteration.
space_iterator_ = new SpaceIterator(heap_);
switch (filtering_) {
@@ -6264,35 +5536,33 @@ void HeapIterator::Init() {
}
-void HeapIterator::Shutdown() {
+HeapIterator::~HeapIterator() {
#ifdef DEBUG
// Assert that in filtering mode we have iterated through all
// objects. Otherwise, heap will be left in an inconsistent state.
if (filtering_ != kNoFiltering) {
- DCHECK(object_iterator_ == NULL);
+ DCHECK(object_iterator_ == nullptr);
}
#endif
// Make sure the last iterator is deallocated.
+ delete object_iterator_;
delete space_iterator_;
- space_iterator_ = NULL;
- object_iterator_ = NULL;
delete filter_;
- filter_ = NULL;
}
HeapObject* HeapIterator::next() {
- if (filter_ == NULL) return NextObject();
+ if (filter_ == nullptr) return NextObject();
HeapObject* obj = NextObject();
- while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
+ while ((obj != nullptr) && (filter_->SkipObject(obj))) obj = NextObject();
return obj;
}
HeapObject* HeapIterator::NextObject() {
// No iterator means we are done.
- if (object_iterator_ == NULL) return NULL;
+ if (object_iterator_ == nullptr) return nullptr;
if (HeapObject* obj = object_iterator_->next_object()) {
// If the current iterator has more objects we are fine.
@@ -6307,15 +5577,8 @@ HeapObject* HeapIterator::NextObject() {
}
}
// Done with the last space.
- object_iterator_ = NULL;
- return NULL;
-}
-
-
-void HeapIterator::reset() {
- // Restart the iterator.
- Shutdown();
- Init();
+ object_iterator_ = nullptr;
+ return nullptr;
}
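
HeapIterator collapses its Init()/Shutdown() helpers and the reset() entry point into plain construction and destruction. A sketch of the resulting RAII shape (simplified; the member types here are invented for the example):

    class SpaceIterator {};  // stand-in for the real space iterator

    class HeapIteratorLike {
     public:
      enum Filtering { kNoFiltering, kFilterUnreachable };
      explicit HeapIteratorLike(Filtering filtering)
          : filtering_(filtering),
            space_iterator_(new SpaceIterator()) {}     // former Init()
      ~HeapIteratorLike() { delete space_iterator_; }   // former Shutdown()

      HeapIteratorLike(const HeapIteratorLike&) = delete;
      HeapIteratorLike& operator=(const HeapIteratorLike&) = delete;

     private:
      Filtering filtering_;
      SpaceIterator* space_iterator_;
    };

    int main() {
      // With reset() gone, restarting iteration means making a new iterator.
      HeapIteratorLike it(HeapIteratorLike::kNoFiltering);
      return 0;
    }
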
@@ -6388,7 +5651,7 @@ void PathTracer::TracePathFrom(Object** root) {
static bool SafeIsNativeContext(HeapObject* obj) {
- return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
+ return obj->map() == obj->GetHeap()->root(Heap::kNativeContextMapRootIndex);
}
@@ -6597,7 +5860,7 @@ void DescriptorLookupCache::Clear() {
}
-void ExternalStringTable::CleanUp() {
+void Heap::ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
if (new_space_strings_[i] == heap_->the_hole_value()) {
@@ -6632,7 +5895,7 @@ void ExternalStringTable::CleanUp() {
}
-void ExternalStringTable::TearDown() {
+void Heap::ExternalStringTable::TearDown() {
for (int i = 0; i < new_space_strings_.length(); ++i) {
heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
}
@@ -6644,56 +5907,80 @@ void ExternalStringTable::TearDown() {
}
+class Heap::UnmapFreeMemoryTask : public v8::Task {
+ public:
+ UnmapFreeMemoryTask(Heap* heap, MemoryChunk* head)
+ : heap_(heap), head_(head) {}
+ virtual ~UnmapFreeMemoryTask() {}
+
+ private:
+ // v8::Task overrides.
+ void Run() override {
+ heap_->FreeQueuedChunks(head_);
+ heap_->pending_unmapping_tasks_semaphore_.Signal();
+ }
+
+ Heap* heap_;
+ MemoryChunk* head_;
+
+ DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
+};
+
+
+void Heap::WaitUntilUnmappingOfFreeChunksCompleted() {
+ while (concurrent_unmapping_tasks_active_ > 0) {
+ pending_unmapping_tasks_semaphore_.Wait();
+ concurrent_unmapping_tasks_active_--;
+ }
+}
+
+
void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
+ // PreFree logically frees the memory chunk. However, the actual freeing
+ // will happen on a separate thread sometime later.
+ isolate_->memory_allocator()->PreFreeMemory(chunk);
+
+ // The chunks added to this queue will be freed by a concurrent thread.
chunk->set_next_chunk(chunks_queued_for_free_);
chunks_queued_for_free_ = chunk;
}
-void Heap::FreeQueuedChunks() {
+void Heap::FilterStoreBufferEntriesOnAboutToBeFreedPages() {
if (chunks_queued_for_free_ == NULL) return;
MemoryChunk* next;
MemoryChunk* chunk;
for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
next = chunk->next_chunk();
chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+ }
+ store_buffer()->Compact();
+ store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+}
- if (chunk->owner()->identity() == LO_SPACE) {
- // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
- // If FromAnyPointerAddress encounters a slot that belongs to a large
- // chunk queued for deletion it will fail to find the chunk because
- // it try to perform a search in the list of pages owned by of the large
- // object space and queued chunks were detached from that list.
- // To work around this we split large chunk into normal kPageSize aligned
- // pieces and initialize size, owner and flags field of every piece.
- // If FromAnyPointerAddress encounters a slot that belongs to one of
- // these smaller pieces it will treat it as a slot on a normal Page.
- Address chunk_end = chunk->address() + chunk->size();
- MemoryChunk* inner =
- MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
- MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
- while (inner <= inner_last) {
- // Size of a large chunk is always a multiple of
- // OS::AllocateAlignment() so there is always
- // enough space for a fake MemoryChunk header.
- Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
- // Guard against overflow.
- if (area_end < inner->address()) area_end = chunk_end;
- inner->SetArea(inner->address(), area_end);
- inner->set_size(Page::kPageSize);
- inner->set_owner(lo_space());
- inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
- inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
- }
- }
+
+void Heap::FreeQueuedChunks() {
+ if (chunks_queued_for_free_ != NULL) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new UnmapFreeMemoryTask(this, chunks_queued_for_free_),
+ v8::Platform::kShortRunningTask);
+ chunks_queued_for_free_ = NULL;
+ } else {
+ // If we do not have anything to unmap, we just signal the semaphore
+ // that we are done.
+ pending_unmapping_tasks_semaphore_.Signal();
}
- isolate_->heap()->store_buffer()->Compact();
- isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
- for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+ concurrent_unmapping_tasks_active_++;
+}
+
+
+void Heap::FreeQueuedChunks(MemoryChunk* list_head) {
+ MemoryChunk* next;
+ MemoryChunk* chunk;
+ for (chunk = list_head; chunk != NULL; chunk = next) {
next = chunk->next_chunk();
- isolate_->memory_allocator()->Free(chunk);
+ isolate_->memory_allocator()->PerformFreeMemory(chunk);
}
- chunks_queued_for_free_ = NULL;
}
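
Chunk freeing is now split across threads: QueueMemoryChunkForFree pre-frees eagerly, FreeQueuedChunks posts an UnmapFreeMemoryTask, and a semaphore plus a task counter let teardown wait for outstanding work. A compact model of that handshake using std::thread and a hand-rolled semaphore (V8 uses its own platform task runner and semaphore; names here are illustrative):

    #include <condition_variable>
    #include <mutex>
    #include <thread>

    class Semaphore {
     public:
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++count_;
        cv_.notify_one();
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_ = 0;
    };

    struct Chunk { Chunk* next = nullptr; };

    Semaphore pending_unmapping_tasks_semaphore;
    int concurrent_unmapping_tasks_active = 0;

    void BackgroundFree(Chunk* head) {
      for (Chunk* c = head; c != nullptr; c = c->next) { /* PerformFreeMemory */ }
      pending_unmapping_tasks_semaphore.Signal();  // task done
    }

    void FreeQueuedChunks(Chunk* head) {
      std::thread(BackgroundFree, head).detach();  // background task stand-in
      ++concurrent_unmapping_tasks_active;
    }

    void WaitUntilUnmappingCompleted() {
      while (concurrent_unmapping_tasks_active > 0) {
        pending_unmapping_tasks_semaphore.Wait();
        --concurrent_unmapping_tasks_active;
      }
    }

    int main() {
      Chunk a, b;
      a.next = &b;
      FreeQueuedChunks(&a);
      WaitUntilUnmappingCompleted();
      return 0;
    }

Note that the patched code bumps the counter on both branches and signals immediately when there is nothing to unmap, so the Wait loop stays balanced.
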
@@ -6712,124 +5999,6 @@ void Heap::RememberUnmappedPage(Address page, bool compacted) {
}
-void Heap::ClearObjectStats(bool clear_last_time_stats) {
- memset(object_counts_, 0, sizeof(object_counts_));
- memset(object_sizes_, 0, sizeof(object_sizes_));
- if (clear_last_time_stats) {
- memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
- memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
- }
-}
-
-
-static base::LazyMutex object_stats_mutex = LAZY_MUTEX_INITIALIZER;
-
-
-void Heap::TraceObjectStat(const char* name, int count, int size, double time) {
- PrintIsolate(isolate_,
- "heap:%p, time:%f, gc:%d, type:%s, count:%d, size:%d\n",
- static_cast<void*>(this), time, ms_count_, name, count, size);
-}
-
-
-void Heap::TraceObjectStats() {
- base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
- int index;
- int count;
- int size;
- int total_size = 0;
- double time = isolate_->time_millis_since_init();
-#define TRACE_OBJECT_COUNT(name) \
- count = static_cast<int>(object_counts_[name]); \
- size = static_cast<int>(object_sizes_[name]) / KB; \
- total_size += size; \
- TraceObjectStat(#name, count, size, time);
- INSTANCE_TYPE_LIST(TRACE_OBJECT_COUNT)
-#undef TRACE_OBJECT_COUNT
-#define TRACE_OBJECT_COUNT(name) \
- index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
- count = static_cast<int>(object_counts_[index]); \
- size = static_cast<int>(object_sizes_[index]) / KB; \
- TraceObjectStat("*CODE_" #name, count, size, time);
- CODE_KIND_LIST(TRACE_OBJECT_COUNT)
-#undef TRACE_OBJECT_COUNT
-#define TRACE_OBJECT_COUNT(name) \
- index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
- count = static_cast<int>(object_counts_[index]); \
- size = static_cast<int>(object_sizes_[index]) / KB; \
- TraceObjectStat("*FIXED_ARRAY_" #name, count, size, time);
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(TRACE_OBJECT_COUNT)
-#undef TRACE_OBJECT_COUNT
-#define TRACE_OBJECT_COUNT(name) \
- index = \
- FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
- count = static_cast<int>(object_counts_[index]); \
- size = static_cast<int>(object_sizes_[index]) / KB; \
- TraceObjectStat("*CODE_AGE_" #name, count, size, time);
- CODE_AGE_LIST_COMPLETE(TRACE_OBJECT_COUNT)
-#undef TRACE_OBJECT_COUNT
-}
-
-
-void Heap::CheckpointObjectStats() {
- base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
- Counters* counters = isolate()->counters();
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- counters->count_of_##name()->Increment( \
- static_cast<int>(object_counts_[name])); \
- counters->count_of_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[name])); \
- counters->size_of_##name()->Increment( \
- static_cast<int>(object_sizes_[name])); \
- counters->size_of_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[name]));
- INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
- int index;
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
- counters->count_of_CODE_TYPE_##name()->Increment( \
- static_cast<int>(object_counts_[index])); \
- counters->count_of_CODE_TYPE_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[index])); \
- counters->size_of_CODE_TYPE_##name()->Increment( \
- static_cast<int>(object_sizes_[index])); \
- counters->size_of_CODE_TYPE_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[index]));
- CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
- counters->count_of_FIXED_ARRAY_##name()->Increment( \
- static_cast<int>(object_counts_[index])); \
- counters->count_of_FIXED_ARRAY_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[index])); \
- counters->size_of_FIXED_ARRAY_##name()->Increment( \
- static_cast<int>(object_sizes_[index])); \
- counters->size_of_FIXED_ARRAY_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[index]));
- FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
- index = \
- FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
- counters->count_of_CODE_AGE_##name()->Increment( \
- static_cast<int>(object_counts_[index])); \
- counters->count_of_CODE_AGE_##name()->Decrement( \
- static_cast<int>(object_counts_last_time_[index])); \
- counters->size_of_CODE_AGE_##name()->Increment( \
- static_cast<int>(object_sizes_[index])); \
- counters->size_of_CODE_AGE_##name()->Decrement( \
- static_cast<int>(object_sizes_last_time_[index]));
- CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-
- MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
- MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
- ClearObjectStats();
-}
-
-
void Heap::RegisterStrongRoots(Object** start, Object** end) {
StrongRootsList* list = new StrongRootsList();
list->next = strong_roots_list_;
@@ -6859,9 +6028,26 @@ void Heap::UnregisterStrongRoots(Object** start) {
}
+size_t Heap::NumberOfTrackedHeapObjectTypes() {
+ return ObjectStats::OBJECT_STATS_COUNT;
+}
+
+
+size_t Heap::ObjectCountAtLastGC(size_t index) {
+ if (index >= ObjectStats::OBJECT_STATS_COUNT) return 0;
+ return object_stats_->object_count_last_gc(index);
+}
+
+
+size_t Heap::ObjectSizeAtLastGC(size_t index) {
+ if (index >= ObjectStats::OBJECT_STATS_COUNT) return 0;
+ return object_stats_->object_size_last_gc(index);
+}
+
+
bool Heap::GetObjectTypeName(size_t index, const char** object_type,
const char** object_sub_type) {
- if (index >= OBJECT_STATS_COUNT) return false;
+ if (index >= ObjectStats::OBJECT_STATS_COUNT) return false;
switch (static_cast<int>(index)) {
#define COMPARE_AND_RETURN_NAME(name) \
@@ -6871,29 +6057,37 @@ bool Heap::GetObjectTypeName(size_t index, const char** object_type,
return true;
INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
-#define COMPARE_AND_RETURN_NAME(name) \
- case FIRST_CODE_KIND_SUB_TYPE + Code::name: \
- *object_type = "CODE_TYPE"; \
- *object_sub_type = "CODE_KIND/" #name; \
+#define COMPARE_AND_RETURN_NAME(name) \
+ case ObjectStats::FIRST_CODE_KIND_SUB_TYPE + Code::name: \
+ *object_type = "CODE_TYPE"; \
+ *object_sub_type = "CODE_KIND/" #name; \
return true;
CODE_KIND_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
-#define COMPARE_AND_RETURN_NAME(name) \
- case FIRST_FIXED_ARRAY_SUB_TYPE + name: \
- *object_type = "FIXED_ARRAY_TYPE"; \
- *object_sub_type = #name; \
+#define COMPARE_AND_RETURN_NAME(name) \
+ case ObjectStats::FIRST_FIXED_ARRAY_SUB_TYPE + name: \
+ *object_type = "FIXED_ARRAY_TYPE"; \
+ *object_sub_type = #name; \
return true;
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
-#define COMPARE_AND_RETURN_NAME(name) \
- case FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge: \
- *object_type = "CODE_TYPE"; \
- *object_sub_type = "CODE_AGE/" #name; \
+#define COMPARE_AND_RETURN_NAME(name) \
+ case ObjectStats::FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - \
+ Code::kFirstCodeAge: \
+ *object_type = "CODE_TYPE"; \
+ *object_sub_type = "CODE_AGE/" #name; \
return true;
CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME)
#undef COMPARE_AND_RETURN_NAME
}
return false;
}
+
+
+// static
+int Heap::GetStaticVisitorIdForMap(Map* map) {
+ return StaticVisitorBase::GetVisitorId(map);
+}
+
} // namespace internal
} // namespace v8
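
NumberOfTrackedHeapObjectTypes, ObjectCountAtLastGC and ObjectSizeAtLastGC expose the relocated ObjectStats counters through Heap, with explicit bounds checks, presumably because the index arrives from callers outside the heap (an inference from the checks themselves). A small sketch of the clamping accessor (illustrative ObjectStats stand-in):

    #include <cstddef>

    class ObjectStatsModel {
     public:
      static const size_t OBJECT_STATS_COUNT = 4;
      size_t object_count_last_gc(size_t i) const { return counts_[i]; }
     private:
      size_t counts_[OBJECT_STATS_COUNT] = {1, 2, 3, 4};
    };

    // Out-of-range indices answer 0 rather than asserting, so an untrusted
    // index cannot read past the table.
    size_t ObjectCountAtLastGC(const ObjectStatsModel& stats, size_t index) {
      if (index >= ObjectStatsModel::OBJECT_STATS_COUNT) return 0;
      return stats.object_count_last_gc(index);
    }

    int main() {
      ObjectStatsModel stats;
      return static_cast<int>(ObjectCountAtLastGC(stats, 99));  // 0, not UB
    }
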
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index a33a98226c..0e427de1c9 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -8,15 +8,13 @@
#include <cmath>
#include <map>
+// Clients of this interface shouldn't depend on lots of heap internals.
+// Do not include anything from src/heap here!
#include "src/allocation.h"
#include "src/assert-scope.h"
+#include "src/atomic-utils.h"
#include "src/globals.h"
-#include "src/heap/gc-idle-time-handler.h"
-#include "src/heap/gc-tracer.h"
-#include "src/heap/incremental-marking.h"
-#include "src/heap/mark-compact.h"
-#include "src/heap/memory-reducer.h"
-#include "src/heap/objects-visiting.h"
+// TODO(mstarzinger): Two more includes to kill!
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
#include "src/list.h"
@@ -38,6 +36,7 @@ namespace internal {
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
V(String, empty_string, empty_string) \
+ V(String, hidden_string, hidden_string) \
V(Oddball, uninitialized_value, UninitializedValue) \
V(Map, cell_map, CellMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
@@ -47,10 +46,13 @@ namespace internal {
V(Map, mutable_heap_number_map, MutableHeapNumberMap) \
V(Map, float32x4_map, Float32x4Map) \
V(Map, int32x4_map, Int32x4Map) \
+ V(Map, uint32x4_map, Uint32x4Map) \
V(Map, bool32x4_map, Bool32x4Map) \
V(Map, int16x8_map, Int16x8Map) \
+ V(Map, uint16x8_map, Uint16x8Map) \
V(Map, bool16x8_map, Bool16x8Map) \
V(Map, int8x16_map, Int8x16Map) \
+ V(Map, uint8x16_map, Uint8x16Map) \
V(Map, bool8x16_map, Bool8x16Map) \
V(Map, native_context_map, NativeContextMap) \
V(Map, fixed_array_map, FixedArrayMap) \
@@ -162,12 +164,15 @@ namespace internal {
V(FixedArray, experimental_natives_source_cache, \
ExperimentalNativesSourceCache) \
V(FixedArray, extra_natives_source_cache, ExtraNativesSourceCache) \
+ V(FixedArray, experimental_extra_natives_source_cache, \
+ ExperimentalExtraNativesSourceCache) \
V(FixedArray, code_stub_natives_source_cache, CodeStubNativesSourceCache) \
V(Script, empty_script, EmptyScript) \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
V(Cell, undefined_cell, UndefinedCell) \
V(JSObject, observation_state, ObservationState) \
V(Object, symbol_registry, SymbolRegistry) \
+ V(Object, script_list, ScriptList) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
V(FixedArray, materialized_objects, MaterializedObjects) \
@@ -213,20 +218,27 @@ namespace internal {
V(Boolean_string, "Boolean") \
V(callee_string, "callee") \
V(constructor_string, "constructor") \
+ V(default_string, "default") \
V(dot_result_string, ".result") \
V(eval_string, "eval") \
V(float32x4_string, "float32x4") \
V(Float32x4_string, "Float32x4") \
V(int32x4_string, "int32x4") \
V(Int32x4_string, "Int32x4") \
+ V(uint32x4_string, "uint32x4") \
+ V(Uint32x4_string, "Uint32x4") \
V(bool32x4_string, "bool32x4") \
V(Bool32x4_string, "Bool32x4") \
V(int16x8_string, "int16x8") \
V(Int16x8_string, "Int16x8") \
+ V(uint16x8_string, "uint16x8") \
+ V(Uint16x8_string, "Uint16x8") \
V(bool16x8_string, "bool16x8") \
V(Bool16x8_string, "Bool16x8") \
V(int8x16_string, "int8x16") \
V(Int8x16_string, "Int8x16") \
+ V(uint8x16_string, "uint8x16") \
+ V(Uint8x16_string, "Uint8x16") \
V(bool8x16_string, "bool8x16") \
V(Bool8x16_string, "Bool8x16") \
V(function_string, "function") \
@@ -246,9 +258,7 @@ namespace internal {
V(multiline_string, "multiline") \
V(sticky_string, "sticky") \
V(unicode_string, "unicode") \
- V(harmony_regexps_string, "harmony_regexps") \
- V(harmony_tostring_string, "harmony_tostring") \
- V(harmony_unicode_regexps_string, "harmony_unicode_regexps") \
+ V(harmony_tolength_string, "harmony_tolength") \
V(input_string, "input") \
V(index_string, "index") \
V(last_index_string, "lastIndex") \
@@ -264,14 +274,12 @@ namespace internal {
V(WeakSet_string, "WeakSet") \
V(for_string, "for") \
V(for_api_string, "for_api") \
- V(for_intern_string, "for_intern") \
- V(private_api_string, "private_api") \
- V(private_intern_string, "private_intern") \
V(Date_string, "Date") \
V(char_at_string, "CharAt") \
V(undefined_string, "undefined") \
- V(value_of_string, "valueOf") \
+ V(valueOf_string, "valueOf") \
V(stack_string, "stack") \
+ V(toString_string, "toString") \
V(toJSON_string, "toJSON") \
V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic") \
V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic") \
@@ -297,40 +305,55 @@ namespace internal {
V(Error_string, "Error") \
V(RegExp_string, "RegExp")
-#define PRIVATE_SYMBOL_LIST(V) \
- V(nonextensible_symbol) \
- V(sealed_symbol) \
- V(hash_code_symbol) \
- V(frozen_symbol) \
- V(nonexistent_symbol) \
- V(elements_transition_symbol) \
- V(observed_symbol) \
- V(uninitialized_symbol) \
- V(megamorphic_symbol) \
- V(premonomorphic_symbol) \
- V(stack_trace_symbol) \
- V(detailed_stack_trace_symbol) \
- V(normal_ic_symbol) \
- V(home_object_symbol) \
- V(intl_initialized_marker_symbol) \
- V(intl_impl_object_symbol) \
- V(promise_debug_marker_symbol) \
- V(promise_has_handler_symbol) \
- V(class_start_position_symbol) \
- V(class_end_position_symbol) \
- V(error_start_pos_symbol) \
- V(error_end_pos_symbol) \
- V(error_script_symbol) \
- V(internal_error_symbol)
-
-#define PUBLIC_SYMBOL_LIST(V) \
- V(has_instance_symbol, symbolHasInstance, Symbol.hasInstance) \
- V(is_concat_spreadable_symbol, symbolIsConcatSpreadable, \
- Symbol.isConcatSpreadable) \
- V(is_regexp_symbol, symbolIsRegExp, Symbol.isRegExp) \
- V(iterator_symbol, symbolIterator, Symbol.iterator) \
- V(to_string_tag_symbol, symbolToStringTag, Symbol.toStringTag) \
- V(unscopables_symbol, symbolUnscopables, Symbol.unscopables)
+#define PRIVATE_SYMBOL_LIST(V) \
+ V(array_iteration_kind_symbol) \
+ V(array_iterator_next_symbol) \
+ V(array_iterator_object_symbol) \
+ V(call_site_function_symbol) \
+ V(call_site_position_symbol) \
+ V(call_site_receiver_symbol) \
+ V(call_site_strict_symbol) \
+ V(class_end_position_symbol) \
+ V(class_start_position_symbol) \
+ V(detailed_stack_trace_symbol) \
+ V(elements_transition_symbol) \
+ V(error_end_pos_symbol) \
+ V(error_script_symbol) \
+ V(error_start_pos_symbol) \
+ V(formatted_stack_trace_symbol) \
+ V(frozen_symbol) \
+ V(hash_code_symbol) \
+ V(home_object_symbol) \
+ V(internal_error_symbol) \
+ V(intl_impl_object_symbol) \
+ V(intl_initialized_marker_symbol) \
+ V(megamorphic_symbol) \
+ V(nonexistent_symbol) \
+ V(nonextensible_symbol) \
+ V(normal_ic_symbol) \
+ V(observed_symbol) \
+ V(premonomorphic_symbol) \
+ V(promise_debug_marker_symbol) \
+ V(promise_has_handler_symbol) \
+ V(promise_on_resolve_symbol) \
+ V(promise_on_reject_symbol) \
+ V(promise_raw_symbol) \
+ V(promise_status_symbol) \
+ V(promise_value_symbol) \
+ V(sealed_symbol) \
+ V(stack_trace_symbol) \
+ V(string_iterator_iterated_string_symbol) \
+ V(string_iterator_next_index_symbol) \
+ V(uninitialized_symbol)
+
+#define PUBLIC_SYMBOL_LIST(V) \
+ V(has_instance_symbol, Symbol.hasInstance) \
+ V(is_concat_spreadable_symbol, Symbol.isConcatSpreadable) \
+ V(is_regexp_symbol, Symbol.isRegExp) \
+ V(iterator_symbol, Symbol.iterator) \
+ V(to_primitive_symbol, Symbol.toPrimitive) \
+ V(to_string_tag_symbol, Symbol.toStringTag) \
+ V(unscopables_symbol, Symbol.unscopables)
// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete and has omissions.
@@ -354,10 +377,13 @@ namespace internal {
V(MutableHeapNumberMap) \
V(Float32x4Map) \
V(Int32x4Map) \
+ V(Uint32x4Map) \
V(Bool32x4Map) \
V(Int16x8Map) \
+ V(Uint16x8Map) \
V(Bool16x8Map) \
V(Int8x16Map) \
+ V(Uint8x16Map) \
V(Bool8x16Map) \
V(NativeContextMap) \
V(FixedArrayMap) \
@@ -395,34 +421,21 @@ namespace internal {
PRIVATE_SYMBOL_LIST(V)
// Forward declarations.
+class ArrayBufferTracker;
+class GCIdleTimeAction;
+class GCIdleTimeHandler;
+class GCIdleTimeHeapState;
+class GCTracer;
+class HeapObjectsFilter;
class HeapStats;
class Isolate;
+class MemoryReducer;
+class ObjectStats;
+class Scavenger;
+class ScavengeJob;
class WeakObjectRetainer;
-typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
- Object** pointer);
-
-class StoreBufferRebuilder {
- public:
- explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
- : store_buffer_(store_buffer) {}
-
- void Callback(MemoryChunk* page, StoreBufferEvent event);
-
- private:
- StoreBuffer* store_buffer_;
-
- // We record in this variable how full the store buffer was when we started
- // iterating over the current page, finding pointers to new space. If the
- // store buffer overflows again we can exempt the page from the store buffer
- // by rewinding to this point instead of having to search the store buffer.
- Object*** start_of_current_page_;
- // The current page we are scanning in the store buffer iterator.
- MemoryChunk* current_page_;
-};
-
-
// A queue of objects promoted during scavenge. Each object is accompanied
// by it's size to avoid dereferencing a map pointer for scanning.
// The last page in to-space is used for the promotion queue. On conflict
@@ -528,182 +541,244 @@ class PromotionQueue {
};
-typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
- HeapObject* object);
+enum ArrayStorageAllocationMode {
+ DONT_INITIALIZE_ARRAY_ELEMENTS,
+ INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+};
-// External strings table is a place where all external strings are
-// registered. We need to keep track of such strings to properly
-// finalize them.
-class ExternalStringTable {
+class Heap {
public:
- // Registers an external string.
- inline void AddString(String* string);
+ // Declare all the root indices. This defines the root list order.
+ enum RootListIndex {
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+ STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
- inline void Iterate(ObjectVisitor* v);
+#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
+ INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
+#undef STRING_INDEX_DECLARATION
- // Restores internal invariant and gets rid of collected strings.
- // Must be called after each Iterate() that modified the strings.
- void CleanUp();
+#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
+ PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+#undef SYMBOL_INDEX_DECLARATION
- // Destroys all allocated memory.
- void TearDown();
+#define SYMBOL_INDEX_DECLARATION(name, description) k##name##RootIndex,
+ PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+#undef SYMBOL_INDEX_DECLARATION
- private:
- explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
+// Utility type maps
+#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
+ STRUCT_LIST(DECLARE_STRUCT_MAP)
+#undef DECLARE_STRUCT_MAP
+ kStringTableRootIndex,
- friend class Heap;
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+ SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+ kRootListLength,
+ kStrongRootListLength = kStringTableRootIndex,
+ kSmiRootsStart = kStringTableRootIndex + 1
+ };
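
The RootListIndex enum above is generated with X-macros: each list macro is instantiated with a declaration macro that stamps out one enumerator per root. A toy version showing the technique (toy list; the real inputs are STRONG_ROOT_LIST and friends above):

    #include <cstdio>

    #define TOY_ROOT_LIST(V) \
      V(UndefinedValue)      \
      V(NullValue)           \
      V(TrueValue)

    enum RootListIndex {
    #define ROOT_INDEX_DECLARATION(camel_name) k##camel_name##RootIndex,
      TOY_ROOT_LIST(ROOT_INDEX_DECLARATION)
    #undef ROOT_INDEX_DECLARATION
      kRootListLength
    };

    // The same list can generate a parallel name table, keeping the enum and
    // the table in sync by construction.
    static const char* kRootNames[] = {
    #define ROOT_NAME(camel_name) #camel_name,
        TOY_ROOT_LIST(ROOT_NAME)
    #undef ROOT_NAME
    };

    int main() {
      for (int i = 0; i < kRootListLength; i++) {
        std::printf("%d: %s\n", i, kRootNames[i]);
      }
      return 0;
    }
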
- inline void Verify();
+ // Indicates whether live bytes adjustment is triggered
+ // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER),
+ // - or from within GC (CONCURRENT_TO_SWEEPER),
+ // - or mutator code (CONCURRENT_TO_SWEEPER).
+ enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
- inline void AddOldString(String* string);
+ enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };
- // Notifies the table that only a prefix of the new list is valid.
- inline void ShrinkNewStrings(int position);
+ enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
- // To speed up scavenge collections new space string are kept
- // separate from old space strings.
- List<Object*> new_space_strings_;
- List<Object*> old_space_strings_;
+ // Taking this lock prevents the GC from entering a phase that relocates
+ // object references.
+ class RelocationLock {
+ public:
+ explicit RelocationLock(Heap* heap) : heap_(heap) {
+ heap_->relocation_mutex_.Lock();
+ }
- Heap* heap_;
+ ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }
- DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
-};
+ private:
+ Heap* heap_;
+ };
+ // An optional version of the above lock that can be used for some critical
+ // sections on the mutator thread; only safe since the GC currently does not
+ // do concurrent compaction.
+ class OptionalRelocationLock {
+ public:
+ OptionalRelocationLock(Heap* heap, bool concurrent)
+ : heap_(heap), concurrent_(concurrent) {
+ if (concurrent_) heap_->relocation_mutex_.Lock();
+ }
-enum ArrayStorageAllocationMode {
- DONT_INITIALIZE_ARRAY_ELEMENTS,
- INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
-};
+ ~OptionalRelocationLock() {
+ if (concurrent_) heap_->relocation_mutex_.Unlock();
+ }
+ private:
+ Heap* heap_;
+ bool concurrent_;
+ };
-class Heap {
- public:
- // Configure heap size in MB before setup. Return false if the heap has been
- // set up already.
- bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
- int max_executable_size, size_t code_range_size);
- bool ConfigureHeapDefault();
+ // Support for partial snapshots. After calling this we have a linear
+ // space to write objects in each space.
+ struct Chunk {
+ uint32_t size;
+ Address start;
+ Address end;
+ };
+ typedef List<Chunk> Reservation;
- // Prepares the heap, setting up memory areas that are needed in the isolate
- // without actually creating any objects.
- bool SetUp();
+ static const intptr_t kMinimumOldGenerationAllocationLimit =
+ 8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
- // Bootstraps the object heap with the core set of objects required to run.
- // Returns whether it succeeded.
- bool CreateHeapObjects();
+ static const int kInitalOldGenerationLimitFactor = 2;
- // Destroys all memory allocated by the heap.
- void TearDown();
+#if V8_OS_ANDROID
+ // Don't apply pointer multiplier on Android since it has no swap space and
+ // should instead adapt its heap size based on available physical memory.
+ static const int kPointerMultiplier = 1;
+#else
+ static const int kPointerMultiplier = i::kPointerSize / 4;
+#endif
- // Set the stack limit in the roots_ array. Some architectures generate
- // code that looks here, because it is faster than loading from the static
- // jslimit_/real_jslimit_ variable in the StackGuard.
- void SetStackLimits();
+ // The new space size has to be a power of 2. Sizes are in MB.
+ static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
+ static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;
- // Notifies the heap that is ok to start marking or other activities that
- // should not happen during deserialization.
- void NotifyDeserializationComplete();
+ // The old space size has to be a multiple of Page::kPageSize.
+ // Sizes are in MB.
+ static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeMediumMemoryDevice =
+ 256 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
+ static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;
- // Returns whether SetUp has been called.
- bool HasBeenSetUp();
+ // The executable size has to be a multiple of Page::kPageSize.
+ // Sizes are in MB.
+ static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
+ static const int kMaxExecutableSizeMediumMemoryDevice =
+ 192 * kPointerMultiplier;
+ static const int kMaxExecutableSizeHighMemoryDevice =
+ 256 * kPointerMultiplier;
+ static const int kMaxExecutableSizeHugeMemoryDevice =
+ 256 * kPointerMultiplier;
- // Returns the maximum amount of memory reserved for the heap. For
- // the young generation, we reserve 4 times the amount needed for a
- // semi space. The young generation consists of two semi spaces and
- // we reserve twice the amount needed for those in order to ensure
- // that new space can be aligned to its size.
- intptr_t MaxReserved() {
- return 4 * reserved_semispace_size_ + max_old_generation_size_;
- }
- int MaxSemiSpaceSize() { return max_semi_space_size_; }
- int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
- int InitialSemiSpaceSize() { return initial_semispace_size_; }
- int TargetSemiSpaceSize() { return target_semispace_size_; }
- intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
- intptr_t MaxExecutableSize() { return max_executable_size_; }
+ static const int kTraceRingBufferSize = 512;
+ static const int kStacktraceBufferSize = 512;
- // Returns the capacity of the heap in bytes w/o growing. Heap grows when
- // more spaces are needed until it reaches the limit.
- intptr_t Capacity();
+ static const double kMinHeapGrowingFactor;
+ static const double kMaxHeapGrowingFactor;
+ static const double kMaxHeapGrowingFactorMemoryConstrained;
+ static const double kMaxHeapGrowingFactorIdle;
+ static const double kTargetMutatorUtilization;
- // Returns the amount of memory currently committed for the heap.
- intptr_t CommittedMemory();
+ // Sloppy mode arguments object size.
+ static const int kSloppyArgumentsObjectSize =
+ JSObject::kHeaderSize + 2 * kPointerSize;
- // Returns the amount of memory currently committed for the old space.
- intptr_t CommittedOldGenerationMemory();
+ // Strict mode arguments have no callee, so the object is smaller.
+ static const int kStrictArgumentsObjectSize =
+ JSObject::kHeaderSize + 1 * kPointerSize;
- // Returns the amount of executable memory currently committed for the heap.
- intptr_t CommittedMemoryExecutable();
+ // Indices for direct access into argument objects.
+ static const int kArgumentsLengthIndex = 0;
- // Returns the amount of phyical memory currently committed for the heap.
- size_t CommittedPhysicalMemory();
+ // callee is only valid in sloppy mode.
+ static const int kArgumentsCalleeIndex = 1;
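
A worked sketch of the layout math above, assuming a 64-bit build and an illustrative three-word JSObject header (the real JSObject::kHeaderSize may differ):

    #include <cstdio>

    int main() {
      const int kPointerSize = 8;                // assumption: 64-bit build
      const int kHeaderSize = 3 * kPointerSize;  // illustrative header size
      const int kSloppySize = kHeaderSize + 2 * kPointerSize;  // length + callee
      const int kStrictSize = kHeaderSize + 1 * kPointerSize;  // length only
      // kArgumentsLengthIndex (0) is valid for both layouts;
      // kArgumentsCalleeIndex (1) only exists in the sloppy layout.
      printf("sloppy: %d bytes, strict: %d bytes\n", kSloppySize, kStrictSize);
      return 0;
    }
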
- // Returns the maximum amount of memory ever committed for the heap.
- intptr_t MaximumCommittedMemory() { return maximum_committed_; }
+ static const int kNoGCFlags = 0;
+ static const int kReduceMemoryFootprintMask = 1;
+ static const int kAbortIncrementalMarkingMask = 2;
+ static const int kFinalizeIncrementalMarkingMask = 4;
- // Updates the maximum committed memory for the heap. Should be called
- // whenever a space grows.
- void UpdateMaximumCommitted();
+ // Making the heap iterable requires us to abort incremental marking.
+ static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
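
Since these flags form a bitmask, callers combine them with bitwise OR; a small sketch with the constants restated for illustration:

    const int kNoGCFlags = 0;
    const int kReduceMemoryFootprintMask = 1;
    const int kAbortIncrementalMarkingMask = 2;
    const int kFinalizeIncrementalMarkingMask = 4;
    const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;

    // A compacting, heap-iterable collection would pass 1 | 2 == 3:
    const int kIterableCompactingFlags =
        kReduceMemoryFootprintMask | kMakeHeapIterableMask;
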
- // Returns the available bytes in space w/o growing.
- // Heap doesn't guarantee that it can allocate an object that requires
- // all available bytes. Check MaxHeapObjectSize() instead.
- intptr_t Available();
+ // The roots that have an index less than this are always in old space.
+ static const int kOldSpaceRoots = 0x20;
- // Returns of size of all objects residing in the heap.
- intptr_t SizeOfObjects();
+ // The minimum size of a HeapObject on the heap.
+ static const int kMinObjectSizeInWords = 2;
- intptr_t old_generation_allocation_limit() const {
- return old_generation_allocation_limit_;
- }
+ STATIC_ASSERT(kUndefinedValueRootIndex ==
+ Internals::kUndefinedValueRootIndex);
+ STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
+ STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
+ STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
+ STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
- // Return the starting address and a mask for the new space. And-masking an
- // address with the mask will result in the start address of the new space
- // for all addresses in either semispace.
- Address NewSpaceStart() { return new_space_.start(); }
- uintptr_t NewSpaceMask() { return new_space_.mask(); }
- Address NewSpaceTop() { return new_space_.top(); }
+ // Calculates the maximum amount of filler that could be required by the
+ // given alignment.
+ static int GetMaximumFillToAlign(AllocationAlignment alignment);
+ // Calculates the actual amount of filler required for a given address at the
+ // given alignment.
+ static int GetFillToAlign(Address address, AllocationAlignment alignment);
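
A self-contained sketch of the alignment arithmetic these helpers perform; a power-of-two alignment is assumed, and the real helpers additionally dispatch on AllocationAlignment:

    #include <cstdint>
    #include <cstdio>

    static int GetFillToAlignSketch(uintptr_t address, int alignment) {
      int misalignment = static_cast<int>(address & (alignment - 1));
      return misalignment == 0 ? 0 : alignment - misalignment;
    }

    int main() {
      printf("%d\n", GetFillToAlignSketch(0x1004, 8));  // 4 bytes of filler
      printf("%d\n", GetFillToAlignSketch(0x1008, 8));  // already aligned -> 0
      return 0;
    }
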
- NewSpace* new_space() { return &new_space_; }
- OldSpace* old_space() { return old_space_; }
- OldSpace* code_space() { return code_space_; }
- MapSpace* map_space() { return map_space_; }
- LargeObjectSpace* lo_space() { return lo_space_; }
- PagedSpace* paged_space(int idx) {
- switch (idx) {
- case OLD_SPACE:
- return old_space();
- case MAP_SPACE:
- return map_space();
- case CODE_SPACE:
- return code_space();
- case NEW_SPACE:
- case LO_SPACE:
- UNREACHABLE();
- }
- return NULL;
- }
- Space* space(int idx) {
- switch (idx) {
- case NEW_SPACE:
- return new_space();
- case LO_SPACE:
- return lo_space();
- default:
- return paged_space(idx);
- }
+ template <typename T>
+ static inline bool IsOneByte(T t, int chars);
+
+ static void FatalProcessOutOfMemory(const char* location,
+ bool take_snapshot = false);
+
+ static bool RootIsImmortalImmovable(int root_index);
+
+ // Checks whether the space is valid.
+ static bool IsValidAllocationSpace(AllocationSpace space);
+
+ // An object may have an AllocationSite associated with it through a trailing
+ // AllocationMemento. Its feedback should be updated when objects are found
+ // in the heap.
+ static inline void UpdateAllocationSiteFeedback(HeapObject* object,
+ ScratchpadSlotMode mode);
+
+ // Generated code can embed direct references to non-writable roots if
+ // they are in new space.
+ static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
+
+ // Zapping is needed for verify heap, and always done in debug builds.
+ static inline bool ShouldZapGarbage() {
+#ifdef DEBUG
+ return true;
+#else
+#ifdef VERIFY_HEAP
+ return FLAG_verify_heap;
+#else
+ return false;
+#endif
+#endif
}
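
For context, a sketch of what zapping looks like in practice: freed or filler memory is overwritten with a recognizable pattern so stale references fail loudly. The 0xdeadbeef value is illustrative; V8's actual zap constants are defined elsewhere in the sources:

    #include <cstdint>

    // Overwrites a block with a recognizable "zap" pattern (illustrative).
    static void ZapBlock(uint32_t* start, int word_count) {
      for (int i = 0; i < word_count; i++) start[i] = 0xdeadbeef;
    }
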
- // Returns name of the space.
- const char* GetSpaceName(int idx);
+ static double HeapGrowingFactor(double gc_speed, double mutator_speed);
+
+ // Copy a block of memory from src to dst. The block size must be aligned
+ // to the pointer size.
+ static inline void CopyBlock(Address dst, Address src, int byte_size);
- bool always_allocate() { return always_allocate_scope_depth_ != 0; }
- Address always_allocate_scope_depth_address() {
- return reinterpret_cast<Address>(&always_allocate_scope_depth_);
+ // Optimized version of memmove for blocks with pointer-size-aligned sizes
+ // and pointer-size-aligned addresses.
+ static inline void MoveBlock(Address dst, Address src, int byte_size);
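
A minimal sketch of why the alignment precondition matters: with word-aligned sizes and addresses, the copy can proceed one word at a time rather than byte-wise (the real implementations are more heavily optimized):

    #include <cstdint>

    static void CopyBlockSketch(uintptr_t* dst, const uintptr_t* src,
                                int byte_size) {
      int words = byte_size / static_cast<int>(sizeof(uintptr_t));
      for (int i = 0; i < words; i++) dst[i] = src[i];  // word-at-a-time copy
    }
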
+
+ // Determines a static visitor id based on the given {map} that can then be
+ // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
+ static int GetStaticVisitorIdForMap(Map* map);
+
+ // Notifies the heap that it is ok to start marking or other activities that
+ // should not happen during deserialization.
+ void NotifyDeserializationComplete();
+
+ intptr_t old_generation_allocation_limit() const {
+ return old_generation_allocation_limit_;
}
+ bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
+
Address* NewSpaceAllocationTopAddress() {
return new_space_.allocation_top_address();
}
@@ -724,73 +799,25 @@ class Heap {
return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize();
}
- // Returns a deep copy of the JavaScript object.
- // Properties and elements are copied too.
- // Optionally takes an AllocationSite to be appended in an AllocationMemento.
- MUST_USE_RESULT AllocationResult
- CopyJSObject(JSObject* source, AllocationSite* site = NULL);
-
- // Calculates the maximum amount of filler that could be required by the
- // given alignment.
- static int GetMaximumFillToAlign(AllocationAlignment alignment);
- // Calculates the actual amount of filler required for a given address at the
- // given alignment.
- static int GetFillToAlign(Address address, AllocationAlignment alignment);
-
- // Creates a filler object and returns a heap object immediately after it.
- MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
- int filler_size);
- // Creates a filler object if needed for alignment and returns a heap object
- // immediately after it. If any space is left after the returned object,
- // another filler object is created so the over allocated memory is iterable.
- MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
- int object_size,
- int allocation_size,
- AllocationAlignment alignment);
-
// Clear the Instanceof cache (used when a prototype changes).
inline void ClearInstanceofCache();
- // Iterates the whole code space to clear all ICs of the given kind.
- void ClearAllICsByKind(Code::Kind kind);
+ // Iterates the whole code space to clear all keyed store ICs.
+ void ClearAllKeyedStoreICs();
// FreeSpace objects have a null map after deserialization. Update the map.
void RepairFreeListsAfterDeserialization();
- template <typename T>
- static inline bool IsOneByte(T t, int chars);
-
// Move len elements within a given array from src_index to dst_index.
void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
- // Sloppy mode arguments object size.
- static const int kSloppyArgumentsObjectSize =
- JSObject::kHeaderSize + 2 * kPointerSize;
- // Strict mode arguments has no callee so it is smaller.
- static const int kStrictArgumentsObjectSize =
- JSObject::kHeaderSize + 1 * kPointerSize;
- // Indicies for direct access into argument objects.
- static const int kArgumentsLengthIndex = 0;
- // callee is only valid in sloppy mode.
- static const int kArgumentsCalleeIndex = 1;
-
- // Finalizes an external string by deleting the associated external
- // data and clearing the resource pointer.
- inline void FinalizeExternalString(String* string);
-
// Initialize a filler object to keep the ability to iterate over the heap
// when introducing gaps within pages.
void CreateFillerObjectAt(Address addr, int size);
bool CanMoveObjectStart(HeapObject* object);
- // Indicates whether live bytes adjustment is triggered
- // - from within the GC code before sweeping started (SEQUENTIAL_TO_SWEEPER),
- // - or from within GC (CONCURRENT_TO_SWEEPER),
- // - or mutator code (CONCURRENT_TO_SWEEPER).
- enum InvocationMode { SEQUENTIAL_TO_SWEEPER, CONCURRENT_TO_SWEEPER };
-
// Maintain consistency of live bytes during incremental marking.
void AdjustLiveBytes(HeapObject* object, int by, InvocationMode mode);
@@ -805,65 +832,12 @@ class Heap {
// Converts the given boolean condition to JavaScript boolean value.
inline Object* ToBoolean(bool condition);
- // Performs garbage collection operation.
- // Returns whether there is a chance that another major GC could
- // collect more garbage.
- inline bool CollectGarbage(
- AllocationSpace space, const char* gc_reason = NULL,
- const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
- static const int kNoGCFlags = 0;
- static const int kReduceMemoryFootprintMask = 1;
- static const int kAbortIncrementalMarkingMask = 2;
- static const int kFinalizeIncrementalMarkingMask = 4;
-
- // Making the heap iterable requires us to abort incremental marking.
- static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
-
- // Invoked when GC was requested via the stack guard.
- void HandleGCRequest();
-
- // Attempt to over-approximate the weak closure by marking object groups and
- // implicit references from global handles, but don't atomically complete
- // marking. If we continue to mark incrementally, we might have marked
- // objects that die later.
- void OverApproximateWeakClosure(const char* gc_reason);
-
- // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
- // non-zero, then the slower precise sweeper is used, which leaves the heap
- // in a state where we can iterate over the heap visiting all objects.
- void CollectAllGarbage(
- int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL,
- const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
- // Last hope GC, should try to squeeze as much as possible.
- void CollectAllAvailableGarbage(const char* gc_reason = NULL);
-
// Check whether the heap is currently iterable.
bool IsHeapIterable();
// Notify the heap that a context has been disposed.
int NotifyContextDisposed(bool dependant_context);
- // Start incremental marking and ensure that idle time handler can perform
- // incremental steps.
- void StartIdleIncrementalMarking();
-
- // Starts incremental marking assuming incremental marking is currently
- // stopped.
- void StartIncrementalMarking(int gc_flags,
- const GCCallbackFlags gc_callback_flags,
- const char* reason = nullptr);
-
- // Performs incremental marking steps of step_size_in_bytes as long as
- // deadline_ins_ms is not reached. step_size_in_bytes can be 0 to compute
- // an estimate increment. Returns the remaining time that cannot be used
- // for incremental marking anymore because a single step would exceed the
- // deadline.
- double AdvanceIncrementalMarking(
- intptr_t step_size_in_bytes, double deadline_in_ms,
- IncrementalMarking::StepActions step_actions);
-
inline void increment_scan_on_scavenge_pages() {
scan_on_scavenge_pages_++;
if (FLAG_gc_verbose) {
@@ -878,49 +852,6 @@ class Heap {
}
}
- PromotionQueue* promotion_queue() { return &promotion_queue_; }
-
- void AddGCPrologueCallback(v8::Isolate::GCCallback callback,
- GCType gc_type_filter, bool pass_isolate = true);
- void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback);
-
- void AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
- GCType gc_type_filter, bool pass_isolate = true);
- void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback);
-
-// Heap root getters. We have versions with and without type::cast() here.
-// You can't use type::cast during GC because the assert fails.
-// TODO(1490): Try removing the unchecked accessors, now that GC marking does
-// not corrupt the map.
-#define ROOT_ACCESSOR(type, name, camel_name) \
- inline type* name(); \
- type* raw_unchecked_##name() { \
- return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]); \
- }
- ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR
-
-// Utility type maps
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
- STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
-
-#define STRING_ACCESSOR(name, str) inline String* name();
- INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name) inline Symbol* name();
- PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
-#define SYMBOL_ACCESSOR(name, varname, description) inline Symbol* name();
- PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
-#undef SYMBOL_ACCESSOR
-
- // The hidden_string is special because it is the empty string, but does
- // not match the empty string.
- String* hidden_string() { return hidden_string_; }
-
void set_native_contexts_list(Object* object) {
native_contexts_list_ = object;
}
@@ -949,180 +880,30 @@ class Heap {
// Number of mark-sweeps.
int ms_count() const { return ms_count_; }
- // Iterates over all roots in the heap.
- void IterateRoots(ObjectVisitor* v, VisitMode mode);
- // Iterates over all strong roots in the heap.
- void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
- // Iterates over entries in the smi roots list. Only interesting to the
- // serializer/deserializer, since GC does not care about smis.
- void IterateSmiRoots(ObjectVisitor* v);
- // Iterates over all the other roots in the heap.
- void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
-
- // Iterate pointers to from semispace of new space found in memory interval
- // from start to end within |object|.
- void IteratePointersToFromSpace(HeapObject* target, int size,
- ObjectSlotCallback callback);
-
- void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
- Address end, bool record_slots,
- ObjectSlotCallback callback);
-
- // Returns whether the object resides in new space.
- inline bool InNewSpace(Object* object);
- inline bool InNewSpace(Address address);
- inline bool InNewSpacePage(Address address);
- inline bool InFromSpace(Object* object);
- inline bool InToSpace(Object* object);
-
- // Returns whether the object resides in old space.
- inline bool InOldSpace(Address address);
- inline bool InOldSpace(Object* object);
-
- // Checks whether an address/object in the heap (including auxiliary
- // area and unused area).
- bool Contains(Address addr);
- bool Contains(HeapObject* value);
-
- // Checks whether an address/object in a space.
- // Currently used by tests, serialization and heap verification only.
- bool InSpace(Address addr, AllocationSpace space);
- bool InSpace(HeapObject* value, AllocationSpace space);
-
- // Checks whether the space is valid.
- static bool IsValidAllocationSpace(AllocationSpace space);
-
// Checks whether the given object is allowed to be migrated from its
// current space into the given destination space. Used for debugging.
inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
- // Sets the stub_cache_ (only used when expanding the dictionary).
- void public_set_code_stubs(UnseededNumberDictionary* value) {
- roots_[kCodeStubsRootIndex] = value;
- }
-
- // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
- void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
- roots_[kNonMonomorphicCacheRootIndex] = value;
- }
-
- void public_set_empty_script(Script* script) {
- roots_[kEmptyScriptRootIndex] = script;
- }
-
- void public_set_store_buffer_top(Address* top) {
- roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
- }
-
- void public_set_materialized_objects(FixedArray* objects) {
- roots_[kMaterializedObjectsRootIndex] = objects;
- }
-
- // Generated code can embed this address to get access to the roots.
- Object** roots_array_start() { return roots_; }
-
- Address* store_buffer_top_address() {
- return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
- }
-
- static bool RootIsImmortalImmovable(int root_index);
void CheckHandleCount();
-#ifdef VERIFY_HEAP
- // Verify the heap is in its normal state before or after a GC.
- void Verify();
-#endif
-
-#ifdef DEBUG
- void Print();
- void PrintHandles();
-
- // Report heap statistics.
- void ReportHeapStatistics(const char* title);
- void ReportCodeStatistics(const char* title);
-#endif
-
- // Zapping is needed for verify heap, and always done in debug builds.
- static inline bool ShouldZapGarbage() {
-#ifdef DEBUG
- return true;
-#else
-#ifdef VERIFY_HEAP
- return FLAG_verify_heap;
-#else
- return false;
-#endif
-#endif
- }
-
// Number of "runtime allocations" done so far.
uint32_t allocations_count() { return allocations_count_; }
// Returns deterministic "time" value in ms. Works only with
// FLAG_verify_predictable.
- double synthetic_time() { return allocations_count_ / 2.0; }
+ double synthetic_time() { return allocations_count() / 2.0; }
// Print short heap statistics.
void PrintShortHeapStatistics();
- size_t object_count_last_gc(size_t index) {
- return index < OBJECT_STATS_COUNT ? object_counts_last_time_[index] : 0;
- }
- size_t object_size_last_gc(size_t index) {
- return index < OBJECT_STATS_COUNT ? object_sizes_last_time_[index] : 0;
- }
-
- // Write barrier support for address[offset] = o.
- INLINE(void RecordWrite(Address address, int offset));
-
- // Write barrier support for address[start : start + len[ = o.
- INLINE(void RecordWrites(Address address, int start, int len));
-
- enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
inline HeapState gc_state() { return gc_state_; }
inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
-#ifdef DEBUG
- void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
-
- void TracePathToObjectFrom(Object* target, Object* root);
- void TracePathToObject(Object* target);
- void TracePathToGlobal();
-#endif
-
- // Callback function passed to Heap::Iterate etc. Copies an object if
- // necessary, the object might be promoted to an old space. The caller must
- // ensure the precondition that the object is (a) a heap object and (b) in
- // the heap's from space.
- static inline void ScavengePointer(HeapObject** p);
- static inline void ScavengeObject(HeapObject** p, HeapObject* object);
-
- // Slow part of scavenge object.
- static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
-
- enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };
-
// If an object has an AllocationMemento trailing it, return it, otherwise
// return NULL;
inline AllocationMemento* FindAllocationMemento(HeapObject* object);
- // An object may have an AllocationSite associated with it through a trailing
- // AllocationMemento. Its feedback should be updated when objects are found
- // in the heap.
- static inline void UpdateAllocationSiteFeedback(HeapObject* object,
- ScratchpadSlotMode mode);
-
- // Support for partial snapshots. After calling this we have a linear
- // space to write objects in each space.
- struct Chunk {
- uint32_t size;
- Address start;
- Address end;
- };
-
- typedef List<Chunk> Reservation;
-
// Returns false if not able to reserve.
bool ReserveSpace(Reservation* reservations);
@@ -1132,678 +913,671 @@ class Heap {
void CreateApiObjects();
- inline intptr_t PromotedTotalSize() {
- int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
- if (total > std::numeric_limits<intptr_t>::max()) {
- // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations.
- return std::numeric_limits<intptr_t>::max();
- }
- if (total < 0) return 0;
- return static_cast<intptr_t>(total);
- }
+ // Implements the corresponding V8 API function.
+ bool IdleNotification(double deadline_in_seconds);
+ bool IdleNotification(int idle_time_in_ms);
- inline intptr_t OldGenerationSpaceAvailable() {
- return old_generation_allocation_limit_ - PromotedTotalSize();
- }
+ double MonotonicallyIncreasingTimeInMs();
- inline intptr_t OldGenerationCapacityAvailable() {
- return max_old_generation_size_ - PromotedTotalSize();
- }
+ void RecordStats(HeapStats* stats, bool take_snapshot = false);
- static const intptr_t kMinimumOldGenerationAllocationLimit =
- 8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
+ // Check the new space expansion criteria and expand semispaces if hit.
+ void CheckNewSpaceExpansionCriteria();
- static const int kInitalOldGenerationLimitFactor = 2;
+ inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
+ if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
-#if V8_OS_ANDROID
- // Don't apply pointer multiplier on Android since it has no swap space and
- // should instead adapt it's heap size based on available physical memory.
- static const int kPointerMultiplier = 1;
-#else
- static const int kPointerMultiplier = i::kPointerSize / 4;
-#endif
+ intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();
- // The new space size has to be a power of 2. Sizes are in MB.
- static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
- static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;
+ if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
- // The old space size has to be a multiple of Page::kPageSize.
- // Sizes are in MB.
- static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeMediumMemoryDevice =
- 256 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
- static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;
+ return false;
+ }
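
A worked example of the check above with illustrative numbers: the limit is reduced by new-space capacity because everything currently in new space may still be promoted into the old generation:

    #include <cstdio>

    int main() {
      long limit = 64L * 1024 * 1024;              // allocation limit: 64 MB
      long new_space_capacity = 8L * 1024 * 1024;  // 8 MB
      long promoted_total = 57L * 1024 * 1024;     // promoted + external memory
      long adjusted = limit - new_space_capacity;  // 56 MB
      printf("start marking: %s\n", promoted_total >= adjusted ? "yes" : "no");
      return 0;
    }
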
- // The executable size has to be a multiple of Page::kPageSize.
- // Sizes are in MB.
- static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
- static const int kMaxExecutableSizeMediumMemoryDevice =
- 192 * kPointerMultiplier;
- static const int kMaxExecutableSizeHighMemoryDevice =
- 256 * kPointerMultiplier;
- static const int kMaxExecutableSizeHugeMemoryDevice =
- 256 * kPointerMultiplier;
+ void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
- static const int kTraceRingBufferSize = 512;
- static const int kStacktraceBufferSize = 512;
+ // An object should be promoted if the object has survived a
+ // scavenge operation.
+ inline bool ShouldBePromoted(Address old_address, int object_size);
- static const double kMinHeapGrowingFactor;
- static const double kMaxHeapGrowingFactor;
- static const double kMaxHeapGrowingFactorMemoryConstrained;
- static const double kMaxHeapGrowingFactorIdle;
- static const double kTargetMutatorUtilization;
+ void ClearNormalizedMapCaches();
- static double HeapGrowingFactor(double gc_speed, double mutator_speed);
+ void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
- // Calculates the allocation limit based on a given growing factor and a
- // given old generation size.
- intptr_t CalculateOldGenerationAllocationLimit(double factor,
- intptr_t old_gen_size);
+ bool concurrent_sweeping_enabled() { return concurrent_sweeping_enabled_; }
- // Sets the allocation limit to trigger the next full garbage collection.
- void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
- double mutator_speed);
+ inline bool OldGenerationAllocationLimitReached();
- // Decrease the allocation limit if the new limit based on the given
- // parameters is lower than the current limit.
- void DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
- double gc_speed,
- double mutator_speed);
+ void QueueMemoryChunkForFree(MemoryChunk* chunk);
+ void FilterStoreBufferEntriesOnAboutToBeFreedPages();
+ void FreeQueuedChunks(MemoryChunk* list_head);
+ void FreeQueuedChunks();
+ void WaitUntilUnmappingOfFreeChunksCompleted();
- // Indicates whether inline bump-pointer allocation has been disabled.
- bool inline_allocation_disabled() { return inline_allocation_disabled_; }
+ // Completely clear the Instanceof cache (to stop it keeping objects alive
+ // around a GC).
+ inline void CompletelyClearInstanceofCache();
- // Switch whether inline bump-pointer allocation should be used.
- void EnableInlineAllocation();
- void DisableInlineAllocation();
+ inline uint32_t HashSeed();
- // Implements the corresponding V8 API function.
- bool IdleNotification(double deadline_in_seconds);
- bool IdleNotification(int idle_time_in_ms);
+ inline int NextScriptId();
- double MonotonicallyIncreasingTimeInMs();
+ inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
+ inline void SetConstructStubDeoptPCOffset(int pc_offset);
+ inline void SetGetterStubDeoptPCOffset(int pc_offset);
+ inline void SetSetterStubDeoptPCOffset(int pc_offset);
- // Declare all the root indices. This defines the root list order.
- enum RootListIndex {
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
- STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
+ // For post mortem debugging.
+ void RememberUnmappedPage(Address page, bool compacted);
-#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
- INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
-#undef STRING_DECLARATION
+ // Global inline caching age: it is incremented on some GCs after context
+ // disposal. We use it to flush inline caches.
+ int global_ic_age() { return global_ic_age_; }
-#define SYMBOL_INDEX_DECLARATION(name) k##name##RootIndex,
- PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_INDEX_DECLARATION
+ void AgeInlineCaches() {
+ global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
+ }
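
The masking above makes the age a small wrap-around counter; a sketch with an assumed 3-bit field (the actual width of ICAgeBits is not restated in this diff):

    #include <cstdio>

    int main() {
      const int kMax = 7;  // assumption: 3-bit IC age field
      int age = 7;
      age = (age + 1) & kMax;       // wraps to 0 instead of overflowing
      printf("aged to %d\n", age);  // prints 0
      return 0;
    }
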
-#define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex,
- PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
-#undef SYMBOL_INDEX_DECLARATION
+ int64_t amount_of_external_allocated_memory() {
+ return amount_of_external_allocated_memory_;
+ }
-// Utility type maps
-#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
- STRUCT_LIST(DECLARE_STRUCT_MAP)
-#undef DECLARE_STRUCT_MAP
- kStringTableRootIndex,
+ void update_amount_of_external_allocated_memory(int64_t delta) {
+ amount_of_external_allocated_memory_ += delta;
+ }
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
- SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
- kRootListLength,
- kStrongRootListLength = kStringTableRootIndex,
- kSmiRootsStart = kStringTableRootIndex + 1
- };
+ void DeoptMarkedAllocationSites();
- Object* root(RootListIndex index) { return roots_[index]; }
+ bool DeoptMaybeTenuredAllocationSites() {
+ return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
+ }
- STATIC_ASSERT(kUndefinedValueRootIndex ==
- Internals::kUndefinedValueRootIndex);
- STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
- STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
- STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
- STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
+ void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
+ Handle<DependentCode> dep);
- // Generated code can embed direct references to non-writable roots if
- // they are in new space.
- static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
- // Generated code can treat direct references to this root as constant.
- bool RootCanBeTreatedAsConstant(RootListIndex root_index);
+ DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);
- Map* MapForFixedTypedArray(ExternalArrayType array_type);
- RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
+ void AddRetainedMap(Handle<Map> map);
- RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
- FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
+ // This event is triggered after a successful allocation of a new object made
+ // by the runtime. Allocations of target space for object evacuation do not
+ // trigger the event. In order to track ALL allocations one must turn off
+ // FLAG_inline_new and FLAG_use_allocation_folding.
+ inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);
- void RecordStats(HeapStats* stats, bool take_snapshot = false);
+ // This event is triggered after an object is moved to a new place.
+ inline void OnMoveEvent(HeapObject* target, HeapObject* source,
+ int size_in_bytes);
- // Copy block of memory from src to dst. Size of block should be aligned
- // by pointer size.
- static inline void CopyBlock(Address dst, Address src, int byte_size);
+ bool deserialization_complete() const { return deserialization_complete_; }
- // Optimized version of memmove for blocks with pointer size aligned sizes and
- // pointer size aligned addresses.
- static inline void MoveBlock(Address dst, Address src, int byte_size);
+ bool HasLowAllocationRate();
+ bool HasHighFragmentation();
+ bool HasHighFragmentation(intptr_t used, intptr_t committed);
- // Check new space expansion criteria and expand semispaces if it was hit.
- void CheckNewSpaceExpansionCriteria();
+ bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
- inline void IncrementPromotedObjectsSize(int object_size) {
- DCHECK(object_size > 0);
- promoted_objects_size_ += object_size;
- }
+ // ===========================================================================
+ // Initialization. ===========================================================
+ // ===========================================================================
- inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
- DCHECK(object_size > 0);
- semi_space_copied_object_size_ += object_size;
- }
+ // Configure heap size in MB before setup. Return false if the heap has been
+ // set up already.
+ bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
+ int max_executable_size, size_t code_range_size);
+ bool ConfigureHeapDefault();
- inline intptr_t SurvivedNewSpaceObjectSize() {
- return promoted_objects_size_ + semi_space_copied_object_size_;
- }
+ // Prepares the heap, setting up memory areas that are needed in the isolate
+ // without actually creating any objects.
+ bool SetUp();
- inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
+ // Bootstraps the object heap with the core set of objects required to run.
+ // Returns whether it succeeded.
+ bool CreateHeapObjects();
- inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
+ // Destroys all memory allocated by the heap.
+ void TearDown();
- inline void IncrementNodesPromoted() { nodes_promoted_++; }
+ // Returns whether SetUp has been called.
+ bool HasBeenSetUp();
- inline void IncrementYoungSurvivorsCounter(int survived) {
- DCHECK(survived >= 0);
- survived_last_scavenge_ = survived;
- survived_since_last_expansion_ += survived;
- }
+ // ===========================================================================
+ // Getters for spaces. =======================================================
+ // ===========================================================================
- inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
- if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
+ // Return the starting address and a mask for the new space. And-masking an
+ // address with the mask will result in the start address of the new space
+ // for all addresses in either semispace.
+ Address NewSpaceStart() { return new_space_.start(); }
+ uintptr_t NewSpaceMask() { return new_space_.mask(); }
+ Address NewSpaceTop() { return new_space_.top(); }
- intptr_t adjusted_allocation_limit = limit - new_space_.Capacity();
+ NewSpace* new_space() { return &new_space_; }
+ OldSpace* old_space() { return old_space_; }
+ OldSpace* code_space() { return code_space_; }
+ MapSpace* map_space() { return map_space_; }
+ LargeObjectSpace* lo_space() { return lo_space_; }
- if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
+ PagedSpace* paged_space(int idx) {
+ switch (idx) {
+ case OLD_SPACE:
+ return old_space();
+ case MAP_SPACE:
+ return map_space();
+ case CODE_SPACE:
+ return code_space();
+ case NEW_SPACE:
+ case LO_SPACE:
+ UNREACHABLE();
+ }
+ return NULL;
+ }
- return false;
+ Space* space(int idx) {
+ switch (idx) {
+ case NEW_SPACE:
+ return new_space();
+ case LO_SPACE:
+ return lo_space();
+ default:
+ return paged_space(idx);
+ }
}
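
A self-contained sketch of the dispatch pattern above: paged spaces are addressed by index while new space and large-object space are special-cased. The enum ordering mirrors the diff but is restated here for illustration only:

    #include <cstdio>

    enum AllocationSpace { NEW_SPACE, OLD_SPACE, CODE_SPACE, MAP_SPACE, LO_SPACE };

    static const char* SpaceNameSketch(int idx) {
      switch (idx) {
        case NEW_SPACE:  return "new_space";
        case OLD_SPACE:  return "old_space";
        case CODE_SPACE: return "code_space";
        case MAP_SPACE:  return "map_space";
        case LO_SPACE:   return "lo_space";
      }
      return "unknown";
    }

    int main() {
      for (int idx = NEW_SPACE; idx <= LO_SPACE; idx++)
        printf("%d -> %s\n", idx, SpaceNameSketch(idx));
      return 0;
    }
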
- void UpdateNewSpaceReferencesInExternalStringTable(
- ExternalStringTableUpdaterCallback updater_func);
+ // Returns name of the space.
+ const char* GetSpaceName(int idx);
- void UpdateReferencesInExternalStringTable(
- ExternalStringTableUpdaterCallback updater_func);
+ // ===========================================================================
+ // Getters to other components. ==============================================
+ // ===========================================================================
- void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
- void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
+ GCTracer* tracer() { return tracer_; }
- void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
+ PromotionQueue* promotion_queue() { return &promotion_queue_; }
- // An object should be promoted if the object has survived a
- // scavenge operation.
- inline bool ShouldBePromoted(Address old_address, int object_size);
+ inline Isolate* isolate();
- void ClearNormalizedMapCaches();
+ MarkCompactCollector* mark_compact_collector() {
+ return mark_compact_collector_;
+ }
- GCTracer* tracer() { return &tracer_; }
+ // ===========================================================================
+ // Root set access. ==========================================================
+ // ===========================================================================
- // Returns the size of objects residing in non new spaces.
- intptr_t PromotedSpaceSizeOfObjects();
+ // Heap root getters.
+#define ROOT_ACCESSOR(type, name, camel_name) inline type* name();
+ ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
- double total_regexp_code_generated() { return total_regexp_code_generated_; }
- void IncreaseTotalRegexpCodeGenerated(int size) {
- total_regexp_code_generated_ += size;
- }
+ // Utility type maps.
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) inline Map* name##_map();
+ STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
- void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
- if (is_crankshafted) {
- crankshaft_codegen_bytes_generated_ += size;
- } else {
- full_codegen_bytes_generated_ += size;
- }
- }
+#define STRING_ACCESSOR(name, str) inline String* name();
+ INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
- void UpdateNewSpaceAllocationCounter() {
- new_space_allocation_counter_ = NewSpaceAllocationCounter();
- }
+#define SYMBOL_ACCESSOR(name) inline Symbol* name();
+ PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
- size_t NewSpaceAllocationCounter() {
- return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
- }
+#define SYMBOL_ACCESSOR(name, description) inline Symbol* name();
+ PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
- // This should be used only for testing.
- void set_new_space_allocation_counter(size_t new_value) {
- new_space_allocation_counter_ = new_value;
+ Object* root(RootListIndex index) { return roots_[index]; }
+ Handle<Object> root_handle(RootListIndex index) {
+ return Handle<Object>(&roots_[index]);
}
- void UpdateOldGenerationAllocationCounter() {
- old_generation_allocation_counter_ = OldGenerationAllocationCounter();
- }
+ // Generated code can embed this address to get access to the roots.
+ Object** roots_array_start() { return roots_; }
- size_t OldGenerationAllocationCounter() {
- return old_generation_allocation_counter_ + PromotedSinceLastGC();
+ // Sets the stub_cache_ (only used when expanding the dictionary).
+ void SetRootCodeStubs(UnseededNumberDictionary* value) {
+ roots_[kCodeStubsRootIndex] = value;
}
- // This should be used only for testing.
- void set_old_generation_allocation_counter(size_t new_value) {
- old_generation_allocation_counter_ = new_value;
+ // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
+ void SetRootNonMonomorphicCache(UnseededNumberDictionary* value) {
+ roots_[kNonMonomorphicCacheRootIndex] = value;
}
- size_t PromotedSinceLastGC() {
- return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
+ void SetRootMaterializedObjects(FixedArray* objects) {
+ roots_[kMaterializedObjectsRootIndex] = objects;
}
- // Update GC statistics that are tracked on the Heap.
- void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
- double marking_time);
+ void SetRootCodeStubContext(Object* value) {
+ roots_[kCodeStubContextRootIndex] = value;
+ }
- // Returns maximum GC pause.
- double get_max_gc_pause() { return max_gc_pause_; }
+ void SetRootCodeStubExportsObject(JSObject* value) {
+ roots_[kCodeStubExportsObjectRootIndex] = value;
+ }
- // Returns maximum size of objects alive after GC.
- intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
+ void SetRootScriptList(Object* value) {
+ roots_[kScriptListRootIndex] = value;
+ }
- // Returns minimal interval between two subsequent collections.
- double get_min_in_mutator() { return min_in_mutator_; }
+ void SetRootStringTable(StringTable* value) {
+ roots_[kStringTableRootIndex] = value;
+ }
- void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
+ // Set the stack limit in the roots_ array. Some architectures generate
+ // code that looks here, because it is faster than loading from the static
+ // jslimit_/real_jslimit_ variable in the StackGuard.
+ void SetStackLimits();
- MarkCompactCollector* mark_compact_collector() {
- return &mark_compact_collector_;
- }
+ // Generated code can treat direct references to this root as constant.
+ bool RootCanBeTreatedAsConstant(RootListIndex root_index);
- StoreBuffer* store_buffer() { return &store_buffer_; }
+ Map* MapForFixedTypedArray(ExternalArrayType array_type);
+ RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
- IncrementalMarking* incremental_marking() { return &incremental_marking_; }
+ RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
+ FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
- ExternalStringTable* external_string_table() {
- return &external_string_table_;
- }
+ void RegisterStrongRoots(Object** start, Object** end);
+ void UnregisterStrongRoots(Object** start);
- bool concurrent_sweeping_enabled() { return concurrent_sweeping_enabled_; }
+ // ===========================================================================
+ // Inline allocation. ========================================================
+ // ===========================================================================
- inline Isolate* isolate();
+ // Indicates whether inline bump-pointer allocation has been disabled.
+ bool inline_allocation_disabled() { return inline_allocation_disabled_; }
- void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
- void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
+ // Switch whether inline bump-pointer allocation should be used.
+ void EnableInlineAllocation();
+ void DisableInlineAllocation();
- inline bool OldGenerationAllocationLimitReached();
+ // ===========================================================================
+ // Methods triggering GCs. ===================================================
+ // ===========================================================================
- void QueueMemoryChunkForFree(MemoryChunk* chunk);
- void FreeQueuedChunks();
+ // Performs garbage collection operation.
+ // Returns whether there is a chance that another major GC could
+ // collect more garbage.
+ inline bool CollectGarbage(
+ AllocationSpace space, const char* gc_reason = NULL,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
- int gc_count() const { return gc_count_; }
+ // Performs a full garbage collection. If (flags & kMakeHeapIterableMask) is
+ // non-zero, then the slower precise sweeper is used, which leaves the heap
+ // in a state where we can iterate over the heap visiting all objects.
+ void CollectAllGarbage(
+ int flags = kFinalizeIncrementalMarkingMask, const char* gc_reason = NULL,
+ const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
- bool RecentIdleNotificationHappened();
+ // Last hope GC, should try to squeeze as much as possible.
+ void CollectAllAvailableGarbage(const char* gc_reason = NULL);
- // Completely clear the Instanceof cache (to stop it keeping objects alive
- // around a GC).
- inline void CompletelyClearInstanceofCache();
+ // Reports an external memory pressure event; either performs a major GC or
+ // completes incremental marking in order to free external resources.
+ void ReportExternalMemoryPressure(const char* gc_reason = NULL);
- // The roots that have an index less than this are always in old space.
- static const int kOldSpaceRoots = 0x20;
+ // Invoked when GC was requested via the stack guard.
+ void HandleGCRequest();
- inline uint32_t HashSeed();
+ // ===========================================================================
+ // Iterators. ================================================================
+ // ===========================================================================
- inline Smi* NextScriptId();
+ // Iterates over all roots in the heap.
+ void IterateRoots(ObjectVisitor* v, VisitMode mode);
+ // Iterates over all strong roots in the heap.
+ void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+ // Iterates over entries in the smi roots list. Only interesting to the
+ // serializer/deserializer, since GC does not care about smis.
+ void IterateSmiRoots(ObjectVisitor* v);
+ // Iterates over all the other roots in the heap.
+ void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
- inline void SetArgumentsAdaptorDeoptPCOffset(int pc_offset);
- inline void SetConstructStubDeoptPCOffset(int pc_offset);
- inline void SetGetterStubDeoptPCOffset(int pc_offset);
- inline void SetSetterStubDeoptPCOffset(int pc_offset);
+ // Iterates pointers to the from-semispace of new space found in the memory
+ // interval from start to end within |object|.
+ void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
+ Address end, bool record_slots,
+ ObjectSlotCallback callback);
- // For post mortem debugging.
- void RememberUnmappedPage(Address page, bool compacted);
+ // ===========================================================================
+ // Store buffer API. =========================================================
+ // ===========================================================================
- // Global inline caching age: it is incremented on some GCs after context
- // disposal. We use it to flush inline caches.
- int global_ic_age() { return global_ic_age_; }
+ // Write barrier support for address[offset] = o.
+ INLINE(void RecordWrite(Address address, int offset));
- void AgeInlineCaches() {
- global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
- }
+ // Write barrier support for address[start : start + len[ = o.
+ INLINE(void RecordWrites(Address address, int start, int len));
- int64_t amount_of_external_allocated_memory() {
- return amount_of_external_allocated_memory_;
+ Address* store_buffer_top_address() {
+ return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
}
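
For orientation, a toy model of the store-buffer idea behind these write barriers: when a pointer is written into an object, the slot address is recorded so the next scavenge can find old-to-new references without scanning the whole old generation. The real buffer also filters and deduplicates entries; everything below is an illustrative sketch:

    #include <cstdint>

    struct StoreBufferSketch {
      uintptr_t slots[1024];
      int top = 0;
      // Analogue of RecordWrite(address, offset): remember the written slot.
      void RecordWrite(uintptr_t object_address, int offset) {
        if (top < 1024) slots[top++] = object_address + offset;
      }
    };
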
- void DeoptMarkedAllocationSites();
+ // ===========================================================================
+ // Incremental marking API. ==================================================
+ // ===========================================================================
- bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
+ // Start incremental marking and ensure that idle time handler can perform
+ // incremental steps.
+ void StartIdleIncrementalMarking();
- bool DeoptMaybeTenuredAllocationSites() {
- return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
- }
+ // Starts incremental marking assuming incremental marking is currently
+ // stopped.
+ void StartIncrementalMarking(int gc_flags = kNoGCFlags,
+ const GCCallbackFlags gc_callback_flags =
+ GCCallbackFlags::kNoGCCallbackFlags,
+ const char* reason = nullptr);
- // ObjectStats are kept in two arrays, counts and sizes. Related stats are
- // stored in a contiguous linear buffer. Stats groups are stored one after
- // another.
- enum {
- FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
- FIRST_FIXED_ARRAY_SUB_TYPE =
- FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
- FIRST_CODE_AGE_SUB_TYPE =
- FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
- OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
- };
+ void FinalizeIncrementalMarkingIfComplete(const char* comment);
- void RecordObjectStats(InstanceType type, size_t size) {
- DCHECK(type <= LAST_TYPE);
- object_counts_[type]++;
- object_sizes_[type] += size;
- }
+ bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms);
- void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
- int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
- int code_age_index =
- FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
- DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
- code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
- DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
- code_age_index < OBJECT_STATS_COUNT);
- object_counts_[code_sub_type_index]++;
- object_sizes_[code_sub_type_index] += size;
- object_counts_[code_age_index]++;
- object_sizes_[code_age_index] += size;
- }
+ IncrementalMarking* incremental_marking() { return incremental_marking_; }
- void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
- DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
- object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
- object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
- }
+ // ===========================================================================
+ // External string table API. ================================================
+ // ===========================================================================
- void TraceObjectStats();
- void TraceObjectStat(const char* name, int count, int size, double time);
- void CheckpointObjectStats();
- bool GetObjectTypeName(size_t index, const char** object_type,
- const char** object_sub_type);
+ // Registers an external string.
+ inline void RegisterExternalString(String* string);
- void RegisterStrongRoots(Object** start, Object** end);
- void UnregisterStrongRoots(Object** start);
+ // Finalizes an external string by deleting the associated external
+ // data and clearing the resource pointer.
+ inline void FinalizeExternalString(String* string);
- // Taking this lock prevents the GC from entering a phase that relocates
- // object references.
- class RelocationLock {
- public:
- explicit RelocationLock(Heap* heap) : heap_(heap) {
- heap_->relocation_mutex_.Lock();
- }
+ // ===========================================================================
+ // Methods checking/returning the space of a given object/address. ===========
+ // ===========================================================================
- ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }
+ // Returns whether the object resides in new space.
+ inline bool InNewSpace(Object* object);
+ inline bool InNewSpace(Address address);
+ inline bool InNewSpacePage(Address address);
+ inline bool InFromSpace(Object* object);
+ inline bool InToSpace(Object* object);
- private:
- Heap* heap_;
- };
+ // Returns whether the object resides in old space.
+ inline bool InOldSpace(Address address);
+ inline bool InOldSpace(Object* object);
- // An optional version of the above lock that can be used for some critical
- // sections on the mutator thread; only safe since the GC currently does not
- // do concurrent compaction.
- class OptionalRelocationLock {
- public:
- OptionalRelocationLock(Heap* heap, bool concurrent)
- : heap_(heap), concurrent_(concurrent) {
- if (concurrent_) heap_->relocation_mutex_.Lock();
- }
+ // Checks whether an address/object is in the heap (including auxiliary
+ // area and unused area).
+ bool Contains(Address addr);
+ bool Contains(HeapObject* value);
- ~OptionalRelocationLock() {
- if (concurrent_) heap_->relocation_mutex_.Unlock();
- }
+ // Checks whether an address/object is in a space.
+ // Currently used by tests, serialization and heap verification only.
+ bool InSpace(Address addr, AllocationSpace space);
+ bool InSpace(HeapObject* value, AllocationSpace space);
- private:
- Heap* heap_;
- bool concurrent_;
- };
+ // ===========================================================================
+ // Object statistics tracking. ===============================================
+ // ===========================================================================
- void AddWeakObjectToCodeDependency(Handle<HeapObject> obj,
- Handle<DependentCode> dep);
+ // Returns the number of buckets used by object statistics tracking during a
+ // major GC. Note that the following methods fail gracefully when the bounds
+ // are exceeded.
+ size_t NumberOfTrackedHeapObjectTypes();
- DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);
+ // Returns object statistics about count and size at the last major GC.
+ // Objects are grouped into buckets that roughly resemble existing
+ // instance types.
+ size_t ObjectCountAtLastGC(size_t index);
+ size_t ObjectSizeAtLastGC(size_t index);
- void AddRetainedMap(Handle<Map> map);
+ // Retrieves names of buckets used by object statistics tracking.
+ bool GetObjectTypeName(size_t index, const char** object_type,
+ const char** object_sub_type);
- static void FatalProcessOutOfMemory(const char* location,
- bool take_snapshot = false);
+ // ===========================================================================
+ // GC statistics. ============================================================
+ // ===========================================================================
- // This event is triggered after successful allocation of a new object made
- // by runtime. Allocations of target space for object evacuation do not
- // trigger the event. In order to track ALL allocations one must turn off
- // FLAG_inline_new and FLAG_use_allocation_folding.
- inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);
+ // Returns the maximum amount of memory reserved for the heap. For
+ // the young generation, we reserve 4 times the amount needed for a
+ // semi space. The young generation consists of two semi spaces and
+ // we reserve twice the amount needed for those in order to ensure
+ // that new space can be aligned to its size.
+ intptr_t MaxReserved() {
+ return 4 * reserved_semispace_size_ + max_old_generation_size_;
+ }
+ int MaxSemiSpaceSize() { return max_semi_space_size_; }
+ int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
+ int InitialSemiSpaceSize() { return initial_semispace_size_; }
+ int TargetSemiSpaceSize() { return target_semispace_size_; }
+ intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
+ intptr_t MaxExecutableSize() { return max_executable_size_; }
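
A worked example of MaxReserved() with illustrative numbers, following the comment above (two semispaces, reserved twice over so new space can be aligned to its size):

    #include <cstdio>

    int main() {
      long reserved_semispace_mb = 8;    // assumption
      long max_old_generation_mb = 700;  // assumption
      long max_reserved_mb = 4 * reserved_semispace_mb + max_old_generation_mb;
      printf("MaxReserved = %ld MB\n", max_reserved_mb);  // 732 MB
      return 0;
    }
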
- // This event is triggered after object is moved to a new place.
- inline void OnMoveEvent(HeapObject* target, HeapObject* source,
- int size_in_bytes);
+ // Returns the capacity of the heap in bytes w/o growing. Heap grows when
+ // more spaces are needed until it reaches the limit.
+ intptr_t Capacity();
- bool deserialization_complete() const { return deserialization_complete_; }
+ // Returns the amount of memory currently committed for the heap.
+ intptr_t CommittedMemory();
- // The following methods are used to track raw C++ pointers to externally
- // allocated memory used as backing store in live array buffers.
+ // Returns the amount of memory currently committed for the old space.
+ intptr_t CommittedOldGenerationMemory();
- // A new ArrayBuffer was created with |data| as backing store.
- void RegisterNewArrayBuffer(bool in_new_space, void* data, size_t length);
+ // Returns the amount of executable memory currently committed for the heap.
+ intptr_t CommittedMemoryExecutable();
- // The backing store |data| is no longer owned by V8.
- void UnregisterArrayBuffer(bool in_new_space, void* data);
+ // Returns the amount of physical memory currently committed for the heap.
+ size_t CommittedPhysicalMemory();
- // A live ArrayBuffer was discovered during marking/scavenge.
- void RegisterLiveArrayBuffer(bool from_scavenge, void* data);
+ // Returns the maximum amount of memory ever committed for the heap.
+ intptr_t MaximumCommittedMemory() { return maximum_committed_; }
- // Frees all backing store pointers that weren't discovered in the previous
- // marking or scavenge phase.
- void FreeDeadArrayBuffers(bool from_scavenge);
+ // Updates the maximum committed memory for the heap. Should be called
+ // whenever a space grows.
+ void UpdateMaximumCommitted();
- // Prepare for a new scavenge phase. A new marking phase is implicitly
- // prepared by finishing the previous one.
- void PrepareArrayBufferDiscoveryInNewSpace();
+ // Returns the available bytes in space w/o growing.
+ // Heap doesn't guarantee that it can allocate an object that requires
+ // all available bytes. Check MaxHeapObjectSize() instead.
+ intptr_t Available();
- // An ArrayBuffer moved from new space to old space.
- void PromoteArrayBuffer(Object* buffer);
+ // Returns the size of all objects residing in the heap.
+ intptr_t SizeOfObjects();
- bool HasLowAllocationRate();
- bool HasHighFragmentation();
- bool HasHighFragmentation(intptr_t used, intptr_t committed);
+ void UpdateSurvivalStatistics(int start_new_space_size);
- protected:
- // Methods made available to tests.
+ inline void IncrementPromotedObjectsSize(int object_size) {
+ DCHECK(object_size > 0);
+ promoted_objects_size_ += object_size;
+ }
+ inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
- // Allocates a JS Map in the heap.
- MUST_USE_RESULT AllocationResult
- AllocateMap(InstanceType instance_type, int instance_size,
- ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+ inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
+ DCHECK(object_size > 0);
+ semi_space_copied_object_size_ += object_size;
+ }
+ inline intptr_t semi_space_copied_object_size() {
+ return semi_space_copied_object_size_;
+ }
- // Allocates and initializes a new JavaScript object based on a
- // constructor.
- // If allocation_site is non-null, then a memento is emitted after the object
- // that points to the site.
- MUST_USE_RESULT AllocationResult
- AllocateJSObject(JSFunction* constructor,
- PretenureFlag pretenure = NOT_TENURED,
- AllocationSite* allocation_site = NULL);
+ inline intptr_t SurvivedNewSpaceObjectSize() {
+ return promoted_objects_size_ + semi_space_copied_object_size_;
+ }
- // Allocates and initializes a new JavaScript object based on a map.
- // Passing an allocation site means that a memento will be created that
- // points to the site.
- MUST_USE_RESULT AllocationResult
- AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
- AllocationSite* allocation_site = NULL);
+ inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
- // Allocates a HeapNumber from value.
- MUST_USE_RESULT AllocationResult
- AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
- PretenureFlag pretenure = NOT_TENURED);
+ inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
-// Allocates SIMD values from the given lane values.
-#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \
- AllocationResult Allocate##Type(lane_type lanes[lane_count], \
- PretenureFlag pretenure = NOT_TENURED);
- SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION)
-#undef SIMD_ALLOCATE_DECLARATION
+ inline void IncrementNodesPromoted() { nodes_promoted_++; }
- // Allocates a byte array of the specified length
- MUST_USE_RESULT AllocationResult
- AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
+ inline void IncrementYoungSurvivorsCounter(int survived) {
+ DCHECK(survived >= 0);
+ survived_last_scavenge_ = survived;
+ survived_since_last_expansion_ += survived;
+ }
- // Allocates a bytecode array with given contents.
- MUST_USE_RESULT AllocationResult
- AllocateBytecodeArray(int length, const byte* raw_bytecodes,
- int frame_size);
+ inline intptr_t PromotedTotalSize() {
+ int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+ if (total > std::numeric_limits<intptr_t>::max()) {
+ // TODO(erikcorry): Use uintptr_t everywhere we do heap size calculations.
+ return std::numeric_limits<intptr_t>::max();
+ }
+ if (total < 0) return 0;
+ return static_cast<intptr_t>(total);
+ }
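The clamp above saturates a 64-bit total into intptr_t so that 32-bit builds never see a negative or wrapped size. A minimal standalone sketch of the same saturation, using only the standard library (SaturateToIntptr is an illustrative name, not a V8 function):

#include <cstdint>
#include <limits>

// Saturating conversion in the spirit of PromotedTotalSize(): negative
// totals collapse to 0, totals above intptr_t's range collapse to max.
static std::intptr_t SaturateToIntptr(std::int64_t total) {
  if (total > std::numeric_limits<std::intptr_t>::max()) {
    return std::numeric_limits<std::intptr_t>::max();
  }
  if (total < 0) return 0;
  return static_cast<std::intptr_t>(total);
}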
- // Copy the code and scope info part of the code object, but insert
- // the provided data as the relocation information.
- MUST_USE_RESULT AllocationResult
- CopyCode(Code* code, Vector<byte> reloc_info);
+ void UpdateNewSpaceAllocationCounter() {
+ new_space_allocation_counter_ = NewSpaceAllocationCounter();
+ }
- MUST_USE_RESULT AllocationResult CopyCode(Code* code);
+ size_t NewSpaceAllocationCounter() {
+ return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
+ }
- // Allocates a fixed array initialized with undefined values
- MUST_USE_RESULT AllocationResult
- AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
+ // This should be used only for testing.
+ void set_new_space_allocation_counter(size_t new_value) {
+ new_space_allocation_counter_ = new_value;
+ }
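NewSpaceAllocationCounter() adds the bytes allocated since the last GC to a baseline saved at GC time, so the counter stays monotonic even though each space resets its own tally. A hedged sketch of that bookkeeping with stand-in types (ToySpace and ToyCounter are illustrative, not V8 classes):

#include <cstddef>

// Stand-in for a space that resets its per-cycle tally at each GC.
struct ToySpace {
  std::size_t allocated_since_last_gc = 0;
};

class ToyCounter {
 public:
  // Monotonic total: baseline saved at GC time + bytes since then.
  std::size_t Value(const ToySpace& space) const {
    return baseline_ + space.allocated_since_last_gc;
  }
  // Called when a GC starts, before the space resets its tally.
  void OnGC(ToySpace* space) {
    baseline_ = Value(*space);
    space->allocated_since_last_gc = 0;
  }

 private:
  std::size_t baseline_ = 0;
};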
- static const int kInitialStringTableSize = 2048;
- static const int kInitialEvalCacheSize = 64;
- static const int kInitialNumberStringCacheSize = 256;
+ void UpdateOldGenerationAllocationCounter() {
+ old_generation_allocation_counter_ = OldGenerationAllocationCounter();
+ }
- private:
- Heap();
+ size_t OldGenerationAllocationCounter() {
+ return old_generation_allocation_counter_ + PromotedSinceLastGC();
+ }
- int current_gc_flags() { return current_gc_flags_; }
- void set_current_gc_flags(int flags) {
- current_gc_flags_ = flags;
- DCHECK(!ShouldFinalizeIncrementalMarking() ||
- !ShouldAbortIncrementalMarking());
+ // This should be used only for testing.
+ void set_old_generation_allocation_counter(size_t new_value) {
+ old_generation_allocation_counter_ = new_value;
}
- inline bool ShouldReduceMemory() const {
- return current_gc_flags_ & kReduceMemoryFootprintMask;
+ size_t PromotedSinceLastGC() {
+ return PromotedSpaceSizeOfObjects() - old_generation_size_at_last_gc_;
}
- inline bool ShouldAbortIncrementalMarking() const {
- return current_gc_flags_ & kAbortIncrementalMarkingMask;
+ int gc_count() const { return gc_count_; }
+
+ // Returns the size of objects residing in non-new spaces.
+ intptr_t PromotedSpaceSizeOfObjects();
+
+ double total_regexp_code_generated() { return total_regexp_code_generated_; }
+ void IncreaseTotalRegexpCodeGenerated(int size) {
+ total_regexp_code_generated_ += size;
}
- inline bool ShouldFinalizeIncrementalMarking() const {
- return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
+ void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
+ if (is_crankshafted) {
+ crankshaft_codegen_bytes_generated_ += size;
+ } else {
+ full_codegen_bytes_generated_ += size;
+ }
}
- // The amount of external memory registered through the API kept alive
- // by global handles
- int64_t amount_of_external_allocated_memory_;
+ // ===========================================================================
+ // Prologue/epilogue callback methods. ======================================
+ // ===========================================================================
- // Caches the amount of external memory registered at the last global gc.
- int64_t amount_of_external_allocated_memory_at_last_global_gc_;
+ void AddGCPrologueCallback(v8::Isolate::GCCallback callback,
+ GCType gc_type_filter, bool pass_isolate = true);
+ void RemoveGCPrologueCallback(v8::Isolate::GCCallback callback);
- // This can be calculated directly from a pointer to the heap; however, it is
- // more expedient to get at the isolate directly from within Heap methods.
- Isolate* isolate_;
+ void AddGCEpilogueCallback(v8::Isolate::GCCallback callback,
+ GCType gc_type_filter, bool pass_isolate = true);
+ void RemoveGCEpilogueCallback(v8::Isolate::GCCallback callback);
- Object* roots_[kRootListLength];
+ void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
+ void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
- size_t code_range_size_;
- int reserved_semispace_size_;
- int max_semi_space_size_;
- int initial_semispace_size_;
- int target_semispace_size_;
- intptr_t max_old_generation_size_;
- intptr_t initial_old_generation_size_;
- bool old_generation_size_configured_;
- intptr_t max_executable_size_;
- intptr_t maximum_committed_;
+ // ===========================================================================
+ // Allocation methods. =======================================================
+ // ===========================================================================
- // For keeping track of how much data has survived
- // scavenge since last new space expansion.
- int survived_since_last_expansion_;
+ // Creates a filler object and returns a heap object immediately after it.
+ MUST_USE_RESULT HeapObject* PrecedeWithFiller(HeapObject* object,
+ int filler_size);
- // ... and since the last scavenge.
- int survived_last_scavenge_;
+ // Creates a filler object if needed for alignment and returns a heap object
+ // immediately after it. If any space is left after the returned object,
+ // another filler object is created so the over-allocated memory is iterable.
+ MUST_USE_RESULT HeapObject* AlignWithFiller(HeapObject* object,
+ int object_size,
+ int allocation_size,
+ AllocationAlignment alignment);
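Both helpers rest on the same arithmetic: compute how far an address is from the requested alignment and plug the gap with a filler so the heap stays iterable. A minimal sketch of that offset computation, assuming alignment is a power of two (FillerSizeFor is an illustrative helper, not part of the Heap API):

#include <cstdint>

// Bytes of filler needed before an object at `address` so that it
// satisfies `alignment` (a power of two). Any slack left at the end of
// the reserved block would become a trailing filler in the same spirit.
static int FillerSizeFor(std::uintptr_t address, int alignment) {
  int misalignment = static_cast<int>(address & (alignment - 1));
  return misalignment == 0 ? 0 : alignment - misalignment;
}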
- int always_allocate_scope_depth_;
+ // ===========================================================================
+ // ArrayBuffer tracking. =====================================================
+ // ===========================================================================
- // For keeping track of context disposals.
- int contexts_disposed_;
+ void RegisterNewArrayBuffer(JSArrayBuffer* buffer);
+ void UnregisterArrayBuffer(JSArrayBuffer* buffer);
- int global_ic_age_;
+ inline ArrayBufferTracker* array_buffer_tracker() {
+ return array_buffer_tracker_;
+ }
- int scan_on_scavenge_pages_;
+// =============================================================================
- NewSpace new_space_;
- OldSpace* old_space_;
- OldSpace* code_space_;
- MapSpace* map_space_;
- LargeObjectSpace* lo_space_;
- HeapState gc_state_;
- int gc_post_processing_depth_;
- Address new_space_top_after_last_gc_;
+#ifdef VERIFY_HEAP
+ // Verify the heap is in its normal state before or after a GC.
+ void Verify();
+#endif
- // Returns the amount of external memory registered since last global gc.
- int64_t PromotedExternalMemorySize();
+#ifdef DEBUG
+ void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
- // How many "runtime allocations" happened.
- uint32_t allocations_count_;
+ void TracePathToObjectFrom(Object* target, Object* root);
+ void TracePathToObject(Object* target);
+ void TracePathToGlobal();
- // Running hash over allocations performed.
- uint32_t raw_allocations_hash_;
+ void Print();
+ void PrintHandles();
- // Countdown counter, dumps allocation hash when 0.
- uint32_t dump_allocations_hash_countdown_;
+ // Report heap statistics.
+ void ReportHeapStatistics(const char* title);
+ void ReportCodeStatistics(const char* title);
+#endif
- // How many mark-sweep collections happened.
- unsigned int ms_count_;
+ private:
+ class UnmapFreeMemoryTask;
- // How many gc happened.
- unsigned int gc_count_;
+ // The external string table is where all external strings are
+ // registered. We need to keep track of such strings to properly
+ // finalize them.
+ class ExternalStringTable {
+ public:
+ // Registers an external string.
+ inline void AddString(String* string);
- // For post mortem debugging.
- static const int kRememberedUnmappedPages = 128;
- int remembered_unmapped_pages_index_;
- Address remembered_unmapped_pages_[kRememberedUnmappedPages];
+ inline void Iterate(ObjectVisitor* v);
- // Total length of the strings we failed to flatten since the last GC.
- int unflattened_strings_length_;
+ // Restores internal invariant and gets rid of collected strings.
+ // Must be called after each Iterate() that modified the strings.
+ void CleanUp();
-#define ROOT_ACCESSOR(type, name, camel_name) \
- inline void set_##name(type* value);
- ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR
+ // Destroys all allocated memory.
+ void TearDown();
-#ifdef DEBUG
- // If the --gc-interval flag is set to a positive value, this
- // variable holds the value indicating the number of allocations
- // remain until the next failure and garbage collection.
- int allocation_timeout_;
-#endif // DEBUG
+ private:
+ explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
- // Limit that triggers a global GC on the next (normally caused) GC. This
- // is checked when we have already decided to do a GC to help determine
- // which collector to invoke, before expanding a paged space in the old
- // generation and on every allocation in large object space.
- intptr_t old_generation_allocation_limit_;
+ inline void Verify();
- // Indicates that an allocation has failed in the old generation since the
- // last GC.
- bool old_gen_exhausted_;
+ inline void AddOldString(String* string);
- // Indicates that memory usage is more important than latency.
- // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed.
- bool optimize_for_memory_usage_;
+ // Notifies the table that only a prefix of the new list is valid.
+ inline void ShrinkNewStrings(int position);
- // Indicates that inline bump-pointer allocation has been globally disabled
- // for all spaces. This is used to disable allocations in generated code.
- bool inline_allocation_disabled_;
+ // To speed up scavenge collections, new space strings are kept
+ // separate from old space strings.
+ List<Object*> new_space_strings_;
+ List<Object*> old_space_strings_;
- // Weak list heads, threaded through the objects.
- // List heads are initialized lazily and contain the undefined_value at start.
- Object* native_contexts_list_;
- Object* allocation_sites_list_;
+ Heap* heap_;
- // List of encountered weak collections (JSWeakMap and JSWeakSet) during
- // marking. It is initialized during marking, destroyed after marking and
- // contains Smi(0) while marking is not active.
- Object* encountered_weak_collections_;
+ friend class Heap;
- Object* encountered_weak_cells_;
+ DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
+ };
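Splitting the table into new-space and old-space lists keeps the scavenger's work proportional to the short-lived list only; promotion moves an entry across. A hedged sketch of the split, with std::vector standing in for V8's List and std::string* for String* (ToyExternalStringTable is illustrative):

#include <cstddef>
#include <string>
#include <vector>

class ToyExternalStringTable {
 public:
  void AddNew(std::string* s) { new_strings_.push_back(s); }

  // A string that survives long enough moves to the long-lived list.
  void Promote(std::size_t index) {
    old_strings_.push_back(new_strings_[index]);
    new_strings_.erase(new_strings_.begin() +
                       static_cast<std::ptrdiff_t>(index));
  }

  // Minor GC: visit only the (typically short) new-space list.
  template <typename Visitor>
  void IterateNew(Visitor visit) {
    for (std::string* s : new_strings_) visit(s);
  }

 private:
  std::vector<std::string*> new_strings_;
  std::vector<std::string*> old_strings_;
};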
- StoreBufferRebuilder store_buffer_rebuilder_;
+ struct StrongRootsList;
struct StringTypeTable {
InstanceType type;
@@ -1822,16 +1596,6 @@ class Heap {
RootListIndex index;
};
- static const StringTypeTable string_type_table[];
- static const ConstantStringTable constant_string_table[];
- static const StructTable struct_table[];
-
- // The special hidden string which is an empty string, but does not match
- // any string when looked up in properties.
- String* hidden_string_;
-
- void AddPrivateGlobalSymbols(Handle<Object> private_intern_table);
-
struct GCCallbackPair {
GCCallbackPair(v8::Isolate::GCCallback callback, GCType gc_type,
bool pass_isolate)
@@ -1846,13 +1610,65 @@ class Heap {
bool pass_isolate;
};
- List<GCCallbackPair> gc_epilogue_callbacks_;
- List<GCCallbackPair> gc_prologue_callbacks_;
+ typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
+ Object** pointer);
- // Code that should be run before and after each GC. Includes some
- // reporting/verification activities when compiled with DEBUG set.
- void GarbageCollectionPrologue();
- void GarbageCollectionEpilogue();
+ static const int kInitialStringTableSize = 2048;
+ static const int kInitialEvalCacheSize = 64;
+ static const int kInitialNumberStringCacheSize = 256;
+
+ static const int kRememberedUnmappedPages = 128;
+
+ static const StringTypeTable string_type_table[];
+ static const ConstantStringTable constant_string_table[];
+ static const StructTable struct_table[];
+
+ static const int kYoungSurvivalRateHighThreshold = 90;
+ static const int kYoungSurvivalRateAllowedDeviation = 15;
+ static const int kOldSurvivalRateLowThreshold = 10;
+
+ static const int kMaxMarkCompactsInIdleRound = 7;
+ static const int kIdleScavengeThreshold = 5;
+
+ static const int kAllocationSiteScratchpadSize = 256;
+
+ Heap();
+
+ static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
+ Heap* heap, Object** pointer);
+
+ static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
+ StoreBufferEvent event);
+
+ // Selects the proper allocation space based on the pretenuring decision.
+ static AllocationSpace SelectSpace(PretenureFlag pretenure) {
+ return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
+ }
+
+#define ROOT_ACCESSOR(type, name, camel_name) \
+ inline void set_##name(type* value);
+ ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+ StoreBuffer* store_buffer() { return &store_buffer_; }
+
+ void set_current_gc_flags(int flags) {
+ current_gc_flags_ = flags;
+ DCHECK(!ShouldFinalizeIncrementalMarking() ||
+ !ShouldAbortIncrementalMarking());
+ }
+
+ inline bool ShouldReduceMemory() const {
+ return current_gc_flags_ & kReduceMemoryFootprintMask;
+ }
+
+ inline bool ShouldAbortIncrementalMarking() const {
+ return current_gc_flags_ & kAbortIncrementalMarkingMask;
+ }
+
+ inline bool ShouldFinalizeIncrementalMarking() const {
+ return current_gc_flags_ & kFinalizeIncrementalMarkingMask;
+ }
void PreprocessStackTraces();
@@ -1892,22 +1708,260 @@ class Heap {
inline void UpdateOldSpaceLimits();
- // Selects the proper allocation space depending on the given object
- // size and pretenuring decision.
- static AllocationSpace SelectSpace(int object_size,
- PretenureFlag pretenure) {
- if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
- return (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE;
- }
+ // Initializes a JSObject based on its map.
+ void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
+ Map* map);
+ void InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site);
+
+ bool CreateInitialMaps();
+ void CreateInitialObjects();
+
+ // These Create*EntryStub functions are here and forced to not be inlined
+ // because of a gcc-4.4 bug that assigns wrong vtable entries.
+ NO_INLINE(void CreateJSEntryStub());
+ NO_INLINE(void CreateJSConstructEntryStub());
+
+ void CreateFixedStubs();
HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
+ // Commits from space if it is uncommitted.
+ void EnsureFromSpaceIsCommitted();
+
+ // Uncommit unused semi space.
+ bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+
+ // Fill in bogus values in from space
+ void ZapFromSpace();
+
+ // Deopts all code that contains allocation instructions which are tenured or
+ // not tenured. Moreover, it clears the pretenuring allocation site statistics.
+ void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
+
+ // Evaluates local pretenuring for the old space and calls
+ // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
+ // the old space.
+ void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
+
+ // Record statistics before and after garbage collection.
+ void ReportStatisticsBeforeGC();
+ void ReportStatisticsAfterGC();
+
+ // Creates and installs the full-sized number string cache.
+ int FullSizeNumberStringCacheLength();
+ // Flush the number to string cache.
+ void FlushNumberStringCache();
+
+ // Sets used allocation sites entries to undefined.
+ void FlushAllocationSitesScratchpad();
+
+ // Initializes the allocation sites scratchpad with undefined values.
+ void InitializeAllocationSitesScratchpad();
+
+ // Adds an allocation site to the scratchpad if there is space left.
+ void AddAllocationSiteToScratchpad(AllocationSite* site,
+ ScratchpadSlotMode mode);
+
+ // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
+ // Re-visit incremental marking heuristics.
+ bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
+
+ void ConfigureInitialOldGenerationSize();
+
+ bool HasLowYoungGenerationAllocationRate();
+ bool HasLowOldGenerationAllocationRate();
+ double YoungGenerationMutatorUtilization();
+ double OldGenerationMutatorUtilization();
+
+ void ReduceNewSpaceSize();
+
+ bool TryFinalizeIdleIncrementalMarking(
+ double idle_time_in_ms, size_t size_of_objects,
+ size_t mark_compact_speed_in_bytes_per_ms);
+
+ GCIdleTimeHeapState ComputeHeapState();
+
+ bool PerformIdleTimeAction(GCIdleTimeAction action,
+ GCIdleTimeHeapState heap_state,
+ double deadline_in_ms);
+
+ void IdleNotificationEpilogue(GCIdleTimeAction action,
+ GCIdleTimeHeapState heap_state, double start_ms,
+ double deadline_in_ms);
+ void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms,
+ double now_ms);
+
+ inline void UpdateAllocationsHash(HeapObject* object);
+ inline void UpdateAllocationsHash(uint32_t value);
+ void PrintAlloctionsHash();
+
+ void AddToRingBuffer(const char* string);
+ void GetFromRingBuffer(char* buffer);
+
+ // Attempt to over-approximate the weak closure by marking object groups and
+ // implicit references from global handles, but don't atomically complete
+ // marking. If we continue to mark incrementally, we might have marked
+ // objects that die later.
+ void OverApproximateWeakClosure(const char* gc_reason);
+
+ // ===========================================================================
+ // Actual GC. ================================================================
+ // ===========================================================================
+
+ // Code that should be run before and after each GC. Includes some
+ // reporting/verification activities when compiled with DEBUG set.
+ void GarbageCollectionPrologue();
+ void GarbageCollectionEpilogue();
+
+ // Performs a major collection in the whole heap.
+ void MarkCompact();
+
+ // Code to be run before and after mark-compact.
+ void MarkCompactPrologue();
+ void MarkCompactEpilogue();
+
+ // Performs a minor collection in new generation.
+ void Scavenge();
+
+ Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+
+ void UpdateNewSpaceReferencesInExternalStringTable(
+ ExternalStringTableUpdaterCallback updater_func);
+
+ void UpdateReferencesInExternalStringTable(
+ ExternalStringTableUpdaterCallback updater_func);
+
+ void ProcessAllWeakReferences(WeakObjectRetainer* retainer);
+ void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
+ void ProcessNativeContexts(WeakObjectRetainer* retainer);
+ void ProcessAllocationSites(WeakObjectRetainer* retainer);
+
+ // ===========================================================================
+ // GC statistics. ============================================================
+ // ===========================================================================
+
+ inline intptr_t OldGenerationSpaceAvailable() {
+ return old_generation_allocation_limit_ - PromotedTotalSize();
+ }
+
+ // Returns maximum GC pause.
+ double get_max_gc_pause() { return max_gc_pause_; }
+
+ // Returns maximum size of objects alive after GC.
+ intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
+
+ // Returns minimal interval between two subsequent collections.
+ double get_min_in_mutator() { return min_in_mutator_; }
+
+ // Update GC statistics that are tracked on the Heap.
+ void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
+ double marking_time);
+
+ bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
+
+ // ===========================================================================
+ // Growing strategy. =========================================================
+ // ===========================================================================
+
+ // Decrease the allocation limit if the new limit based on the given
+ // parameters is lower than the current limit.
+ void DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
+ double gc_speed,
+ double mutator_speed);
+
+
+ // Calculates the allocation limit based on a given growing factor and a
+ // given old generation size.
+ intptr_t CalculateOldGenerationAllocationLimit(double factor,
+ intptr_t old_gen_size);
+
+ // Sets the allocation limit to trigger the next full garbage collection.
+ void SetOldGenerationAllocationLimit(intptr_t old_gen_size, double gc_speed,
+ double mutator_speed);
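The strategy is factor-based: the next full GC triggers once the old generation outgrows its live size scaled by a growing factor, and dampening only ever lowers an existing limit. A sketch of that computation under assumed, illustrative constants (V8's tuned factors and slack differ):

#include <algorithm>
#include <cstdint>

// Next-full-GC trigger: live size scaled by a growing factor, but never
// tighter than the live size plus a little slack.
static std::intptr_t ToyAllocationLimit(std::intptr_t old_gen_size,
                                        double factor) {
  const std::intptr_t kMinimumSlack = 1024 * 1024;  // illustrative
  std::intptr_t limit =
      static_cast<std::intptr_t>(static_cast<double>(old_gen_size) * factor);
  return std::max(limit, old_gen_size + kMinimumSlack);
}

// Dampening never raises the current limit.
static std::intptr_t Dampen(std::intptr_t current_limit,
                            std::intptr_t proposed_limit) {
  return std::min(current_limit, proposed_limit);
}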
+
+ // ===========================================================================
+ // Inline allocation. ========================================================
+ // ===========================================================================
+
+ void LowerInlineAllocationLimit(intptr_t step);
+ void ResetInlineAllocationLimit();
+
+ // ===========================================================================
+ // Idle notification. ========================================================
+ // ===========================================================================
+
+ bool RecentIdleNotificationHappened();
+ void ScheduleIdleScavengeIfNeeded(int bytes_allocated);
+
+ // ===========================================================================
+ // Allocation methods. =======================================================
+ // ===========================================================================
+
+ // Returns a deep copy of the JavaScript object.
+ // Properties and elements are copied too.
+ // Optionally takes an AllocationSite to be appended in an AllocationMemento.
+ MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
+ AllocationSite* site = NULL);
+
+ // Allocates a JS Map in the heap.
+ MUST_USE_RESULT AllocationResult
+ AllocateMap(InstanceType instance_type, int instance_size,
+ ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+
+ // Allocates and initializes a new JavaScript object based on a
+ // constructor.
+ // If allocation_site is non-null, then a memento is emitted after the object
+ // that points to the site.
+ MUST_USE_RESULT AllocationResult AllocateJSObject(
+ JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED,
+ AllocationSite* allocation_site = NULL);
+
+ // Allocates and initializes a new JavaScript object based on a map.
+ // Passing an allocation site means that a memento will be created that
+ // points to the site.
+ MUST_USE_RESULT AllocationResult
+ AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
+ AllocationSite* allocation_site = NULL);
+
+ // Allocates a HeapNumber from value.
+ MUST_USE_RESULT AllocationResult
+ AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
+ PretenureFlag pretenure = NOT_TENURED);
+
+// Allocates SIMD values from the given lane values.
+#define SIMD_ALLOCATE_DECLARATION(TYPE, Type, type, lane_count, lane_type) \
+ AllocationResult Allocate##Type(lane_type lanes[lane_count], \
+ PretenureFlag pretenure = NOT_TENURED);
+ SIMD128_TYPES(SIMD_ALLOCATE_DECLARATION)
+#undef SIMD_ALLOCATE_DECLARATION
+
+ // Allocates a byte array of the specified length
+ MUST_USE_RESULT AllocationResult
+ AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
+
+ // Allocates a bytecode array with given contents.
+ MUST_USE_RESULT AllocationResult
+ AllocateBytecodeArray(int length, const byte* raw_bytecodes, int frame_size,
+ int parameter_count, FixedArray* constant_pool);
+
+ // Copy the code and scope info part of the code object, but insert
+ // the provided data as the relocation information.
+ MUST_USE_RESULT AllocationResult CopyCode(Code* code,
+ Vector<byte> reloc_info);
+
+ MUST_USE_RESULT AllocationResult CopyCode(Code* code);
+
+ // Allocates a fixed array initialized with undefined values
+ MUST_USE_RESULT AllocationResult
+ AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
+
// Allocate an uninitialized object. The memory is non-executable if the
// hardware and OS allow. This is the single choke-point for allocations
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
MUST_USE_RESULT inline AllocationResult AllocateRaw(
- int size_in_bytes, AllocationSpace space, AllocationSpace retry_space,
+ int size_in_bytes, AllocationSpace space,
AllocationAlignment aligment = kWordAligned);
// Allocates a heap object based on the map.
@@ -1919,12 +1973,6 @@ class Heap {
MUST_USE_RESULT AllocationResult
AllocatePartialMap(InstanceType instance_type, int instance_size);
- // Initializes a JSObject based on its map.
- void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
- Map* map);
- void InitializeAllocationMemento(AllocationMemento* memento,
- AllocationSite* allocation_site);
-
// Allocate a block of memory in the given space (filled with a filler).
// Used as a fall-back for generated code when the space is full.
MUST_USE_RESULT AllocationResult
@@ -1952,9 +2000,6 @@ class Heap {
MUST_USE_RESULT AllocationResult
AllocateRawTwoByteString(int length, PretenureFlag pretenure);
- bool CreateInitialMaps();
- void CreateInitialObjects();
-
// Allocates an internalized string in old space based on the character
// stream.
MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
@@ -2021,13 +2066,6 @@ class Heap {
MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
int length, PretenureFlag pretenure = NOT_TENURED);
- // These five Create*EntryStub functions are here and forced to not be inlined
- // because of a gcc-4.4 bug that assigns wrong vtable entries.
- NO_INLINE(void CreateJSEntryStub());
- NO_INLINE(void CreateJSConstructEntryStub());
-
- void CreateFixedStubs();
-
// Allocate empty fixed array.
MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
@@ -2057,79 +2095,127 @@ class Heap {
MUST_USE_RESULT AllocationResult InternalizeString(String* str);
- // Performs a minor collection in new generation.
- void Scavenge();
+ // The amount of external memory registered through the API kept alive
+ // by global handles
+ int64_t amount_of_external_allocated_memory_;
- // Commits from space if it is uncommitted.
- void EnsureFromSpaceIsCommitted();
+ // Caches the amount of external memory registered at the last global gc.
+ int64_t amount_of_external_allocated_memory_at_last_global_gc_;
- // Uncommit unused semi space.
- bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+ // This can be calculated directly from a pointer to the heap; however, it is
+ // more expedient to get at the isolate directly from within Heap methods.
+ Isolate* isolate_;
- // Fill in bogus values in from space
- void ZapFromSpace();
+ Object* roots_[kRootListLength];
- static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
- Heap* heap, Object** pointer);
+ size_t code_range_size_;
+ int reserved_semispace_size_;
+ int max_semi_space_size_;
+ int initial_semispace_size_;
+ int target_semispace_size_;
+ intptr_t max_old_generation_size_;
+ intptr_t initial_old_generation_size_;
+ bool old_generation_size_configured_;
+ intptr_t max_executable_size_;
+ intptr_t maximum_committed_;
- Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
- static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
- StoreBufferEvent event);
+ // For keeping track of how much data has survived
+ // scavenge since last new space expansion.
+ int survived_since_last_expansion_;
- // Performs a major collection in the whole heap.
- void MarkCompact();
+ // ... and since the last scavenge.
+ int survived_last_scavenge_;
- // Code to be run before and after mark-compact.
- void MarkCompactPrologue();
- void MarkCompactEpilogue();
+ // This is not the depth of nested AlwaysAllocateScopes but rather a single
+ // count, as scopes can be acquired from multiple tasks (read: threads).
+ AtomicNumber<size_t> always_allocate_scope_count_;
- void ProcessNativeContexts(WeakObjectRetainer* retainer);
- void ProcessAllocationSites(WeakObjectRetainer* retainer);
+ // For keeping track of context disposals.
+ int contexts_disposed_;
- // Deopts all code that contains allocation instruction which are tenured or
- // not tenured. Moreover it clears the pretenuring allocation site statistics.
- void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
+ int global_ic_age_;
- // Evaluates local pretenuring for the old space and calls
- // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
- // the old space.
- void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
+ int scan_on_scavenge_pages_;
- // Called on heap tear-down. Frees all remaining ArrayBuffer backing stores.
- void TearDownArrayBuffers();
+ NewSpace new_space_;
+ OldSpace* old_space_;
+ OldSpace* code_space_;
+ MapSpace* map_space_;
+ LargeObjectSpace* lo_space_;
+ HeapState gc_state_;
+ int gc_post_processing_depth_;
+ Address new_space_top_after_last_gc_;
- // Record statistics before and after garbage collection.
- void ReportStatisticsBeforeGC();
- void ReportStatisticsAfterGC();
+ // Returns the amount of external memory registered since last global gc.
+ int64_t PromotedExternalMemorySize();
- // Total RegExp code ever generated
- double total_regexp_code_generated_;
+ // How many "runtime allocations" happened.
+ uint32_t allocations_count_;
- int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
+ // Running hash over allocations performed.
+ uint32_t raw_allocations_hash_;
- GCTracer tracer_;
+ // Countdown counter, dumps allocation hash when 0.
+ uint32_t dump_allocations_hash_countdown_;
- // Creates and installs the full-sized number string cache.
- int FullSizeNumberStringCacheLength();
- // Flush the number to string cache.
- void FlushNumberStringCache();
+ // How many mark-sweep collections happened.
+ unsigned int ms_count_;
- // Sets used allocation sites entries to undefined.
- void FlushAllocationSitesScratchpad();
+ // How many gc happened.
+ unsigned int gc_count_;
- // Initializes the allocation sites scratchpad with undefined values.
- void InitializeAllocationSitesScratchpad();
+ // For post mortem debugging.
+ int remembered_unmapped_pages_index_;
+ Address remembered_unmapped_pages_[kRememberedUnmappedPages];
- // Adds an allocation site to the scratchpad if there is space left.
- void AddAllocationSiteToScratchpad(AllocationSite* site,
- ScratchpadSlotMode mode);
+#ifdef DEBUG
+ // If the --gc-interval flag is set to a positive value, this
+ // variable holds the value indicating the number of allocations
+ // remain until the next failure and garbage collection.
+ int allocation_timeout_;
+#endif // DEBUG
- void UpdateSurvivalStatistics(int start_new_space_size);
+ // Limit that triggers a global GC on the next (normally caused) GC. This
+ // is checked when we have already decided to do a GC to help determine
+ // which collector to invoke, before expanding a paged space in the old
+ // generation and on every allocation in large object space.
+ intptr_t old_generation_allocation_limit_;
- static const int kYoungSurvivalRateHighThreshold = 90;
- static const int kYoungSurvivalRateAllowedDeviation = 15;
+ // Indicates that an allocation has failed in the old generation since the
+ // last GC.
+ bool old_gen_exhausted_;
- static const int kOldSurvivalRateLowThreshold = 10;
+ // Indicates that memory usage is more important than latency.
+ // TODO(ulan): Merge it with memory reducer once chromium:490559 is fixed.
+ bool optimize_for_memory_usage_;
+
+ // Indicates that inline bump-pointer allocation has been globally disabled
+ // for all spaces. This is used to disable allocations in generated code.
+ bool inline_allocation_disabled_;
+
+ // Weak list heads, threaded through the objects.
+ // List heads are initialized lazily and contain the undefined_value at start.
+ Object* native_contexts_list_;
+ Object* allocation_sites_list_;
+
+ // List of encountered weak collections (JSWeakMap and JSWeakSet) during
+ // marking. It is initialized during marking, destroyed after marking and
+ // contains Smi(0) while marking is not active.
+ Object* encountered_weak_collections_;
+
+ Object* encountered_weak_cells_;
+
+ StoreBufferRebuilder store_buffer_rebuilder_;
+
+ List<GCCallbackPair> gc_epilogue_callbacks_;
+ List<GCCallbackPair> gc_prologue_callbacks_;
+
+ // Total RegExp code ever generated
+ double total_regexp_code_generated_;
+
+ int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
+
+ GCTracer* tracer_;
int high_survival_rate_period_length_;
intptr_t promoted_objects_size_;
@@ -2148,52 +2234,6 @@ class Heap {
// of the allocation site.
unsigned int maximum_size_scavenges_;
- // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
- // Re-visit incremental marking heuristics.
- bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
-
- void ConfigureInitialOldGenerationSize();
-
- void SelectScavengingVisitorsTable();
-
- bool HasLowYoungGenerationAllocationRate();
- bool HasLowOldGenerationAllocationRate();
- double YoungGenerationMutatorUtilization();
- double OldGenerationMutatorUtilization();
-
- void ReduceNewSpaceSize();
-
- bool TryFinalizeIdleIncrementalMarking(
- double idle_time_in_ms, size_t size_of_objects,
- size_t mark_compact_speed_in_bytes_per_ms);
-
- GCIdleTimeHandler::HeapState ComputeHeapState();
-
- bool PerformIdleTimeAction(GCIdleTimeAction action,
- GCIdleTimeHandler::HeapState heap_state,
- double deadline_in_ms);
-
- void IdleNotificationEpilogue(GCIdleTimeAction action,
- GCIdleTimeHandler::HeapState heap_state,
- double start_ms, double deadline_in_ms);
- void CheckAndNotifyBackgroundIdleNotification(double idle_time_in_ms,
- double now_ms);
-
- void ClearObjectStats(bool clear_last_time_stats = false);
-
- inline void UpdateAllocationsHash(HeapObject* object);
- inline void UpdateAllocationsHash(uint32_t value);
- inline void PrintAlloctionsHash();
-
- void AddToRingBuffer(const char* string);
- void GetFromRingBuffer(char* buffer);
-
- // Object counts and used memory by InstanceType
- size_t object_counts_[OBJECT_STATS_COUNT];
- size_t object_counts_last_time_[OBJECT_STATS_COUNT];
- size_t object_sizes_[OBJECT_STATS_COUNT];
- size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
-
// Maximum GC pause.
double max_gc_pause_;
@@ -2218,15 +2258,21 @@ class Heap {
// Last time a garbage collection happened.
double last_gc_time_;
- MarkCompactCollector mark_compact_collector_;
+ Scavenger* scavenge_collector_;
+
+ MarkCompactCollector* mark_compact_collector_;
StoreBuffer store_buffer_;
- IncrementalMarking incremental_marking_;
+ IncrementalMarking* incremental_marking_;
- GCIdleTimeHandler gc_idle_time_handler_;
+ GCIdleTimeHandler* gc_idle_time_handler_;
- MemoryReducer memory_reducer_;
+ MemoryReducer* memory_reducer_;
+
+ ObjectStats* object_stats_;
+
+ ScavengeJob* scavenge_job_;
// These two counters are monotonically increasing and never reset.
size_t full_codegen_bytes_generated_;
@@ -2250,7 +2296,6 @@ class Heap {
// deoptimization triggered by garbage collection.
int gcs_since_last_deopt_;
- static const int kAllocationSiteScratchpadSize = 256;
int allocation_sites_scratchpad_length_;
char trace_ring_buffer_[kTraceRingBufferSize];
@@ -2260,9 +2305,6 @@ class Heap {
bool ring_buffer_full_;
size_t ring_buffer_end_;
- static const int kMaxMarkCompactsInIdleRound = 7;
- static const int kIdleScavengeThreshold = 5;
-
// Shared state read by the scavenge collector and set by ScavengeObject.
PromotionQueue promotion_queue_;
@@ -2273,12 +2315,18 @@ class Heap {
// Currently set GC flags that are respected by all GC components.
int current_gc_flags_;
- ExternalStringTable external_string_table_;
+ // Currently set GC callback flags that are used to pass information between
+ // the embedder and V8's GC.
+ GCCallbackFlags current_gc_callback_flags_;
- VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+ ExternalStringTable external_string_table_;
MemoryChunk* chunks_queued_for_free_;
+ size_t concurrent_unmapping_tasks_active_;
+
+ base::Semaphore pending_unmapping_tasks_semaphore_;
+
base::Mutex relocation_mutex_;
int gc_callbacks_depth_;
@@ -2287,41 +2335,29 @@ class Heap {
bool concurrent_sweeping_enabled_;
- // |live_array_buffers_| maps externally allocated memory used as backing
- // store for ArrayBuffers to the length of the respective memory blocks.
- //
- // At the beginning of mark/compact, |not_yet_discovered_array_buffers_| is
- // a copy of |live_array_buffers_| and we remove pointers as we discover live
- // ArrayBuffer objects during marking. At the end of mark/compact, the
- // remaining memory blocks can be freed.
- std::map<void*, size_t> live_array_buffers_;
- std::map<void*, size_t> not_yet_discovered_array_buffers_;
-
- // To be able to free memory held by ArrayBuffers during scavenge as well, we
- // have a separate list of allocated memory held by ArrayBuffers in new space.
- //
- // Since mark/compact also evacuates the new space, all pointers in the
- // |live_array_buffers_for_scavenge_| list are also in the
- // |live_array_buffers_| list.
- std::map<void*, size_t> live_array_buffers_for_scavenge_;
- std::map<void*, size_t> not_yet_discovered_array_buffers_for_scavenge_;
-
- struct StrongRootsList;
StrongRootsList* strong_roots_list_;
+ ArrayBufferTracker* array_buffer_tracker_;
+
+ // Classes in "heap" can be friends.
friend class AlwaysAllocateScope;
- friend class Bootstrapper;
- friend class Deserializer;
- friend class Factory;
friend class GCCallbacksScope;
friend class GCTracer;
friend class HeapIterator;
friend class IncrementalMarking;
- friend class Isolate;
friend class MarkCompactCollector;
friend class MarkCompactMarkingVisitor;
- friend class MapCompact;
+ friend class NewSpace;
+ friend class ObjectStatsVisitor;
friend class Page;
+ friend class Scavenger;
+ friend class StoreBuffer;
+
+ // The allocator interface.
+ friend class Factory;
+
+ // The Isolate constructs us.
+ friend class Isolate;
// Used in cctest.
friend class HeapTester;
@@ -2367,20 +2403,6 @@ class AlwaysAllocateScope {
inline ~AlwaysAllocateScope();
private:
- // Implicitly disable artificial allocation failures.
- Heap* heap_;
- DisallowAllocationFailure daf_;
-};
-
-
-class GCCallbacksScope {
- public:
- explicit inline GCCallbacksScope(Heap* heap);
- inline ~GCCallbacksScope();
-
- inline bool CheckReenter();
-
- private:
Heap* heap_;
};
@@ -2475,32 +2497,30 @@ class SpaceIterator : public Malloced {
// nodes filtering uses GC marks, it can't be used during MS/MC GC
// phases. Also, it is forbidden to interrupt iteration in this mode,
// as this will leave heap objects marked (and thus, unusable).
-class HeapObjectsFilter;
-
class HeapIterator BASE_EMBEDDED {
public:
enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
- explicit HeapIterator(Heap* heap);
- HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
+ explicit HeapIterator(Heap* heap,
+ HeapObjectsFiltering filtering = kNoFiltering);
~HeapIterator();
HeapObject* next();
- void reset();
private:
struct MakeHeapIterableHelper {
explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
};
- // Perform the initialization.
- void Init();
- // Perform all necessary shutdown (destruction) work.
- void Shutdown();
HeapObject* NextObject();
+ // The following two fields need to be declared in this order. Initialization
+ // order guarantees that we first make the heap iterable (which may involve
+ // allocations) and only then lock it down by not allowing further
+ // allocations.
MakeHeapIterableHelper make_heap_iterable_helper_;
DisallowHeapAllocation no_heap_allocation_;
+
Heap* heap_;
HeapObjectsFiltering filtering_;
HeapObjectsFilter* filter_;
@@ -2623,30 +2643,6 @@ class DescriptorLookupCache {
};
-class RegExpResultsCache {
- public:
- enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
-
- // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
- // On success, the returned result is guaranteed to be a COW-array.
- static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
- ResultsCacheType type);
- // Attempt to add value_array to the cache specified by type. On success,
- // value_array is turned into a COW-array.
- static void Enter(Isolate* isolate, Handle<String> key_string,
- Handle<Object> key_pattern, Handle<FixedArray> value_array,
- ResultsCacheType type);
- static void Clear(FixedArray* cache);
- static const int kRegExpResultsCacheSize = 0x100;
-
- private:
- static const int kArrayEntriesPerCacheEntry = 4;
- static const int kStringOffset = 0;
- static const int kPatternOffset = 1;
- static const int kArrayOffset = 2;
-};
-
-
// Abstract base class for checking whether a weak object should be retained.
class WeakObjectRetainer {
public:
diff --git a/deps/v8/src/heap/incremental-marking-inl.h b/deps/v8/src/heap/incremental-marking-inl.h
index fabf59d016..5988426fd5 100644
--- a/deps/v8/src/heap/incremental-marking-inl.h
+++ b/deps/v8/src/heap/incremental-marking-inl.h
@@ -6,6 +6,7 @@
#define V8_HEAP_INCREMENTAL_MARKING_INL_H_
#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
new file mode 100644
index 0000000000..43e8b7628f
--- /dev/null
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -0,0 +1,145 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/incremental-marking-job.h"
+
+#include "src/base/platform/time.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/incremental-marking.h"
+#include "src/isolate.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+void IncrementalMarkingJob::Start(Heap* heap) {
+ DCHECK(!heap->incremental_marking()->IsStopped());
+ // We don't need to reset the flags because tasks from the previous job
+ // can still be pending. We just want to ensure that tasks are posted
+ // if they are not pending.
+ // If the delayed task is pending and made_progress_since_last_delayed_task_ is
+ // true, then the delayed task will clear that flag when it is rescheduled.
+ ScheduleIdleTask(heap);
+ ScheduleDelayedTask(heap);
+}
+
+
+void IncrementalMarkingJob::NotifyIdleTask() { idle_task_pending_ = false; }
+
+
+void IncrementalMarkingJob::NotifyDelayedTask() {
+ delayed_task_pending_ = false;
+}
+
+
+void IncrementalMarkingJob::NotifyIdleTaskProgress() {
+ made_progress_since_last_delayed_task_ = true;
+}
+
+
+void IncrementalMarkingJob::ScheduleIdleTask(Heap* heap) {
+ if (!idle_task_pending_) {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
+ if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
+ idle_task_pending_ = true;
+ auto task = new IdleTask(heap->isolate(), this);
+ V8::GetCurrentPlatform()->CallIdleOnForegroundThread(isolate, task);
+ }
+ }
+}
+
+
+void IncrementalMarkingJob::ScheduleDelayedTask(Heap* heap) {
+ if (!delayed_task_pending_) {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
+ delayed_task_pending_ = true;
+ made_progress_since_last_delayed_task_ = false;
+ auto task = new DelayedTask(heap->isolate(), this);
+ V8::GetCurrentPlatform()->CallDelayedOnForegroundThread(isolate, task,
+ kDelayInSeconds);
+ }
+}
+
+
+IncrementalMarkingJob::IdleTask::Progress IncrementalMarkingJob::IdleTask::Step(
+ Heap* heap, double deadline_in_ms) {
+ IncrementalMarking* incremental_marking = heap->incremental_marking();
+ MarkCompactCollector* mark_compact_collector = heap->mark_compact_collector();
+ if (incremental_marking->IsStopped()) {
+ return kDone;
+ }
+ if (mark_compact_collector->sweeping_in_progress()) {
+ if (mark_compact_collector->IsSweepingCompleted()) {
+ mark_compact_collector->EnsureSweepingCompleted();
+ }
+ return kMoreWork;
+ }
+ const double remaining_idle_time_in_ms =
+ incremental_marking->AdvanceIncrementalMarking(
+ 0, deadline_in_ms, IncrementalMarking::IdleStepActions());
+ if (remaining_idle_time_in_ms > 0.0) {
+ heap->TryFinalizeIdleIncrementalMarking(remaining_idle_time_in_ms);
+ }
+ return incremental_marking->IsStopped() ? kDone : kMoreWork;
+}
+
+
+void IncrementalMarkingJob::IdleTask::RunInternal(double deadline_in_seconds) {
+ double deadline_in_ms =
+ deadline_in_seconds *
+ static_cast<double>(base::Time::kMillisecondsPerSecond);
+ Heap* heap = isolate_->heap();
+ double start_ms = heap->MonotonicallyIncreasingTimeInMs();
+ job_->NotifyIdleTask();
+ job_->NotifyIdleTaskProgress();
+ if (Step(heap, deadline_in_ms) == kMoreWork) {
+ job_->ScheduleIdleTask(heap);
+ }
+ if (FLAG_trace_idle_notification) {
+ double current_time_ms = heap->MonotonicallyIncreasingTimeInMs();
+ double idle_time_in_ms = deadline_in_ms - start_ms;
+ double deadline_difference = deadline_in_ms - current_time_ms;
+ PrintIsolate(isolate_, "%8.0f ms: ", isolate_->time_millis_since_init());
+ PrintF(
+ "Idle task: requested idle time %.2f ms, used idle time %.2f "
+ "ms, deadline usage %.2f ms\n",
+ idle_time_in_ms, idle_time_in_ms - deadline_difference,
+ deadline_difference);
+ }
+}
+
+
+void IncrementalMarkingJob::DelayedTask::Step(Heap* heap) {
+ const int kIncrementalMarkingDelayMs = 50;
+ double deadline =
+ heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
+ heap->incremental_marking()->AdvanceIncrementalMarking(
+ 0, deadline, i::IncrementalMarking::StepActions(
+ i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ i::IncrementalMarking::FORCE_MARKING,
+ i::IncrementalMarking::FORCE_COMPLETION));
+ heap->FinalizeIncrementalMarkingIfComplete(
+ "Incremental marking task: finalize incremental marking");
+}
+
+
+void IncrementalMarkingJob::DelayedTask::RunInternal() {
+ Heap* heap = isolate_->heap();
+ job_->NotifyDelayedTask();
+ IncrementalMarking* incremental_marking = heap->incremental_marking();
+ if (!incremental_marking->IsStopped()) {
+ if (job_->ShouldForceMarkingStep()) {
+ Step(heap);
+ }
+ // The Step() above could have finished incremental marking.
+ if (!incremental_marking->IsStopped()) {
+ job_->ScheduleDelayedTask(heap);
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/incremental-marking-job.h b/deps/v8/src/heap/incremental-marking-job.h
new file mode 100644
index 0000000000..fad46c1246
--- /dev/null
+++ b/deps/v8/src/heap/incremental-marking-job.h
@@ -0,0 +1,81 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_INCREMENTAL_MARKING_JOB_H_
+#define V8_HEAP_INCREMENTAL_MARKING_JOB_H_
+
+#include "src/cancelable-task.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Isolate;
+
+// The incremental marking job uses platform tasks to perform incremental
+// marking steps. The job posts an idle and a delayed task with a large delay.
+// The delayed task performs steps only if the idle task is not making progress.
+// We expect this to be a rare event since incremental marking should finish
+// quickly with the help of the mutator and the idle task.
+// The delayed task guarantees that we eventually finish incremental marking
+// even if the mutator becomes idle and the platform stops running idle tasks,
+// which can happen for background tabs in Chrome.
+class IncrementalMarkingJob {
+ public:
+ class IdleTask : public CancelableIdleTask {
+ public:
+ explicit IdleTask(Isolate* isolate, IncrementalMarkingJob* job)
+ : CancelableIdleTask(isolate), job_(job) {}
+ enum Progress { kDone, kMoreWork };
+ static Progress Step(Heap* heap, double deadline_in_ms);
+ // CancelableIdleTask overrides.
+ void RunInternal(double deadline_in_seconds) override;
+
+ private:
+ IncrementalMarkingJob* job_;
+ };
+
+ class DelayedTask : public CancelableTask {
+ public:
+ explicit DelayedTask(Isolate* isolate, IncrementalMarkingJob* job)
+ : CancelableTask(isolate), job_(job) {}
+ static void Step(Heap* heap);
+ // CancelableTask overrides.
+ void RunInternal() override;
+
+ private:
+ IncrementalMarkingJob* job_;
+ };
+
+ // Delay of the delayed task.
+ static const int kDelayInSeconds = 5;
+
+ IncrementalMarkingJob()
+ : idle_task_pending_(false),
+ delayed_task_pending_(false),
+ made_progress_since_last_delayed_task_(false) {}
+
+ bool ShouldForceMarkingStep() {
+ return !made_progress_since_last_delayed_task_;
+ }
+
+ bool IdleTaskPending() { return idle_task_pending_; }
+
+ void Start(Heap* heap);
+
+ void NotifyIdleTask();
+ void NotifyDelayedTask();
+ void NotifyIdleTaskProgress();
+ void ScheduleIdleTask(Heap* heap);
+ void ScheduleDelayedTask(Heap* heap);
+
+ private:
+ bool idle_task_pending_;
+ bool delayed_task_pending_;
+ bool made_progress_since_last_delayed_task_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_HEAP_INCREMENTAL_MARKING_JOB_H_
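The job pairs an opportunistic idle task with a delayed watchdog: the watchdog forces a marking step only when the idle path reported no progress since its last run. A standalone sketch of that two-task pattern, with a plain FIFO runner standing in for the platform's idle/delayed scheduling (all names here are illustrative):

#include <functional>
#include <queue>
#include <utility>

// Stand-in task runner: executes posted callbacks in order.
struct ToyRunner {
  std::queue<std::function<void()>> tasks;
  void Post(std::function<void()> t) { tasks.push(std::move(t)); }
  void RunAll() {
    while (!tasks.empty()) {
      auto t = std::move(tasks.front());
      tasks.pop();
      t();
    }
  }
};

class ToyMarkingJob {
 public:
  explicit ToyMarkingJob(ToyRunner* runner) : runner_(runner) {}
  void Start() {
    ScheduleIdle();
    ScheduleDelayed();
  }

 private:
  void ScheduleIdle() {
    // Idle path: do a step and record progress.
    runner_->Post([this] { made_progress_ = true; });
  }
  void ScheduleDelayed() {
    // Watchdog path: force a step only if the idle path stalled.
    runner_->Post([this] {
      if (!made_progress_) { /* forced marking step would go here */ }
      made_progress_ = false;
    });
  }

  ToyRunner* runner_;
  bool made_progress_ = false;
};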
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 9549a148b4..cbc26516bb 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -7,9 +7,12 @@
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
+#include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -41,8 +44,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
was_activated_(false),
weak_closure_was_overapproximated_(false),
weak_closure_approximation_rounds_(0),
- request_type_(COMPLETE_MARKING),
- gc_callback_flags_(kNoGCCallbackFlags) {}
+ request_type_(COMPLETE_MARKING) {}
void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
@@ -297,12 +299,6 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
if (is_marking) {
chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
- // It's difficult to filter out slots recorded for large objects.
- if (chunk->owner()->identity() == LO_SPACE &&
- chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
- chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
- }
} else {
chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
@@ -310,7 +306,7 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
}
-void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
+void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
bool is_marking) {
chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
if (is_marking) {
@@ -468,9 +464,7 @@ static void PatchIncrementalMarkingRecordWriteStubs(
}
-void IncrementalMarking::Start(int flags,
- const GCCallbackFlags gc_callback_flags,
- const char* reason) {
+void IncrementalMarking::Start(const char* reason) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start (%s)\n",
(reason == nullptr) ? "unknown reason" : reason);
@@ -482,11 +476,9 @@ void IncrementalMarking::Start(int flags,
ResetStepCounters();
- gc_callback_flags_ = gc_callback_flags;
was_activated_ = true;
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
- heap_->set_current_gc_flags(flags);
StartMarking();
} else {
if (FLAG_trace_incremental_marking) {
@@ -495,7 +487,8 @@ void IncrementalMarking::Start(int flags,
state_ = SWEEPING;
}
- heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
+ heap_->LowerInlineAllocationLimit(kAllocatedThreshold);
+ incremental_marking_job()->Start(heap_);
}
@@ -551,6 +544,7 @@ void IncrementalMarking::StartMarking() {
void IncrementalMarking::MarkObjectGroups() {
DCHECK(FLAG_overapproximate_weak_closure);
DCHECK(!weak_closure_was_overapproximated_);
+ DCHECK(IsMarking());
int old_marking_deque_top =
heap_->mark_compact_collector()->marking_deque()->top();
@@ -745,7 +739,7 @@ void IncrementalMarking::Stop() {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Stopping.\n");
}
- heap_->new_space()->LowerInlineAllocationLimit(0);
+ heap_->ResetInlineAllocationLimit();
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
if (IsMarking()) {
@@ -773,7 +767,7 @@ void IncrementalMarking::Finalize() {
Hurry();
state_ = STOPPED;
is_compacting_ = false;
- heap_->new_space()->LowerInlineAllocationLimit(0);
+ heap_->ResetInlineAllocationLimit();
IncrementalMarking::set_should_hurry(false);
ResetStepCounters();
PatchIncrementalMarkingRecordWriteStubs(heap_,
@@ -822,9 +816,38 @@ void IncrementalMarking::Epilogue() {
}
+double IncrementalMarking::AdvanceIncrementalMarking(
+ intptr_t step_size_in_bytes, double deadline_in_ms,
+ IncrementalMarking::StepActions step_actions) {
+ DCHECK(!IsStopped());
+
+ if (step_size_in_bytes == 0) {
+ step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
+ static_cast<size_t>(GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs),
+ static_cast<size_t>(
+ heap()
+ ->tracer()
+ ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()));
+ }
+
+ double remaining_time_in_ms = 0.0;
+ do {
+ Step(step_size_in_bytes, step_actions.completion_action,
+ step_actions.force_marking, step_actions.force_completion);
+ remaining_time_in_ms =
+ deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
+ } while (remaining_time_in_ms >=
+ 2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
+ !IsComplete() &&
+ !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
+ return remaining_time_in_ms;
+}
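The loop above keeps stepping while at least two step-times of budget remain, so a single step never overruns the deadline, and it hands back whatever slice is too small to use. A sketch of that deadline-driven pattern with stand-in clock and step callbacks (the 2x safety margin mirrors the code above; everything else is illustrative):

#include <functional>

// Deadline-driven stepping: advance while a full step is expected to
// fit, return the leftover time a further step would overshoot.
static double AdvanceUntilDeadline(double deadline_ms, double step_time_ms,
                                   const std::function<double()>& now_ms,
                                   const std::function<bool()>& step) {
  double remaining = deadline_ms - now_ms();
  while (remaining >= 2.0 * step_time_ms) {
    if (!step()) break;  // marking completed early
    remaining = deadline_ms - now_ms();
  }
  return remaining;
}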
+
+
void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
- Start(Heap::kNoGCFlags, kNoGCCallbackFlags, "old space step");
+ heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
+ "old space step");
} else {
Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
}
@@ -907,6 +930,8 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
CompletionAction action,
ForceMarkingAction marking,
ForceCompletionAction completion) {
+ DCHECK(allocated_bytes >= 0);
+
if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
(state_ != SWEEPING && state_ != MARKING)) {
return 0;
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index fcada78f0b..010392875e 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -5,13 +5,17 @@
#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_
+#include "src/cancelable-task.h"
#include "src/execution.h"
-#include "src/heap/mark-compact.h"
+#include "src/heap/incremental-marking-job.h"
#include "src/objects.h"
namespace v8 {
namespace internal {
+// Forward declarations.
+class MarkBit;
+class PagedSpace;
class IncrementalMarking {
public:
@@ -81,9 +85,7 @@ class IncrementalMarking {
bool WasActivated();
- void Start(int flags,
- const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags,
- const char* reason = nullptr);
+ void Start(const char* reason = nullptr);
void MarkObjectGroups();
@@ -101,6 +103,15 @@ class IncrementalMarking {
void Epilogue();
+  // Performs incremental marking steps of step_size_in_bytes as long as
+  // deadline_in_ms is not reached. step_size_in_bytes can be 0 to compute
+  // an estimated step size. Returns the remaining time that cannot be used
+ // for incremental marking anymore because a single step would exceed the
+ // deadline.
+ double AdvanceIncrementalMarking(intptr_t step_size_in_bytes,
+ double deadline_in_ms,
+ StepActions step_actions);
+
// It's hard to know how much work the incremental marker should do to make
// progress in the face of the mutator creating new work for it. We start
// off at a moderate rate of work and gradually increase the speed of the
@@ -173,7 +184,7 @@ class IncrementalMarking {
SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
}
- inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
+ inline void SetNewSpacePageFlags(MemoryChunk* chunk) {
SetNewSpacePageFlags(chunk, IsMarking());
}
@@ -199,7 +210,9 @@ class IncrementalMarking {
Heap* heap() const { return heap_; }
- GCCallbackFlags CallbackFlags() const { return gc_callback_flags_; }
+ IncrementalMarkingJob* incremental_marking_job() {
+ return &incremental_marking_job_;
+ }
private:
int64_t SpaceLeftInOldSpace();
@@ -221,7 +234,7 @@ class IncrementalMarking {
static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
bool is_compacting);
- static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
+ static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
INLINE(void ProcessMarkingDeque());
@@ -259,7 +272,7 @@ class IncrementalMarking {
GCRequestType request_type_;
- GCCallbackFlags gc_callback_flags_;
+ IncrementalMarkingJob incremental_marking_job_;
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 6372c2eeea..6e3ebd7fc7 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -6,6 +6,7 @@
#define V8_HEAP_MARK_COMPACT_INL_H_
#include "src/heap/mark-compact.h"
+#include "src/heap/slots-buffer.h"
#include "src/isolate.h"
namespace v8 {
@@ -56,19 +57,31 @@ bool MarkCompactCollector::IsMarked(Object* obj) {
void MarkCompactCollector::RecordSlot(HeapObject* object, Object** slot,
- Object* target,
- SlotsBuffer::AdditionMode mode) {
+ Object* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
if (target_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(object)) {
- if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
- target_page->slots_buffer_address(), slot, mode)) {
+ if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
+ target_page->slots_buffer_address(), slot,
+ SlotsBuffer::FAIL_ON_OVERFLOW)) {
EvictPopularEvacuationCandidate(target_page);
}
}
}
+void MarkCompactCollector::ForceRecordSlot(HeapObject* object, Object** slot,
+ Object* target) {
+ Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+ if (target_page->IsEvacuationCandidate() &&
+ !ShouldSkipEvacuationSlotRecording(object)) {
+ CHECK(SlotsBuffer::AddTo(slots_buffer_allocator_,
+ target_page->slots_buffer_address(), slot,
+ SlotsBuffer::IGNORE_OVERFLOW));
+ }
+}
+
+
void CodeFlusher::AddCandidate(SharedFunctionInfo* shared_info) {
if (GetNextCandidate(shared_info) == NULL) {
SetNextCandidate(shared_info, shared_function_info_candidates_head_);
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index ee5b4a0ad4..9e317e7d08 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -6,22 +6,26 @@
#include "src/base/atomicops.h"
#include "src/base/bits.h"
+#include "src/base/sys-info.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
-#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
+#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
+#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/slots-buffer.h"
#include "src/heap/spaces-inl.h"
-#include "src/heap-profiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/v8.h"
namespace v8 {
@@ -34,6 +38,11 @@ const char* Marking::kGreyBitPattern = "11";
const char* Marking::kImpossibleBitPattern = "01";
+// The following has to hold in order for {Marking::MarkBitFrom} to not produce
+// an invalid {kImpossibleBitPattern} in the marking bitmap through overlapping
+// mark-bit pairs of adjacent objects.
+STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
+
+
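// To see why two words are needed: the bitmap assigns one mark bit per word,
// and an object's mark is the pair (bit at its first word, the next bit).
// With one-word objects the pairs of neighbouring objects would overlap, so
// a white object ("00") immediately followed by a marked one could read back
// as the impossible pattern "01". A two-word minimum object size keeps every
// pair disjoint from the next object's first bit.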
// -------------------------------------------------------------------------
// MarkCompactCollector
@@ -43,17 +52,21 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
state_(IDLE),
#endif
marking_parity_(ODD_MARKING_PARITY),
- compacting_(false),
was_marked_incrementally_(false),
- sweeping_in_progress_(false),
- pending_sweeper_jobs_semaphore_(0),
evacuation_(false),
- migration_slots_buffer_(NULL),
+ slots_buffer_allocator_(nullptr),
+ migration_slots_buffer_(nullptr),
heap_(heap),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(0),
code_flusher_(NULL),
- have_code_to_deoptimize_(false) {
+ have_code_to_deoptimize_(false),
+ compacting_(false),
+ sweeping_in_progress_(false),
+ compaction_in_progress_(false),
+ pending_sweeper_tasks_semaphore_(0),
+ pending_compaction_tasks_semaphore_(0),
+ concurrent_compaction_tasks_active_(0) {
}
#ifdef VERIFY_HEAP
@@ -229,12 +242,14 @@ void MarkCompactCollector::SetUp() {
free_list_map_space_.Reset(new FreeList(heap_->map_space()));
EnsureMarkingDequeIsReserved();
EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
+ slots_buffer_allocator_ = new SlotsBufferAllocator();
}
void MarkCompactCollector::TearDown() {
AbortCompaction();
delete marking_deque_memory_;
+ delete slots_buffer_allocator_;
}
@@ -302,17 +317,17 @@ static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
}
-static void VerifyValidStoreAndSlotsBufferEntries(Heap* heap) {
- heap->store_buffer()->VerifyValidStoreBufferEntries();
+void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
+ heap()->store_buffer()->VerifyValidStoreBufferEntries();
- VerifyValidSlotsBufferEntries(heap, heap->old_space());
- VerifyValidSlotsBufferEntries(heap, heap->code_space());
- VerifyValidSlotsBufferEntries(heap, heap->map_space());
+ VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
+ VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
+ VerifyValidSlotsBufferEntries(heap(), heap()->map_space());
- LargeObjectIterator it(heap->lo_space());
+ LargeObjectIterator it(heap()->lo_space());
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- SlotsBuffer::VerifySlots(heap, chunk->slots_buffer());
+ SlotsBuffer::VerifySlots(heap(), chunk->slots_buffer());
}
}
#endif
@@ -348,7 +363,7 @@ void MarkCompactCollector::CollectGarbage() {
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
- VerifyValidStoreAndSlotsBufferEntries(heap_);
+ VerifyValidStoreAndSlotsBufferEntries();
}
#endif
@@ -458,6 +473,30 @@ void MarkCompactCollector::ClearMarkbits() {
}
+class MarkCompactCollector::CompactionTask : public v8::Task {
+ public:
+ explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
+ : heap_(heap), spaces_(spaces) {}
+
+ virtual ~CompactionTask() {}
+
+ private:
+ // v8::Task overrides.
+ void Run() override {
+ MarkCompactCollector* mark_compact = heap_->mark_compact_collector();
+ SlotsBuffer* evacuation_slots_buffer = nullptr;
+ mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
+ mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
+ mark_compact->pending_compaction_tasks_semaphore_.Signal();
+ }
+
+ Heap* heap_;
+ CompactionSpaceCollection* spaces_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompactionTask);
+};
+
+
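// Each task thus works on a private CompactionSpaceCollection and a private
// evacuation slots buffer; the only shared-state operations in Run() are
// publishing that buffer via AddEvacuationSlotsBufferSynchronized (which is
// mutex-protected) and signalling the semaphore on completion.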
class MarkCompactCollector::SweeperTask : public v8::Task {
public:
SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
@@ -468,7 +507,7 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
// v8::Task overrides.
void Run() override {
heap_->mark_compact_collector()->SweepInParallel(space_, 0);
- heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
+ heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
}
Heap* heap_;
@@ -518,12 +557,13 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
}
- // Wait twice for both jobs.
+
if (heap()->concurrent_sweeping_enabled()) {
- pending_sweeper_jobs_semaphore_.Wait();
- pending_sweeper_jobs_semaphore_.Wait();
- pending_sweeper_jobs_semaphore_.Wait();
+ pending_sweeper_tasks_semaphore_.Wait();
+ pending_sweeper_tasks_semaphore_.Wait();
+ pending_sweeper_tasks_semaphore_.Wait();
}
+
ParallelSweepSpacesComplete();
sweeping_in_progress_ = false;
RefillFreeList(heap()->paged_space(OLD_SPACE));
@@ -542,11 +582,11 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
bool MarkCompactCollector::IsSweepingCompleted() {
- if (!pending_sweeper_jobs_semaphore_.WaitFor(
+ if (!pending_sweeper_tasks_semaphore_.WaitFor(
base::TimeDelta::FromSeconds(0))) {
return false;
}
- pending_sweeper_jobs_semaphore_.Signal();
+ pending_sweeper_tasks_semaphore_.Signal();
return true;
}
@@ -758,7 +798,7 @@ void MarkCompactCollector::AbortCompaction() {
int npages = evacuation_candidates_.length();
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
- slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+ slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
}
@@ -784,6 +824,10 @@ void MarkCompactCollector::Prepare() {
EnsureSweepingCompleted();
}
+ // If concurrent unmapping tasks are still running, we should wait for
+ // them here.
+ heap()->WaitUntilUnmappingOfFreeChunksCompleted();
+
// Clear marking bits if incremental marking is aborted.
if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
heap()->incremental_marking()->Stop();
@@ -969,8 +1013,8 @@ void CodeFlusher::ProcessOptimizedCodeMaps() {
STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
Context* context =
Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
- Code* code =
- Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+ HeapObject* code = HeapObject::cast(
+ code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
FixedArray* literals = FixedArray::cast(
code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
Smi* ast_id =
@@ -1198,19 +1242,6 @@ MarkCompactCollector::~MarkCompactCollector() {
class MarkCompactMarkingVisitor
: public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
public:
- static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,
- HeapObject* obj);
-
- static void ObjectStatsCountFixedArray(
- FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
- FixedArraySubInstanceType dictionary_type);
-
- template <MarkCompactMarkingVisitor::VisitorId id>
- class ObjectStatsTracker {
- public:
- static inline void Visit(Map* map, HeapObject* obj);
- };
-
static void Initialize();
INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
@@ -1373,132 +1404,6 @@ class MarkCompactMarkingVisitor
// Visit the fields of the RegExp, including the updated FixedArray.
VisitJSRegExp(map, object);
}
-
- static VisitorDispatchTable<Callback> non_count_table_;
-};
-
-
-void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
- FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
- FixedArraySubInstanceType dictionary_type) {
- Heap* heap = fixed_array->map()->GetHeap();
- if (fixed_array->map() != heap->fixed_cow_array_map() &&
- fixed_array->map() != heap->fixed_double_array_map() &&
- fixed_array != heap->empty_fixed_array()) {
- if (fixed_array->IsDictionary()) {
- heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
- } else {
- heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());
- }
- }
-}
-
-
-void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
- MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- int object_size = obj->Size();
- heap->RecordObjectStats(map->instance_type(), object_size);
- non_count_table_.GetVisitorById(id)(map, obj);
- if (obj->IsJSObject()) {
- JSObject* object = JSObject::cast(obj);
- ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
- FAST_ELEMENTS_SUB_TYPE);
- ObjectStatsCountFixedArray(object->properties(),
- DICTIONARY_PROPERTIES_SUB_TYPE,
- FAST_PROPERTIES_SUB_TYPE);
- }
-}
-
-
-template <MarkCompactMarkingVisitor::VisitorId id>
-void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
- HeapObject* obj) {
- ObjectStatsVisitBase(id, map, obj);
-}
-
-
-template <>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
- MarkCompactMarkingVisitor::kVisitMap> {
- public:
- static inline void Visit(Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- Map* map_obj = Map::cast(obj);
- DCHECK(map->instance_type() == MAP_TYPE);
- DescriptorArray* array = map_obj->instance_descriptors();
- if (map_obj->owns_descriptors() &&
- array != heap->empty_descriptor_array()) {
- int fixed_array_size = array->Size();
- heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
- fixed_array_size);
- }
- if (TransitionArray::IsFullTransitionArray(map_obj->raw_transitions())) {
- int fixed_array_size =
- TransitionArray::cast(map_obj->raw_transitions())->Size();
- heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
- fixed_array_size);
- }
- if (map_obj->has_code_cache()) {
- CodeCache* cache = CodeCache::cast(map_obj->code_cache());
- heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
- cache->default_cache()->Size());
- if (!cache->normal_type_cache()->IsUndefined()) {
- heap->RecordFixedArraySubTypeStats(
- MAP_CODE_CACHE_SUB_TYPE,
- FixedArray::cast(cache->normal_type_cache())->Size());
- }
- }
- ObjectStatsVisitBase(kVisitMap, map, obj);
- }
-};
-
-
-template <>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
- MarkCompactMarkingVisitor::kVisitCode> {
- public:
- static inline void Visit(Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- int object_size = obj->Size();
- DCHECK(map->instance_type() == CODE_TYPE);
- Code* code_obj = Code::cast(obj);
- heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetAge(),
- object_size);
- ObjectStatsVisitBase(kVisitCode, map, obj);
- }
-};
-
-
-template <>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
- MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
- public:
- static inline void Visit(Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
- if (sfi->scope_info() != heap->empty_fixed_array()) {
- heap->RecordFixedArraySubTypeStats(
- SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
- }
- ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
- }
-};
-
-
-template <>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
- MarkCompactMarkingVisitor::kVisitFixedArray> {
- public:
- static inline void Visit(Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- FixedArray* fixed_array = FixedArray::cast(obj);
- if (fixed_array == heap->string_table()) {
- heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
- fixed_array->Size());
- }
- ObjectStatsVisitBase(kVisitFixedArray, map, obj);
- }
};
@@ -1508,20 +1413,11 @@ void MarkCompactMarkingVisitor::Initialize() {
table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
if (FLAG_track_gc_object_stats) {
- // Copy the visitor table to make call-through possible.
- non_count_table_.CopyFrom(&table_);
-#define VISITOR_ID_COUNT_FUNCTION(id) \
- table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
- VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
-#undef VISITOR_ID_COUNT_FUNCTION
+ ObjectStatsVisitor::Initialize(&table_);
}
}
-VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
- MarkCompactMarkingVisitor::non_count_table_;
-
-
class CodeMarkingVisitor : public ThreadVisitor {
public:
explicit CodeMarkingVisitor(MarkCompactCollector* collector)
@@ -1836,7 +1732,10 @@ int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
}
Object* target = allocation.ToObjectChecked();
- MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
+ MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
+ if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+ heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+ }
heap()->IncrementSemiSpaceCopiedObjectSize(size);
}
*cells = 0;
@@ -2309,14 +2208,16 @@ void MarkCompactCollector::AfterMarking() {
if (FLAG_track_gc_object_stats) {
if (FLAG_trace_gc_object_stats) {
- heap()->TraceObjectStats();
+ heap()->object_stats_->TraceObjectStats();
}
- heap()->CheckpointObjectStats();
+ heap()->object_stats_->CheckpointObjectStats();
}
}
void MarkCompactCollector::ClearNonLiveReferences() {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_NONLIVEREFERENCES);
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. This action
// is carried out only on maps of JSObjects and related subtypes.
@@ -2610,6 +2511,7 @@ void MarkCompactCollector::AbortWeakCollections() {
void MarkCompactCollector::ProcessAndClearWeakCells() {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_WEAKCELL);
Object* weak_cell_obj = heap()->encountered_weak_cells();
while (weak_cell_obj != Smi::FromInt(0)) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
@@ -2661,17 +2563,91 @@ void MarkCompactCollector::AbortWeakCells() {
}
-void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
+void MarkCompactCollector::RecordMigratedSlot(
+ Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
+ // When parallel compaction is in progress, store and slots buffer entries
+ // require synchronization.
if (heap_->InNewSpace(value)) {
- heap_->store_buffer()->Mark(slot);
+ if (compaction_in_progress_) {
+ heap_->store_buffer()->MarkSynchronized(slot);
+ } else {
+ heap_->store_buffer()->Mark(slot);
+ }
} else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+ SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
reinterpret_cast<Object**>(slot),
SlotsBuffer::IGNORE_OVERFLOW);
}
}
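// Note the IGNORE_OVERFLOW above: unlike RecordSlot, which may fail with
// FAIL_ON_OVERFLOW and then evict the too-popular candidate page, a migrated
// slot is recorded after its object has already moved, so aborting is no
// longer an option and the buffer chain simply has to grow.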
+void MarkCompactCollector::RecordMigratedCodeEntrySlot(
+ Address code_entry, Address code_entry_slot,
+ SlotsBuffer** evacuation_slots_buffer) {
+ if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+ SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
+ SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
+ SlotsBuffer::IGNORE_OVERFLOW);
+ }
+}
+
+
+void MarkCompactCollector::RecordMigratedCodeObjectSlot(
+ Address code_object, SlotsBuffer** evacuation_slots_buffer) {
+ SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
+ SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
+ SlotsBuffer::IGNORE_OVERFLOW);
+}
+
+
+static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
+ if (RelocInfo::IsCodeTarget(rmode)) {
+ return SlotsBuffer::CODE_TARGET_SLOT;
+ } else if (RelocInfo::IsCell(rmode)) {
+ return SlotsBuffer::CELL_TARGET_SLOT;
+ } else if (RelocInfo::IsEmbeddedObject(rmode)) {
+ return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
+ } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
+ return SlotsBuffer::DEBUG_TARGET_SLOT;
+ }
+ UNREACHABLE();
+ return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
+}
+
+
+static inline SlotsBuffer::SlotType DecodeSlotType(
+ SlotsBuffer::ObjectSlot slot) {
+ return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
+}
+
+
+void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
+ Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+ RelocInfo::Mode rmode = rinfo->rmode();
+ if (target_page->IsEvacuationCandidate() &&
+ (rinfo->host() == NULL ||
+ !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+ Address addr = rinfo->pc();
+ SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
+ if (rinfo->IsInConstantPool()) {
+ addr = rinfo->constant_pool_entry_address();
+ if (RelocInfo::IsCodeTarget(rmode)) {
+ slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
+ } else {
+ DCHECK(RelocInfo::IsEmbeddedObject(rmode));
+ slot_type = SlotsBuffer::OBJECT_SLOT;
+ }
+ }
+ bool success = SlotsBuffer::AddTo(
+ slots_buffer_allocator_, target_page->slots_buffer_address(), slot_type,
+ addr, SlotsBuffer::FAIL_ON_OVERFLOW);
+ if (!success) {
+ EvictPopularEvacuationCandidate(target_page);
+ }
+ }
+}
+
+
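// A note on the constant pool cases above: a constant pool entry is a plain
// data word rather than a patchable instruction sequence, so a code target
// there is recorded as a CODE_ENTRY_SLOT (raw entry address) and an embedded
// object as an OBJECT_SLOT (tagged pointer), instead of the reloc-info based
// slot types returned by SlotTypeForRMode.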
// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
//
@@ -2686,21 +2662,23 @@ void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
// pointer iteration. This is an issue if the store buffer overflows and we
// have to scan the entire old space, including dead objects, looking for
// pointers to new space.
-void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
- int size, AllocationSpace dest) {
+void MarkCompactCollector::MigrateObject(
+ HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
+ SlotsBuffer** evacuation_slots_buffer) {
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(heap()->AllowedToBeMigrated(src, dest));
DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
if (dest == OLD_SPACE) {
+ DCHECK(evacuation_slots_buffer != nullptr);
DCHECK(IsAligned(size, kPointerSize));
switch (src->ContentType()) {
case HeapObjectContents::kTaggedValues:
- MigrateObjectTagged(dst, src, size);
+ MigrateObjectTagged(dst, src, size, evacuation_slots_buffer);
break;
case HeapObjectContents::kMixedValues:
- MigrateObjectMixed(dst, src, size);
+ MigrateObjectMixed(dst, src, size, evacuation_slots_buffer);
break;
case HeapObjectContents::kRawValues:
@@ -2711,21 +2689,17 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
if (compacting_ && dst->IsJSFunction()) {
Address code_entry_slot = dst->address() + JSFunction::kCodeEntryOffset;
Address code_entry = Memory::Address_at(code_entry_slot);
-
- if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
- SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
- SlotsBuffer::IGNORE_OVERFLOW);
- }
+ RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
+ evacuation_slots_buffer);
}
} else if (dest == CODE_SPACE) {
+ DCHECK(evacuation_slots_buffer != nullptr);
PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
heap()->MoveBlock(dst_addr, src_addr, size);
- SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
- SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
- SlotsBuffer::IGNORE_OVERFLOW);
+ RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
Code::cast(dst)->Relocate(dst_addr - src_addr);
} else {
+ DCHECK(evacuation_slots_buffer == nullptr);
DCHECK(dest == NEW_SPACE);
heap()->MoveBlock(dst_addr, src_addr, size);
}
@@ -2734,27 +2708,36 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
}
-void MarkCompactCollector::MigrateObjectTagged(HeapObject* dst, HeapObject* src,
- int size) {
+void MarkCompactCollector::MigrateObjectTagged(
+ HeapObject* dst, HeapObject* src, int size,
+ SlotsBuffer** evacuation_slots_buffer) {
Address src_slot = src->address();
Address dst_slot = dst->address();
for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
Object* value = Memory::Object_at(src_slot);
Memory::Object_at(dst_slot) = value;
- RecordMigratedSlot(value, dst_slot);
+ RecordMigratedSlot(value, dst_slot, evacuation_slots_buffer);
src_slot += kPointerSize;
dst_slot += kPointerSize;
}
}
-void MarkCompactCollector::MigrateObjectMixed(HeapObject* dst, HeapObject* src,
- int size) {
+void MarkCompactCollector::MigrateObjectMixed(
+ HeapObject* dst, HeapObject* src, int size,
+ SlotsBuffer** evacuation_slots_buffer) {
if (src->IsFixedTypedArrayBase()) {
heap()->MoveBlock(dst->address(), src->address(), size);
Address base_pointer_slot =
dst->address() + FixedTypedArrayBase::kBasePointerOffset;
- RecordMigratedSlot(Memory::Object_at(base_pointer_slot), base_pointer_slot);
+ RecordMigratedSlot(Memory::Object_at(base_pointer_slot), base_pointer_slot,
+ evacuation_slots_buffer);
+ } else if (src->IsBytecodeArray()) {
+ heap()->MoveBlock(dst->address(), src->address(), size);
+ Address constant_pool_slot =
+ dst->address() + BytecodeArray::kConstantPoolOffset;
+ RecordMigratedSlot(Memory::Object_at(constant_pool_slot),
+ constant_pool_slot, evacuation_slots_buffer);
} else if (src->IsJSArrayBuffer()) {
heap()->MoveBlock(dst->address(), src->address(), size);
@@ -2764,7 +2747,8 @@ void MarkCompactCollector::MigrateObjectMixed(HeapObject* dst, HeapObject* src,
Address regular_slots_end =
dst->address() + JSArrayBuffer::kByteLengthOffset + kPointerSize;
while (regular_slot < regular_slots_end) {
- RecordMigratedSlot(Memory::Object_at(regular_slot), regular_slot);
+ RecordMigratedSlot(Memory::Object_at(regular_slot), regular_slot,
+ evacuation_slots_buffer);
regular_slot += kPointerSize;
}
@@ -2774,7 +2758,7 @@ void MarkCompactCollector::MigrateObjectMixed(HeapObject* dst, HeapObject* src,
dst->address() + JSArrayBuffer::kSizeWithInternalFields;
while (internal_field_slot < internal_fields_end) {
RecordMigratedSlot(Memory::Object_at(internal_field_slot),
- internal_field_slot);
+ internal_field_slot, evacuation_slots_buffer);
internal_field_slot += kPointerSize;
}
} else if (FLAG_unbox_double_fields) {
@@ -2791,7 +2775,7 @@ void MarkCompactCollector::MigrateObjectMixed(HeapObject* dst, HeapObject* src,
Memory::Object_at(dst_slot) = value;
if (helper.IsTagged(static_cast<int>(src_slot - src_addr))) {
- RecordMigratedSlot(value, dst_slot);
+ RecordMigratedSlot(value, dst_slot, evacuation_slots_buffer);
}
src_slot += kPointerSize;
@@ -2809,6 +2793,49 @@ void MarkCompactCollector::MigrateObjectRaw(HeapObject* dst, HeapObject* src,
}
+static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
+ SlotsBuffer::SlotType slot_type, Address addr) {
+ switch (slot_type) {
+ case SlotsBuffer::CODE_TARGET_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
+ rinfo.Visit(isolate, v);
+ break;
+ }
+ case SlotsBuffer::CELL_TARGET_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::CELL, 0, NULL);
+ rinfo.Visit(isolate, v);
+ break;
+ }
+ case SlotsBuffer::CODE_ENTRY_SLOT: {
+ v->VisitCodeEntry(addr);
+ break;
+ }
+ case SlotsBuffer::RELOCATED_CODE_OBJECT: {
+ HeapObject* obj = HeapObject::FromAddress(addr);
+ Code::cast(obj)->CodeIterateBody(v);
+ break;
+ }
+ case SlotsBuffer::DEBUG_TARGET_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0, NULL);
+ if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
+ break;
+ }
+ case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
+ RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+ rinfo.Visit(isolate, v);
+ break;
+ }
+ case SlotsBuffer::OBJECT_SLOT: {
+ v->VisitPointer(reinterpret_cast<Object**>(addr));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor : public ObjectVisitor {
@@ -3001,6 +3028,32 @@ void PointersUpdatingVisitor::CheckLayoutDescriptorAndDie(Heap* heap,
#endif
+void MarkCompactCollector::UpdateSlots(SlotsBuffer* buffer) {
+ PointersUpdatingVisitor v(heap_);
+ size_t buffer_size = buffer->Size();
+
+ for (size_t slot_idx = 0; slot_idx < buffer_size; ++slot_idx) {
+ SlotsBuffer::ObjectSlot slot = buffer->Get(slot_idx);
+ if (!SlotsBuffer::IsTypedSlot(slot)) {
+ PointersUpdatingVisitor::UpdateSlot(heap_, slot);
+ } else {
+ ++slot_idx;
+ DCHECK(slot_idx < buffer_size);
+ UpdateSlot(heap_->isolate(), &v, DecodeSlotType(slot),
+ reinterpret_cast<Address>(buffer->Get(slot_idx)));
+ }
+ }
+}
+
+
+void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
+ while (buffer != NULL) {
+ UpdateSlots(buffer);
+ buffer = buffer->next();
+ }
+}
+
+
static void UpdatePointer(HeapObject** address, HeapObject* object) {
MapWord map_word = object->map_word();
// The store buffer can still contain stale pointers in dead large objects.
@@ -3033,14 +3086,15 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
OldSpace* old_space = heap()->old_space();
- HeapObject* target;
+ HeapObject* target = nullptr;
AllocationAlignment alignment = object->RequiredAlignment();
AllocationResult allocation = old_space->AllocateRaw(object_size, alignment);
if (allocation.To(&target)) {
- MigrateObject(target, object, object_size, old_space->identity());
+ MigrateObject(target, object, object_size, old_space->identity(),
+ &migration_slots_buffer_);
// If we end up needing more special cases, we should factor this out.
if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
- heap()->PromoteArrayBuffer(target);
+ heap()->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
}
heap()->IncrementPromotedObjectsSize(object_size);
return true;
@@ -3200,6 +3254,9 @@ bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
if (object->IsFixedTypedArrayBase()) {
return static_cast<int>(slot - object->address()) ==
FixedTypedArrayBase::kBasePointerOffset;
+ } else if (object->IsBytecodeArray()) {
+ return static_cast<int>(slot - object->address()) ==
+ BytecodeArray::kConstantPoolOffset;
} else if (object->IsJSArrayBuffer()) {
int off = static_cast<int>(slot - object->address());
return (off >= JSArrayBuffer::BodyDescriptor::kStartOffset &&
@@ -3266,11 +3323,17 @@ void MarkCompactCollector::EvacuateNewSpace() {
}
-void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
+void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
+ SlotsBuffer* evacuation_slots_buffer) {
+ base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
+ evacuation_slots_buffers_.Add(evacuation_slots_buffer);
+}
+
+
+bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
+ Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
AlwaysAllocateScope always_allocate(isolate());
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
- p->SetWasSwept();
int offsets[16];
@@ -3288,21 +3351,20 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
int size = object->Size();
AllocationAlignment alignment = object->RequiredAlignment();
- HeapObject* target_object;
- AllocationResult allocation = space->AllocateRaw(size, alignment);
+ HeapObject* target_object = nullptr;
+ AllocationResult allocation = target_space->AllocateRaw(size, alignment);
if (!allocation.To(&target_object)) {
- // If allocation failed, use emergency memory and re-try allocation.
- CHECK(space->HasEmergencyMemory());
- space->UseEmergencyMemory();
- allocation = space->AllocateRaw(size, alignment);
- }
- if (!allocation.To(&target_object)) {
- // OS refused to give us memory.
- V8::FatalProcessOutOfMemory("Evacuation");
- return;
+ // We need to abort compaction for this page. Make sure that we reset
+ // the mark bits for objects that have already been migrated.
+ if (i > 0) {
+ p->markbits()->ClearRange(p->AddressToMarkbitIndex(p->area_start()),
+ p->AddressToMarkbitIndex(object_addr));
+ }
+ return false;
}
- MigrateObject(target_object, object, size, space->identity());
+ MigrateObject(target_object, object, size, target_space->identity(),
+ evacuation_slots_buffer);
DCHECK(object->map_word().IsForwardingAddress());
}
@@ -3310,66 +3372,156 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
*cell = 0;
}
p->ResetLiveBytes();
+ return true;
}
-void MarkCompactCollector::EvacuatePages() {
- int npages = evacuation_candidates_.length();
+int MarkCompactCollector::NumberOfParallelCompactionTasks() {
+ if (!FLAG_parallel_compaction) return 1;
+  // We cap the number of parallel compaction tasks at the minimum of
+  // - the number of available cores minus one (the main thread contributes
+  //   as well),
+  // - roughly one task per kPagesPerCompactionTask evacuation candidates,
+  // - a hard limit of kMaxCompactionTasks.
+ const int kPagesPerCompactionTask = 4;
+ const int kMaxCompactionTasks = 8;
+ return Min(kMaxCompactionTasks,
+ Min(1 + evacuation_candidates_.length() / kPagesPerCompactionTask,
+ Max(1, base::SysInfo::NumberOfProcessors() - 1)));
+}
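// A worked example of the cap, assuming an 8-core machine and 10 evacuation
// candidates:
//   cores term:      Max(1, 8 - 1)                       = 7
//   candidates term: 1 + 10 / kPagesPerCompactionTask    = 3
//   result:          Min(kMaxCompactionTasks, Min(3, 7)) = 3 tasks,
// one of which runs on the main thread (see EvacuatePagesInParallel below).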
+
+
+void MarkCompactCollector::EvacuatePagesInParallel() {
+ if (evacuation_candidates_.length() == 0) return;
+
+ const int num_tasks = NumberOfParallelCompactionTasks();
+
+ // Set up compaction spaces.
+ CompactionSpaceCollection** compaction_spaces_for_tasks =
+ new CompactionSpaceCollection*[num_tasks];
+ for (int i = 0; i < num_tasks; i++) {
+ compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
+ }
+
+ compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
+ heap()->old_space());
+ compaction_spaces_for_tasks[0]
+ ->Get(CODE_SPACE)
+ ->MoveOverFreeMemory(heap()->code_space());
+
+ compaction_in_progress_ = true;
+ // Kick off parallel tasks.
+ for (int i = 1; i < num_tasks; i++) {
+ concurrent_compaction_tasks_active_++;
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
+ v8::Platform::kShortRunningTask);
+ }
+
+  // Contribute on the main thread. Counter and signal are in principle not
+  // needed here.
+ concurrent_compaction_tasks_active_++;
+ EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
+ pending_compaction_tasks_semaphore_.Signal();
+
+ WaitUntilCompactionCompleted();
+
+ // Merge back memory (compacted and unused) from compaction spaces.
+ for (int i = 0; i < num_tasks; i++) {
+ heap()->old_space()->MergeCompactionSpace(
+ compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
+ heap()->code_space()->MergeCompactionSpace(
+ compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
+ delete compaction_spaces_for_tasks[i];
+ }
+ delete[] compaction_spaces_for_tasks;
+
+ // Finalize sequentially.
+ const int num_pages = evacuation_candidates_.length();
int abandoned_pages = 0;
- for (int i = 0; i < npages; i++) {
+ for (int i = 0; i < num_pages; i++) {
Page* p = evacuation_candidates_[i];
- DCHECK(p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
- DCHECK(static_cast<int>(p->parallel_sweeping()) ==
- MemoryChunk::SWEEPING_DONE);
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- // Allocate emergency memory for the case when compaction fails due to out
- // of memory.
- if (!space->HasEmergencyMemory()) {
- space->CreateEmergencyMemory(); // If the OS lets us.
- }
- if (p->IsEvacuationCandidate()) {
- // During compaction we might have to request a new page in order to free
- // up a page. Check that we actually got an emergency page above so we
- // can guarantee that this succeeds.
- if (space->HasEmergencyMemory()) {
- EvacuateLiveObjectsFromPage(p);
- // Unlink the page from the list of pages here. We must not iterate
- // over that page later (e.g. when scan on scavenge pages are
- // processed). The page itself will be freed later and is still
- // reachable from the evacuation candidates list.
+ switch (p->parallel_compaction_state().Value()) {
+ case MemoryChunk::ParallelCompactingState::kCompactingAborted:
+ // We have partially compacted the page, i.e., some objects may have
+ // moved, others are still in place.
+ // We need to:
+ // - Leave the evacuation candidate flag for later processing of
+ // slots buffer entries.
+ // - Leave the slots buffer there for processing of entries added by
+ // the write barrier.
+ // - Rescan the page as slot recording in the migration buffer only
+ // happens upon moving (which we potentially didn't do).
+ // - Leave the page in the list of pages of a space since we could not
+ // fully evacuate it.
+ DCHECK(p->IsEvacuationCandidate());
+ p->SetFlag(Page::RESCAN_ON_EVACUATION);
+ abandoned_pages++;
+ break;
+ case MemoryChunk::kCompactingFinalize:
+ DCHECK(p->IsEvacuationCandidate());
+ p->SetWasSwept();
p->Unlink();
- } else {
- // Without room for expansion evacuation is not guaranteed to succeed.
- // Pessimistically abandon unevacuated pages.
- for (int j = i; j < npages; j++) {
- Page* page = evacuation_candidates_[j];
- slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
- page->ClearEvacuationCandidate();
- page->SetFlag(Page::RESCAN_ON_EVACUATION);
- }
- abandoned_pages = npages - i;
break;
- }
+ case MemoryChunk::kCompactingDone:
+ DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
+ DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+ break;
+ default:
+        // kCompactingDone is handled above; we should never observe
+        // kCompactingInProgress here.
+ UNREACHABLE();
}
+ p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
}
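// The per-page states encountered above form a small state machine, driven
// by EvacuatePages below:
//   kCompactingDone --(TrySetValue)--> kCompactingInProgress
//   kCompactingInProgress --> kCompactingFinalize  (page fully evacuated)
//   kCompactingInProgress --> kCompactingAborted   (allocation failed)
//   kCompactingInProgress --> kCompactingDone      (popular page, skipped)
// The finalization loop above then folds every page back to kCompactingDone.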
- if (npages > 0) {
- // Release emergency memory.
- PagedSpaces spaces(heap());
- for (PagedSpace* space = spaces.next(); space != NULL;
- space = spaces.next()) {
- if (space->HasEmergencyMemory()) {
- space->FreeEmergencyMemory();
- }
- }
+ if (num_pages > 0) {
if (FLAG_trace_fragmentation) {
if (abandoned_pages != 0) {
PrintF(
- " Abandon %d out of %d page defragmentations due to lack of "
- "memory\n",
- abandoned_pages, npages);
+ " Abandoned (at least partially) %d out of %d page compactions due"
+ " to lack of memory\n",
+ abandoned_pages, num_pages);
} else {
- PrintF(" Defragmented %d pages\n", npages);
+ PrintF(" Compacted %d pages\n", num_pages);
+ }
+ }
+ }
+}
+
+
+void MarkCompactCollector::WaitUntilCompactionCompleted() {
+ while (concurrent_compaction_tasks_active_ > 0) {
+ pending_compaction_tasks_semaphore_.Wait();
+ concurrent_compaction_tasks_active_--;
+ }
+ compaction_in_progress_ = false;
+}
+
+
+void MarkCompactCollector::EvacuatePages(
+ CompactionSpaceCollection* compaction_spaces,
+ SlotsBuffer** evacuation_slots_buffer) {
+ for (int i = 0; i < evacuation_candidates_.length(); i++) {
+ Page* p = evacuation_candidates_[i];
+ DCHECK(p->IsEvacuationCandidate() ||
+ p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+ DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
+ MemoryChunk::kSweepingDone);
+ if (p->parallel_compaction_state().TrySetValue(
+ MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
+ if (p->IsEvacuationCandidate()) {
+ DCHECK_EQ(p->parallel_compaction_state().Value(),
+ MemoryChunk::kCompactingInProgress);
+ if (EvacuateLiveObjectsFromPage(
+ p, compaction_spaces->Get(p->owner()->identity()),
+ evacuation_slots_buffer)) {
+ p->parallel_compaction_state().SetValue(
+ MemoryChunk::kCompactingFinalize);
+ } else {
+ p->parallel_compaction_state().SetValue(
+ MemoryChunk::kCompactingAborted);
+ }
+ } else {
+ // There could be popular pages in the list of evacuation candidates
+        // which we do not compact.
+ p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
}
}
}
@@ -3391,49 +3543,6 @@ class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
};
-static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
- SlotsBuffer::SlotType slot_type, Address addr) {
- switch (slot_type) {
- case SlotsBuffer::CODE_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
- rinfo.Visit(isolate, v);
- break;
- }
- case SlotsBuffer::CELL_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CELL, 0, NULL);
- rinfo.Visit(isolate, v);
- break;
- }
- case SlotsBuffer::CODE_ENTRY_SLOT: {
- v->VisitCodeEntry(addr);
- break;
- }
- case SlotsBuffer::RELOCATED_CODE_OBJECT: {
- HeapObject* obj = HeapObject::FromAddress(addr);
- Code::cast(obj)->CodeIterateBody(v);
- break;
- }
- case SlotsBuffer::DEBUG_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0, NULL);
- if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
- break;
- }
- case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
- rinfo.Visit(isolate, v);
- break;
- }
- case SlotsBuffer::OBJECT_SLOT: {
- v->VisitPointer(reinterpret_cast<Object**>(addr));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
@@ -3538,7 +3647,7 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
// When concurrent sweeping is active, the page will be marked after
// sweeping by the main thread.
- p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
+ p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
} else {
p->SetWasSwept();
}
@@ -3546,24 +3655,6 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
}
-static bool IsOnInvalidatedCodeObject(Address addr) {
- // We did not record any slots in large objects thus
- // we can safely go to the page from the slot address.
- Page* p = Page::FromAddress(addr);
-
- // First check owner's identity because old space is swept concurrently or
- // lazily and might still have non-zero mark-bits on some pages.
- if (p->owner()->identity() != CODE_SPACE) return false;
-
- // In code space only bits on evacuation candidates (but we don't record
- // any slots on them) and under invalidated code objects are non-zero.
- MarkBit mark_bit =
- p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
-
- return Marking::IsBlackOrGrey(mark_bit);
-}
-
-
void MarkCompactCollector::InvalidateCode(Code* code) {
if (heap_->incremental_marking()->IsCompacting() &&
!ShouldSkipEvacuationSlotRecording(code)) {
@@ -3619,7 +3710,29 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_PAGES);
EvacuationScope evacuation_scope(this);
- EvacuatePages();
+ EvacuatePagesInParallel();
+ }
+
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
+ UpdateSlotsRecordedIn(migration_slots_buffer_);
+ if (FLAG_trace_fragmentation_verbose) {
+ PrintF(" migration slots buffer: %d\n",
+ SlotsBuffer::SizeOfChain(migration_slots_buffer_));
+ }
+ slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
+ DCHECK(migration_slots_buffer_ == NULL);
+
+ // TODO(hpayer): Process the slots buffers in parallel. This has to be done
+ // after evacuation of all pages finishes.
+ int buffers = evacuation_slots_buffers_.length();
+ for (int i = 0; i < buffers; i++) {
+ SlotsBuffer* buffer = evacuation_slots_buffers_[i];
+ UpdateSlotsRecordedIn(buffer);
+ slots_buffer_allocator_->DeallocateChain(&buffer);
+ }
+ evacuation_slots_buffers_.Rewind(0);
}
// Second pass: find pointers to new space and update them.
@@ -3653,34 +3766,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
}
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
- SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_);
- if (FLAG_trace_fragmentation_verbose) {
- PrintF(" migration slots buffer: %d\n",
- SlotsBuffer::SizeOfChain(migration_slots_buffer_));
- }
-
- if (compacting_ && was_marked_incrementally_) {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_RESCAN_LARGE_OBJECTS);
- // It's difficult to filter out slots recorded for large objects.
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- // LargeObjectSpace is not swept yet thus we have to skip
- // dead objects explicitly.
- if (!IsMarked(obj)) continue;
-
- Page* p = Page::FromAddress(obj->address());
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- obj->Iterate(&updating_visitor);
- p->ClearFlag(Page::RESCAN_ON_EVACUATION);
- }
- }
- }
- }
-
int npages = evacuation_candidates_.length();
{
GCTracer::Scope gc_scope(
@@ -3692,18 +3777,28 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
if (p->IsEvacuationCandidate()) {
- SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer());
+ UpdateSlotsRecordedIn(p->slots_buffer());
if (FLAG_trace_fragmentation_verbose) {
PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
SlotsBuffer::SizeOfChain(p->slots_buffer()));
}
+ slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
// Important: skip list should be cleared only after roots were updated
// because root iteration traverses the stack and might have to find
// code objects from non-updated pc pointing into evacuation candidate.
SkipList* list = p->skip_list();
if (list != NULL) list->Clear();
- } else {
+ }
+
+ if (p->IsEvacuationCandidate() &&
+ p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+      // This is the case where we aborted compacting the page. Clear the
+      // flag here to avoid releasing the page later on.
+ p->ClearEvacuationCandidate();
+ }
+
+ if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
if (FLAG_gc_verbose) {
PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
reinterpret_cast<intptr_t>(p));
@@ -3750,9 +3845,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
heap_->isolate()->inner_pointer_to_code_cache()->Flush();
- slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
- DCHECK(migration_slots_buffer_ == NULL);
-
// The hashing of weak_object_to_code_table is no longer valid.
heap()->weak_object_to_code_table()->Rehash(
heap()->isolate()->factory()->undefined_value());
@@ -3779,12 +3871,12 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->area_start(), p->area_size());
p->set_scan_on_scavenge(false);
- slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
p->ResetLiveBytes();
space->ReleasePage(p);
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
+ heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
heap()->FreeQueuedChunks();
}
@@ -4206,11 +4298,12 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
int max_freed = 0;
if (page->TryLock()) {
// If this page was already swept in the meantime, we can return here.
- if (page->parallel_sweeping() != MemoryChunk::SWEEPING_PENDING) {
+ if (page->parallel_sweeping_state().Value() !=
+ MemoryChunk::kSweepingPending) {
page->mutex()->Unlock();
return 0;
}
- page->set_parallel_sweeping(MemoryChunk::SWEEPING_IN_PROGRESS);
+ page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingInProgress);
FreeList* free_list;
FreeList private_free_list(space);
if (space->identity() == OLD_SPACE) {
@@ -4251,7 +4344,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
while (it.has_next()) {
Page* p = it.next();
- DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
+ DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
// Clear sweeping flags indicating that marking bits are still intact.
p->ClearWasSwept();
@@ -4305,7 +4398,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
reinterpret_cast<intptr_t>(p));
}
- p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
+ p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
space->IncreaseUnsweptFreeBytes(p);
}
space->set_end_of_unswept_pages(p);
@@ -4337,9 +4430,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
PrintF("SweepSpace: %s (%d pages swept)\n",
AllocationSpaceName(space->identity()), pages_swept);
}
-
- // Give pages that are queued to be freed back to the OS.
- heap()->FreeQueuedChunks();
}
@@ -4356,11 +4446,6 @@ void MarkCompactCollector::SweepSpaces() {
MoveEvacuationCandidatesToEndOfPagesList();
- // Noncompacting collections simply sweep the spaces to clear the mark
- // bits and free the nonlive blocks (for old and map spaces). We sweep
- // the map space last because freeing non-live maps overwrites them and
- // the other spaces rely on possibly non-live maps to get the sizes for
- // non-live objects.
{
{
GCTracer::Scope sweep_scope(heap()->tracer(),
@@ -4383,22 +4468,25 @@ void MarkCompactCollector::SweepSpaces() {
}
}
+ // Deallocate unmarked large objects.
+ heap_->lo_space()->FreeUnmarkedObjects();
+
+ // Give pages that are queued to be freed back to the OS. Invalid store
+  // buffer entries have already been filtered out, so we can just release
+  // the memory.
+ heap()->FreeQueuedChunks();
+
EvacuateNewSpaceAndCandidates();
- // NOTE: ArrayBuffers must be evacuated first, before freeing them. Otherwise
- // not yet discovered buffers for scavenge will have all of them, and they
- // will be erroneously freed.
- heap()->FreeDeadArrayBuffers(false);
+ // EvacuateNewSpaceAndCandidates iterates over new space objects and for
+ // ArrayBuffers either re-registers them as live or promotes them. This is
+ // needed to properly free them.
+ heap()->array_buffer_tracker()->FreeDead(false);
- // Deallocate unmarked objects and clear marked bits for marked objects.
- heap_->lo_space()->FreeUnmarkedObjects();
+ // Clear the marking state of live large objects.
+ heap_->lo_space()->ClearMarkingStateOfLiveObjects();
// Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
- CodeRange* code_range = heap()->isolate()->code_range();
- if (code_range != NULL && code_range->valid()) {
- code_range->ReserveEmergencyBlock();
- }
if (FLAG_print_cumulative_gc_stat) {
heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
@@ -4417,11 +4505,12 @@ void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
PageIterator it(space);
while (it.has_next()) {
Page* p = it.next();
- if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
- p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
+ if (p->parallel_sweeping_state().Value() ==
+ MemoryChunk::kSweepingFinalize) {
+ p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
p->SetWasSwept();
}
- DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
+ DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
}
}
@@ -4472,163 +4561,6 @@ void MarkCompactCollector::Initialize() {
}
-bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
- return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
-}
-
-
-bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address, SlotType type,
- Address addr, AdditionMode mode) {
- SlotsBuffer* buffer = *buffer_address;
- if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
- if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
- allocator->DeallocateChain(buffer_address);
- return false;
- }
- buffer = allocator->AllocateBuffer(buffer);
- *buffer_address = buffer;
- }
- DCHECK(buffer->HasSpaceForTypedSlot());
- buffer->Add(reinterpret_cast<ObjectSlot>(type));
- buffer->Add(reinterpret_cast<ObjectSlot>(addr));
- return true;
-}
-
-
-void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
- // Remove entries by replacing them with an old-space slot containing a smi
- // that is located in an unmovable page.
- const ObjectSlot kRemovedEntry = HeapObject::RawField(
- heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
- DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
- ->NeverEvacuate());
-
- while (buffer != NULL) {
- SlotsBuffer::ObjectSlot* slots = buffer->slots_;
- intptr_t slots_count = buffer->idx_;
-
- for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
- ObjectSlot slot = slots[slot_idx];
- if (!IsTypedSlot(slot)) {
- Object* object = *slot;
- if ((object->IsHeapObject() && heap->InNewSpace(object)) ||
- !heap->mark_compact_collector()->IsSlotInLiveObject(
- reinterpret_cast<Address>(slot))) {
- slots[slot_idx] = kRemovedEntry;
- }
- } else {
- ++slot_idx;
- DCHECK(slot_idx < slots_count);
- }
- }
- buffer = buffer->next();
- }
-}
-
-
-void SlotsBuffer::RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
- Address start_slot, Address end_slot) {
- // Remove entries by replacing them with an old-space slot containing a smi
- // that is located in an unmovable page.
- const ObjectSlot kRemovedEntry = HeapObject::RawField(
- heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
- DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
- ->NeverEvacuate());
-
- while (buffer != NULL) {
- SlotsBuffer::ObjectSlot* slots = buffer->slots_;
- intptr_t slots_count = buffer->idx_;
- bool is_typed_slot = false;
-
- for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
- ObjectSlot slot = slots[slot_idx];
- if (!IsTypedSlot(slot)) {
- Address slot_address = reinterpret_cast<Address>(slot);
- if (slot_address >= start_slot && slot_address < end_slot) {
- slots[slot_idx] = kRemovedEntry;
- if (is_typed_slot) {
- slots[slot_idx - 1] = kRemovedEntry;
- }
- }
- is_typed_slot = false;
- } else {
- is_typed_slot = true;
- DCHECK(slot_idx < slots_count);
- }
- }
- buffer = buffer->next();
- }
-}
-
-
-void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) {
- while (buffer != NULL) {
- SlotsBuffer::ObjectSlot* slots = buffer->slots_;
- intptr_t slots_count = buffer->idx_;
-
- for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
- ObjectSlot slot = slots[slot_idx];
- if (!IsTypedSlot(slot)) {
- Object* object = *slot;
- if (object->IsHeapObject()) {
- HeapObject* heap_object = HeapObject::cast(object);
- CHECK(!heap->InNewSpace(object));
- heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
- reinterpret_cast<Address>(slot), heap_object);
- }
- } else {
- ++slot_idx;
- DCHECK(slot_idx < slots_count);
- }
- }
- buffer = buffer->next();
- }
-}
-
-
-static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
- if (RelocInfo::IsCodeTarget(rmode)) {
- return SlotsBuffer::CODE_TARGET_SLOT;
- } else if (RelocInfo::IsCell(rmode)) {
- return SlotsBuffer::CELL_TARGET_SLOT;
- } else if (RelocInfo::IsEmbeddedObject(rmode)) {
- return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
- } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
- return SlotsBuffer::DEBUG_TARGET_SLOT;
- }
- UNREACHABLE();
- return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
-}
-
-
-void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
- Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
- RelocInfo::Mode rmode = rinfo->rmode();
- if (target_page->IsEvacuationCandidate() &&
- (rinfo->host() == NULL ||
- !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
- Address addr = rinfo->pc();
- SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
- if (rinfo->IsInConstantPool()) {
- addr = rinfo->constant_pool_entry_address();
- if (RelocInfo::IsCodeTarget(rmode)) {
- slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
- } else {
- DCHECK(RelocInfo::IsEmbeddedObject(rmode));
- slot_type = SlotsBuffer::OBJECT_SLOT;
- }
- }
- bool success = SlotsBuffer::AddTo(
- &slots_buffer_allocator_, target_page->slots_buffer_address(),
- slot_type, addr, SlotsBuffer::FAIL_ON_OVERFLOW);
- if (!success) {
- EvictPopularEvacuationCandidate(target_page);
- }
- }
-}
-
-
void MarkCompactCollector::EvictPopularEvacuationCandidate(Page* page) {
if (FLAG_trace_fragmentation) {
PrintF("Page %p is too popular. Disabling evacuation.\n",
@@ -4657,7 +4589,7 @@ void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* object, Address slot,
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
if (target_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(object)) {
- if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
target_page->slots_buffer_address(),
SlotsBuffer::CODE_ENTRY_SLOT, slot,
SlotsBuffer::FAIL_ON_OVERFLOW)) {
@@ -4681,70 +4613,5 @@ void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
}
}
-
-static inline SlotsBuffer::SlotType DecodeSlotType(
- SlotsBuffer::ObjectSlot slot) {
- return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
-}
-
-
-void SlotsBuffer::UpdateSlots(Heap* heap) {
- PointersUpdatingVisitor v(heap);
-
- for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
- ObjectSlot slot = slots_[slot_idx];
- if (!IsTypedSlot(slot)) {
- PointersUpdatingVisitor::UpdateSlot(heap, slot);
- } else {
- ++slot_idx;
- DCHECK(slot_idx < idx_);
- UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
- reinterpret_cast<Address>(slots_[slot_idx]));
- }
- }
-}
-
-
-void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
- PointersUpdatingVisitor v(heap);
-
- for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
- ObjectSlot slot = slots_[slot_idx];
- if (!IsTypedSlot(slot)) {
- if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
- PointersUpdatingVisitor::UpdateSlot(heap, slot);
- }
- } else {
- ++slot_idx;
- DCHECK(slot_idx < idx_);
- Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
- if (!IsOnInvalidatedCodeObject(pc)) {
- UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
- reinterpret_cast<Address>(slots_[slot_idx]));
- }
- }
- }
-}
-
-
-SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
- return new SlotsBuffer(next_buffer);
-}
-
-
-void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
- delete buffer;
-}
-
-
-void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
- SlotsBuffer* buffer = *buffer_address;
- while (buffer != NULL) {
- SlotsBuffer* next_buffer = buffer->next();
- DeallocateBuffer(buffer);
- buffer = next_buffer;
- }
- *buffer_address = NULL;
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 843e73d8e7..724650c1c4 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -24,6 +24,8 @@ class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
+class SlotsBuffer;
+class SlotsBufferAllocator;
class Marking : public AllStatic {
@@ -117,10 +119,6 @@ class Marking : public AllStatic {
markbit.Next().Set();
}
- static void SetAllMarkBitsInRange(MarkBit start, MarkBit end);
- static void ClearAllMarkBitsOfCellsContainedInRange(MarkBit start,
- MarkBit end);
-
static void TransferMark(Heap* heap, Address old_start, Address new_start);
#ifdef DEBUG
@@ -262,165 +260,6 @@ class MarkingDeque {
};
-class SlotsBufferAllocator {
- public:
- SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
- void DeallocateBuffer(SlotsBuffer* buffer);
-
- void DeallocateChain(SlotsBuffer** buffer_address);
-};
-
-
-// SlotsBuffer records a sequence of slots that has to be updated
-// after live objects were relocated from evacuation candidates.
-// All slots are either untyped or typed:
-// - Untyped slots are expected to contain a tagged object pointer.
-// They are recorded by an address.
-// - Typed slots are expected to contain an encoded pointer to a heap
-// object where the way of encoding depends on the type of the slot.
-// They are recorded as a pair (SlotType, slot address).
- // We assume that the zero page is never mapped; this allows us to
- // distinguish untyped slots from typed slots during iteration by a simple
- // comparison: if an element of the slots buffer is less than
- // NUMBER_OF_SLOT_TYPES, then it is the first element of a typed slot's pair.
-class SlotsBuffer {
- public:
- typedef Object** ObjectSlot;
-
- explicit SlotsBuffer(SlotsBuffer* next_buffer)
- : idx_(0), chain_length_(1), next_(next_buffer) {
- if (next_ != NULL) {
- chain_length_ = next_->chain_length_ + 1;
- }
- }
-
- ~SlotsBuffer() {}
-
- void Add(ObjectSlot slot) {
- DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
-#ifdef DEBUG
- if (slot >= reinterpret_cast<ObjectSlot>(NUMBER_OF_SLOT_TYPES)) {
- DCHECK_NOT_NULL(*slot);
- }
-#endif
- slots_[idx_++] = slot;
- }
-
- // Should be used for testing only.
- ObjectSlot Get(intptr_t i) {
- DCHECK(i >= 0 && i < kNumberOfElements);
- return slots_[i];
- }
-
- enum SlotType {
- EMBEDDED_OBJECT_SLOT,
- OBJECT_SLOT,
- RELOCATED_CODE_OBJECT,
- CELL_TARGET_SLOT,
- CODE_TARGET_SLOT,
- CODE_ENTRY_SLOT,
- DEBUG_TARGET_SLOT,
- NUMBER_OF_SLOT_TYPES
- };
-
- static const char* SlotTypeToString(SlotType type) {
- switch (type) {
- case EMBEDDED_OBJECT_SLOT:
- return "EMBEDDED_OBJECT_SLOT";
- case OBJECT_SLOT:
- return "OBJECT_SLOT";
- case RELOCATED_CODE_OBJECT:
- return "RELOCATED_CODE_OBJECT";
- case CELL_TARGET_SLOT:
- return "CELL_TARGET_SLOT";
- case CODE_TARGET_SLOT:
- return "CODE_TARGET_SLOT";
- case CODE_ENTRY_SLOT:
- return "CODE_ENTRY_SLOT";
- case DEBUG_TARGET_SLOT:
- return "DEBUG_TARGET_SLOT";
- case NUMBER_OF_SLOT_TYPES:
- return "NUMBER_OF_SLOT_TYPES";
- }
- return "UNKNOWN SlotType";
- }
-
- void UpdateSlots(Heap* heap);
-
- void UpdateSlotsWithFilter(Heap* heap);
-
- SlotsBuffer* next() { return next_; }
-
- static int SizeOfChain(SlotsBuffer* buffer) {
- if (buffer == NULL) return 0;
- return static_cast<int>(buffer->idx_ +
- (buffer->chain_length_ - 1) * kNumberOfElements);
- }
-
- inline bool IsFull() { return idx_ == kNumberOfElements; }
-
- inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
-
- static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer) {
- while (buffer != NULL) {
- buffer->UpdateSlots(heap);
- buffer = buffer->next();
- }
- }
-
- enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
-
- static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
- return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
- }
-
- INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address, ObjectSlot slot,
- AdditionMode mode)) {
- SlotsBuffer* buffer = *buffer_address;
- if (buffer == NULL || buffer->IsFull()) {
- if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
- allocator->DeallocateChain(buffer_address);
- return false;
- }
- buffer = allocator->AllocateBuffer(buffer);
- *buffer_address = buffer;
- }
- buffer->Add(slot);
- return true;
- }
-
- static bool IsTypedSlot(ObjectSlot slot);
-
- static bool AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address, SlotType type, Address addr,
- AdditionMode mode);
-
- // Eliminates all stale entries from the slots buffer, i.e., slots that
- // are no longer part of live objects. This method must be called after
- // marking, when the whole transitive closure is known, and before
- // sweeping, while the mark bits are still intact.
- static void RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer);
-
- // Eliminate all slots that are within the given address range.
- static void RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
- Address start_slot, Address end_slot);
-
- // Ensures that there are no invalid slots in the chain of slots buffers.
- static void VerifySlots(Heap* heap, SlotsBuffer* buffer);
-
- static const int kNumberOfElements = 1021;
-
- private:
- static const int kChainLengthThreshold = 15;
-
- intptr_t idx_;
- intptr_t chain_length_;
- SlotsBuffer* next_;
- ObjectSlot slots_[kNumberOfElements];
-};
-
-
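
The (now removed) SlotsBuffer relied on a tagging trick worth spelling out: because the zero page is never mapped, any stored word smaller than NUMBER_OF_SLOT_TYPES cannot be a real slot address and must be the type half of a (type, address) pair. A self-contained sketch of that encoding in plain C++, with hypothetical names instead of the V8 types:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Hypothetical stand-ins for the V8 slot types; the real enum has more
    // members, but the trick only needs the NUMBER_OF_SLOT_TYPES sentinel.
    enum SlotType : std::uintptr_t { kCodeTarget, kEmbeddedObject, kNumberOfSlotTypes };

    static bool IsTypedSlotMarker(std::uintptr_t word) {
      // Valid only because the zero page is never mapped, so no real slot
      // address can be this small.
      return word < kNumberOfSlotTypes;
    }

    int main() {
      std::vector<std::uintptr_t> buffer;
      buffer.push_back(0x100010);      // untyped slot: one word, an address
      buffer.push_back(kCodeTarget);   // typed slot: (type, address) pair...
      buffer.push_back(0x200020);      // ...the second word is the address

      for (std::size_t i = 0; i < buffer.size(); ++i) {
        if (IsTypedSlotMarker(buffer[i])) {
          std::printf("typed slot, type=%zu addr=%#zx\n",
                      static_cast<std::size_t>(buffer[i]),
                      static_cast<std::size_t>(buffer[i + 1]));
          ++i;  // consume the address half of the pair
        } else {
          std::printf("untyped slot, addr=%#zx\n",
                      static_cast<std::size_t>(buffer[i]));
        }
      }
    }
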
// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
@@ -559,6 +398,7 @@ class MarkCompactCollector {
enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
#ifdef VERIFY_HEAP
+ void VerifyValidStoreAndSlotsBufferEntries();
void VerifyMarkbitsAreClean();
static void VerifyMarkbitsAreClean(PagedSpace* space);
static void VerifyMarkbitsAreClean(NewSpace* space);
@@ -579,16 +419,21 @@ class MarkCompactCollector {
void RecordRelocSlot(RelocInfo* rinfo, Object* target);
void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);
+ INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
+ INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
+ Object* target));
- INLINE(void RecordSlot(
- HeapObject* object, Object** slot, Object* target,
- SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));
+ void UpdateSlots(SlotsBuffer* buffer);
+ void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
void MigrateObject(HeapObject* dst, HeapObject* src, int size,
- AllocationSpace to_old_space);
+ AllocationSpace to_old_space,
+ SlotsBuffer** evacuation_slots_buffer);
- void MigrateObjectTagged(HeapObject* dst, HeapObject* src, int size);
- void MigrateObjectMixed(HeapObject* dst, HeapObject* src, int size);
+ void MigrateObjectTagged(HeapObject* dst, HeapObject* src, int size,
+ SlotsBuffer** evacuation_slots_buffer);
+ void MigrateObjectMixed(HeapObject* dst, HeapObject* src, int size,
+ SlotsBuffer** evacuation_slots_buffer);
void MigrateObjectRaw(HeapObject* dst, HeapObject* src, int size);
bool TryPromoteObject(HeapObject* object, int object_size);
@@ -668,6 +513,7 @@ class MarkCompactCollector {
void RemoveObjectSlots(Address start_slot, Address end_slot);
private:
+ class CompactionTask;
class SweeperTask;
explicit MarkCompactCollector(Heap* heap);
@@ -696,20 +542,11 @@ class MarkCompactCollector {
MarkingParity marking_parity_;
- // True if we are collecting slots to perform evacuation from evacuation
- // candidates.
- bool compacting_;
-
bool was_marked_incrementally_;
- // True if concurrent or parallel sweeping is currently in progress.
- bool sweeping_in_progress_;
-
- base::Semaphore pending_sweeper_jobs_semaphore_;
-
bool evacuation_;
- SlotsBufferAllocator slots_buffer_allocator_;
+ SlotsBufferAllocator* slots_buffer_allocator_;
SlotsBuffer* migration_slots_buffer_;
@@ -861,9 +698,21 @@ class MarkCompactCollector {
void EvacuateNewSpace();
- void EvacuateLiveObjectsFromPage(Page* p);
+ bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space,
+ SlotsBuffer** evacuation_slots_buffer);
+
+ void AddEvacuationSlotsBufferSynchronized(
+ SlotsBuffer* evacuation_slots_buffer);
+
+ void EvacuatePages(CompactionSpaceCollection* compaction_spaces,
+ SlotsBuffer** evacuation_slots_buffer);
+
+ void EvacuatePagesInParallel();
+
+ // The number of parallel compaction tasks, including the main thread.
+ int NumberOfParallelCompactionTasks();
- void EvacuatePages();
+ void WaitUntilCompactionCompleted();
void EvacuateNewSpaceAndCandidates();
@@ -882,7 +731,16 @@ class MarkCompactCollector {
void ParallelSweepSpaceComplete(PagedSpace* space);
// Updates store buffer and slot buffer for a pointer in a migrating object.
- void RecordMigratedSlot(Object* value, Address slot);
+ void RecordMigratedSlot(Object* value, Address slot,
+ SlotsBuffer** evacuation_slots_buffer);
+
+ // Adds the code entry slot to the slots buffer.
+ void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
+ SlotsBuffer** evacuation_slots_buffer);
+
+ // Adds the slot of a moved code object.
+ void RecordMigratedCodeObjectSlot(Address code_object,
+ SlotsBuffer** evacuation_slots_buffer);
#ifdef DEBUG
friend class MarkObjectVisitor;
@@ -901,10 +759,37 @@ class MarkCompactCollector {
List<Page*> evacuation_candidates_;
+ // The evacuation_slots_buffers_ list is used by the compaction threads.
+ // When a compaction task finishes, it uses
+ // AddEvacuationSlotsBufferSynchronized to add its slots buffer to the
+ // evacuation_slots_buffers_ list while holding the
+ // evacuation_slots_buffers_mutex_ lock.
+ base::Mutex evacuation_slots_buffers_mutex_;
+ List<SlotsBuffer*> evacuation_slots_buffers_;
+
base::SmartPointer<FreeList> free_list_old_space_;
base::SmartPointer<FreeList> free_list_code_space_;
base::SmartPointer<FreeList> free_list_map_space_;
+ // True if we are collecting slots to perform evacuation from evacuation
+ // candidates.
+ bool compacting_;
+
+ // True if concurrent or parallel sweeping is currently in progress.
+ bool sweeping_in_progress_;
+
+ // True if parallel compaction is currently in progress.
+ bool compaction_in_progress_;
+
+ // Semaphore used to synchronize sweeper tasks.
+ base::Semaphore pending_sweeper_tasks_semaphore_;
+
+ // Semaphore used to synchronize compaction tasks.
+ base::Semaphore pending_compaction_tasks_semaphore_;
+
+ // Number of active compaction tasks (including main thread).
+ intptr_t concurrent_compaction_tasks_active_;
+
friend class Heap;
};
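
The new fields above implement a common parallel pattern: each compaction task fills a thread-local buffer and only takes the lock to splice it into the shared list when the task finishes. A self-contained sketch with std::thread (hypothetical names; V8 uses its own base::Mutex and task framework):

    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    static std::mutex buffers_mutex;
    static std::vector<std::vector<int>> all_buffers;  // shared, lock-protected

    static void CompactionTask(int id) {
      std::vector<int> local;  // filled lock-free while the task runs
      for (int i = 0; i < 3; ++i) local.push_back(id * 10 + i);
      std::lock_guard<std::mutex> guard(buffers_mutex);  // one lock per task
      all_buffers.push_back(std::move(local));
    }

    int main() {
      std::vector<std::thread> tasks;
      for (int id = 0; id < 4; ++id) tasks.emplace_back(CompactionTask, id);
      for (auto& t : tasks) t.join();
      std::printf("collected %zu buffers\n", all_buffers.size());
    }
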
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 25378b5911..45d6bd3d7f 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -5,7 +5,8 @@
#include "src/heap/memory-reducer.h"
#include "src/flags.h"
-#include "src/heap/heap.h"
+#include "src/heap/gc-tracer.h"
+#include "src/heap/heap-inl.h"
#include "src/utils.h"
#include "src/v8.h"
@@ -45,12 +46,35 @@ void MemoryReducer::NotifyTimer(const Event& event) {
if (state_.action == kRun) {
DCHECK(heap()->incremental_marking()->IsStopped());
DCHECK(FLAG_incremental_marking);
- heap()->StartIdleIncrementalMarking();
if (FLAG_trace_gc_verbose) {
PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
state_.started_gcs);
}
+ if (heap()->ShouldOptimizeForMemoryUsage()) {
+ // Do full GC if memory usage has higher priority than latency. This is
+ // important for background tabs that do not send idle notifications.
+ heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
+ "memory reducer");
+ } else {
+ heap()->StartIdleIncrementalMarking();
+ }
} else if (state_.action == kWait) {
+ if (!heap()->incremental_marking()->IsStopped() &&
+ heap()->ShouldOptimizeForMemoryUsage()) {
+ // Make progress with pending incremental marking if memory usage has
+ // higher priority than latency. This is important for background tabs
+ // that do not send idle notifications.
+ const int kIncrementalMarkingDelayMs = 500;
+ double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
+ kIncrementalMarkingDelayMs;
+ heap()->incremental_marking()->AdvanceIncrementalMarking(
+ 0, deadline, i::IncrementalMarking::StepActions(
+ i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+ i::IncrementalMarking::FORCE_MARKING,
+ i::IncrementalMarking::FORCE_COMPLETION));
+ heap()->FinalizeIncrementalMarkingIfComplete(
+ "Memory reducer: finalize incremental marking");
+ }
// Re-schedule the timer.
ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
if (FLAG_trace_gc_verbose) {
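
The memory-reducer change in the hunk above chooses between a full collection and incremental progress depending on whether memory footprint outranks latency (e.g. a background tab that never sends idle notifications). A simplified decision sketch, with invented names standing in for the heap API; only the control flow mirrors the patch:

    // Hypothetical heap interface.
    struct Heap {
      bool optimize_for_memory;    // e.g. a backgrounded tab
      bool marking_in_progress;
      void CollectAllGarbage() {}
      void StartIdleIncrementalMarking() {}
      void AdvanceIncrementalMarking(double /*deadline_ms*/) {}
      double NowMs() const { return 0.0; }
    };

    static const int kIncrementalMarkingDelayMs = 500;  // as in the patch

    static void OnTimer(Heap* heap, bool run_state) {
      if (run_state) {
        if (heap->optimize_for_memory) {
          heap->CollectAllGarbage();            // full GC, frees the most memory
        } else {
          heap->StartIdleIncrementalMarking();  // cheaper, latency-friendly
        }
      } else if (heap->marking_in_progress && heap->optimize_for_memory) {
        // No idle notifications will arrive; push pending marking forward anyway.
        heap->AdvanceIncrementalMarking(heap->NowMs() + kIncrementalMarkingDelayMs);
      }
    }

    int main() {
      Heap background_tab{true, false};
      OnTimer(&background_tab, /*run_state=*/true);  // takes the full-GC branch
    }
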
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
new file mode 100644
index 0000000000..195723e86d
--- /dev/null
+++ b/deps/v8/src/heap/object-stats.cc
@@ -0,0 +1,258 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/object-stats.h"
+
+#include "src/counters.h"
+#include "src/heap/heap-inl.h"
+#include "src/isolate.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+static base::LazyMutex object_stats_mutex = LAZY_MUTEX_INITIALIZER;
+
+
+void ObjectStats::ClearObjectStats(bool clear_last_time_stats) {
+ memset(object_counts_, 0, sizeof(object_counts_));
+ memset(object_sizes_, 0, sizeof(object_sizes_));
+ if (clear_last_time_stats) {
+ memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
+ memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
+ }
+}
+
+
+void ObjectStats::TraceObjectStat(const char* name, int count, int size,
+ double time) {
+ int ms_count = heap()->ms_count();
+ PrintIsolate(isolate(),
+ "heap:%p, time:%f, gc:%d, type:%s, count:%d, size:%d\n",
+ static_cast<void*>(heap()), time, ms_count, name, count, size);
+}
+
+
+void ObjectStats::TraceObjectStats() {
+ base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
+ int index;
+ int count;
+ int size;
+ int total_size = 0;
+ double time = isolate()->time_millis_since_init();
+#define TRACE_OBJECT_COUNT(name) \
+ count = static_cast<int>(object_counts_[name]); \
+ size = static_cast<int>(object_sizes_[name]) / KB; \
+ total_size += size; \
+ TraceObjectStat(#name, count, size, time);
+ INSTANCE_TYPE_LIST(TRACE_OBJECT_COUNT)
+#undef TRACE_OBJECT_COUNT
+#define TRACE_OBJECT_COUNT(name) \
+ index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
+ count = static_cast<int>(object_counts_[index]); \
+ size = static_cast<int>(object_sizes_[index]) / KB; \
+ TraceObjectStat("*CODE_" #name, count, size, time);
+ CODE_KIND_LIST(TRACE_OBJECT_COUNT)
+#undef TRACE_OBJECT_COUNT
+#define TRACE_OBJECT_COUNT(name) \
+ index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
+ count = static_cast<int>(object_counts_[index]); \
+ size = static_cast<int>(object_sizes_[index]) / KB; \
+ TraceObjectStat("*FIXED_ARRAY_" #name, count, size, time);
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(TRACE_OBJECT_COUNT)
+#undef TRACE_OBJECT_COUNT
+#define TRACE_OBJECT_COUNT(name) \
+ index = \
+ FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
+ count = static_cast<int>(object_counts_[index]); \
+ size = static_cast<int>(object_sizes_[index]) / KB; \
+ TraceObjectStat("*CODE_AGE_" #name, count, size, time);
+ CODE_AGE_LIST_COMPLETE(TRACE_OBJECT_COUNT)
+#undef TRACE_OBJECT_COUNT
+}
+
+
+void ObjectStats::CheckpointObjectStats() {
+ base::LockGuard<base::Mutex> lock_guard(object_stats_mutex.Pointer());
+ Counters* counters = isolate()->counters();
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ counters->count_of_##name()->Increment( \
+ static_cast<int>(object_counts_[name])); \
+ counters->count_of_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[name])); \
+ counters->size_of_##name()->Increment( \
+ static_cast<int>(object_sizes_[name])); \
+ counters->size_of_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[name]));
+ INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+ int index;
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = FIRST_CODE_KIND_SUB_TYPE + Code::name; \
+ counters->count_of_CODE_TYPE_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_CODE_TYPE_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_CODE_TYPE_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_CODE_TYPE_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[index]));
+ CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = FIRST_FIXED_ARRAY_SUB_TYPE + name; \
+ counters->count_of_FIXED_ARRAY_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_FIXED_ARRAY_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_FIXED_ARRAY_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_FIXED_ARRAY_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[index]));
+ FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
+ index = \
+ FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
+ counters->count_of_CODE_AGE_##name()->Increment( \
+ static_cast<int>(object_counts_[index])); \
+ counters->count_of_CODE_AGE_##name()->Decrement( \
+ static_cast<int>(object_counts_last_time_[index])); \
+ counters->size_of_CODE_AGE_##name()->Increment( \
+ static_cast<int>(object_sizes_[index])); \
+ counters->size_of_CODE_AGE_##name()->Decrement( \
+ static_cast<int>(object_sizes_last_time_[index]));
+ CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+
+ MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
+ MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
+ ClearObjectStats();
+}
+
+
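
Both TraceObjectStats and CheckpointObjectStats above lean on the X-macro idiom: a single list macro is instantiated with different per-entry macros, so one table drives enum generation, tracing, and counter adjustment alike. Reduced to its essentials, with a hypothetical three-entry type list:

    #include <cstdio>

    // The list macro: one place enumerates the types...
    #define INSTANCE_TYPE_LIST(V) V(STRING) V(MAP) V(CODE)

    // ...and each use site supplies what to do per entry.
    enum InstanceType {
    #define DEFINE_ENUM(name) name,
      INSTANCE_TYPE_LIST(DEFINE_ENUM)
    #undef DEFINE_ENUM
      NUM_TYPES
    };

    static int counts[NUM_TYPES] = {1, 2, 3};

    int main() {
    #define TRACE(name) std::printf("type:%s count:%d\n", #name, counts[name]);
      INSTANCE_TYPE_LIST(TRACE)
    #undef TRACE
    }
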
+Isolate* ObjectStats::isolate() { return heap()->isolate(); }
+
+
+void ObjectStatsVisitor::CountFixedArray(
+ FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
+ FixedArraySubInstanceType dictionary_type) {
+ Heap* heap = fixed_array->map()->GetHeap();
+ if (fixed_array->map() != heap->fixed_cow_array_map() &&
+ fixed_array->map() != heap->fixed_double_array_map() &&
+ fixed_array != heap->empty_fixed_array()) {
+ if (fixed_array->IsDictionary()) {
+ heap->object_stats_->RecordFixedArraySubTypeStats(dictionary_type,
+ fixed_array->Size());
+ } else {
+ heap->object_stats_->RecordFixedArraySubTypeStats(fast_type,
+ fixed_array->Size());
+ }
+ }
+}
+
+
+void ObjectStatsVisitor::VisitBase(VisitorId id, Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ int object_size = obj->Size();
+ heap->object_stats_->RecordObjectStats(map->instance_type(), object_size);
+ table_.GetVisitorById(id)(map, obj);
+ if (obj->IsJSObject()) {
+ JSObject* object = JSObject::cast(obj);
+ CountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
+ FAST_ELEMENTS_SUB_TYPE);
+ CountFixedArray(object->properties(), DICTIONARY_PROPERTIES_SUB_TYPE,
+ FAST_PROPERTIES_SUB_TYPE);
+ }
+}
+
+
+template <ObjectStatsVisitor::VisitorId id>
+void ObjectStatsVisitor::Visit(Map* map, HeapObject* obj) {
+ VisitBase(id, map, obj);
+}
+
+
+template <>
+void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitMap>(Map* map,
+ HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ Map* map_obj = Map::cast(obj);
+ DCHECK(map->instance_type() == MAP_TYPE);
+ DescriptorArray* array = map_obj->instance_descriptors();
+ if (map_obj->owns_descriptors() && array != heap->empty_descriptor_array()) {
+ int fixed_array_size = array->Size();
+ heap->object_stats_->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
+ fixed_array_size);
+ }
+ if (TransitionArray::IsFullTransitionArray(map_obj->raw_transitions())) {
+ int fixed_array_size =
+ TransitionArray::cast(map_obj->raw_transitions())->Size();
+ heap->object_stats_->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
+ fixed_array_size);
+ }
+ if (map_obj->has_code_cache()) {
+ CodeCache* cache = CodeCache::cast(map_obj->code_cache());
+ heap->object_stats_->RecordFixedArraySubTypeStats(
+ MAP_CODE_CACHE_SUB_TYPE, cache->default_cache()->Size());
+ if (!cache->normal_type_cache()->IsUndefined()) {
+ heap->object_stats_->RecordFixedArraySubTypeStats(
+ MAP_CODE_CACHE_SUB_TYPE,
+ FixedArray::cast(cache->normal_type_cache())->Size());
+ }
+ }
+ VisitBase(kVisitMap, map, obj);
+}
+
+
+template <>
+void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitCode>(
+ Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ int object_size = obj->Size();
+ DCHECK(map->instance_type() == CODE_TYPE);
+ Code* code_obj = Code::cast(obj);
+ heap->object_stats_->RecordCodeSubTypeStats(code_obj->kind(),
+ code_obj->GetAge(), object_size);
+ VisitBase(kVisitCode, map, obj);
+}
+
+
+template <>
+void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitSharedFunctionInfo>(
+ Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+ if (sfi->scope_info() != heap->empty_fixed_array()) {
+ heap->object_stats_->RecordFixedArraySubTypeStats(
+ SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
+ }
+ VisitBase(kVisitSharedFunctionInfo, map, obj);
+}
+
+
+template <>
+void ObjectStatsVisitor::Visit<ObjectStatsVisitor::kVisitFixedArray>(
+ Map* map, HeapObject* obj) {
+ Heap* heap = map->GetHeap();
+ FixedArray* fixed_array = FixedArray::cast(obj);
+ if (fixed_array == heap->string_table()) {
+ heap->object_stats_->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
+ fixed_array->Size());
+ }
+ VisitBase(kVisitFixedArray, map, obj);
+}
+
+
+void ObjectStatsVisitor::Initialize(VisitorDispatchTable<Callback>* original) {
+ // Copy the original visitor table to make call-through possible. Once we
+ // have preserved a copy locally, we patch the original table to call us.
+ table_.CopyFrom(original);
+#define COUNT_FUNCTION(id) original->Register(kVisit##id, Visit<kVisit##id>);
+ VISITOR_ID_LIST(COUNT_FUNCTION)
+#undef COUNT_FUNCTION
+}
+
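
Initialize above is a function-pointer interposition: copy the original dispatch table, then overwrite each entry with a counting wrapper that calls through to the saved copy. A minimal version of the pattern with a two-entry table and invented visitor ids:

    #include <cstdio>

    using Callback = void (*)(int obj);

    static Callback original_table[2];  // the table being patched
    static Callback saved_table[2];     // preserved copy for call-through

    static void VisitA(int obj) { std::printf("A visits %d\n", obj); }
    static void VisitB(int obj) { std::printf("B visits %d\n", obj); }

    static int visit_counts[2];

    template <int kId>
    static void CountingVisit(int obj) {
      ++visit_counts[kId];    // record the stat...
      saved_table[kId](obj);  // ...then forward to the preserved visitor
    }

    static void Initialize() {
      for (int i = 0; i < 2; ++i) saved_table[i] = original_table[i];
      original_table[0] = CountingVisit<0>;
      original_table[1] = CountingVisit<1>;
    }

    int main() {
      original_table[0] = VisitA;
      original_table[1] = VisitB;
      Initialize();
      original_table[0](7);  // counted, then forwarded to VisitA
      std::printf("count[0]=%d\n", visit_counts[0]);
    }
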
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/object-stats.h b/deps/v8/src/heap/object-stats.h
new file mode 100644
index 0000000000..e2dcfaa4b7
--- /dev/null
+++ b/deps/v8/src/heap/object-stats.h
@@ -0,0 +1,102 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_OBJECT_STATS_H_
+#define V8_HEAP_OBJECT_STATS_H_
+
+#include "src/heap/heap.h"
+#include "src/heap/objects-visiting.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class ObjectStats {
+ public:
+ explicit ObjectStats(Heap* heap) : heap_(heap) {}
+
+ // ObjectStats are kept in two arrays, counts and sizes. Related stats are
+ // stored in a contiguous linear buffer. Stats groups are stored one after
+ // another.
+ enum {
+ FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
+ FIRST_FIXED_ARRAY_SUB_TYPE =
+ FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
+ FIRST_CODE_AGE_SUB_TYPE =
+ FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+ OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
+ };
+
+ void ClearObjectStats(bool clear_last_time_stats = false);
+
+ void TraceObjectStats();
+ void TraceObjectStat(const char* name, int count, int size, double time);
+ void CheckpointObjectStats();
+
+ void RecordObjectStats(InstanceType type, size_t size) {
+ DCHECK(type <= LAST_TYPE);
+ object_counts_[type]++;
+ object_sizes_[type] += size;
+ }
+
+ void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
+ int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
+ int code_age_index =
+ FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
+ DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
+ code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
+ DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
+ code_age_index < OBJECT_STATS_COUNT);
+ object_counts_[code_sub_type_index]++;
+ object_sizes_[code_sub_type_index] += size;
+ object_counts_[code_age_index]++;
+ object_sizes_[code_age_index] += size;
+ }
+
+ void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
+ DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
+ object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
+ object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
+ }
+
+ size_t object_count_last_gc(size_t index) {
+ return object_counts_last_time_[index];
+ }
+
+ size_t object_size_last_gc(size_t index) {
+ return object_sizes_last_time_[index];
+ }
+
+ Isolate* isolate();
+ Heap* heap() { return heap_; }
+
+ private:
+ Heap* heap_;
+
+ // Object counts and used memory by InstanceType
+ size_t object_counts_[OBJECT_STATS_COUNT];
+ size_t object_counts_last_time_[OBJECT_STATS_COUNT];
+ size_t object_sizes_[OBJECT_STATS_COUNT];
+ size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
+};
+
+
+class ObjectStatsVisitor : public StaticMarkingVisitor<ObjectStatsVisitor> {
+ public:
+ static void Initialize(VisitorDispatchTable<Callback>* original);
+
+ static void VisitBase(VisitorId id, Map* map, HeapObject* obj);
+
+ static void CountFixedArray(FixedArrayBase* fixed_array,
+ FixedArraySubInstanceType fast_type,
+ FixedArraySubInstanceType dictionary_type);
+
+ template <VisitorId id>
+ static inline void Visit(Map* map, HeapObject* obj);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_OBJECT_STATS_H_
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 9a2e2ee147..55734fd463 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_VISITING_INL_H_
#define V8_OBJECTS_VISITING_INL_H_
+#include "src/heap/array-buffer-tracker.h"
#include "src/heap/objects-visiting.h"
#include "src/ic/ic-state.h"
#include "src/macro-assembler.h"
@@ -94,8 +95,7 @@ int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
JSArrayBuffer::JSArrayBufferIterateBody<
StaticNewSpaceVisitor<StaticVisitor> >(heap, object);
if (!JSArrayBuffer::cast(object)->is_external()) {
- heap->RegisterLiveArrayBuffer(true,
- JSArrayBuffer::cast(object)->backing_store());
+ heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
}
return JSArrayBuffer::kSizeWithInternalFields;
}
@@ -124,6 +124,17 @@ int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(Map* map,
template <typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitBytecodeArray(
+ Map* map, HeapObject* object) {
+ VisitPointers(
+ map->GetHeap(), object,
+ HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
+ HeapObject::RawField(object, BytecodeArray::kHeaderSize));
+ return reinterpret_cast<BytecodeArray*>(object)->BytecodeArraySize();
+}
+
+
+template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitShortcutCandidate,
&FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor,
@@ -155,7 +166,7 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
- table_.Register(kVisitBytecodeArray, &DataObjectVisitor::Visit);
+ table_.Register(kVisitBytecodeArray, &VisitBytecodeArray);
table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
@@ -343,9 +354,21 @@ void StaticMarkingVisitor<StaticVisitor>::VisitWeakCell(Map* map,
// We can ignore weak cells with cleared values because they will always
// contain smi zero.
if (weak_cell->next_cleared() && !weak_cell->cleared()) {
- weak_cell->set_next(heap->encountered_weak_cells(),
- UPDATE_WEAK_WRITE_BARRIER);
- heap->set_encountered_weak_cells(weak_cell);
+ HeapObject* value = HeapObject::cast(weak_cell->value());
+ if (MarkCompactCollector::IsMarked(value)) {
+ // Weak cells with live values are directly processed here to reduce
+ // the processing time of weak cells during the main GC pause.
+ Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
+ map->GetHeap()->mark_compact_collector()->RecordSlot(weak_cell, slot,
+ *slot);
+ } else {
+ // If we do not yet know the liveness of a weak cell's value, we have
+ // to process it later, once the liveness of the whole transitive
+ // closure is known.
+ weak_cell->set_next(heap->encountered_weak_cells(),
+ UPDATE_WEAK_WRITE_BARRIER);
+ heap->set_encountered_weak_cells(weak_cell);
+ }
}
}
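
The weak-cell change above splits the work by what is already known: if the value is marked, record the slot immediately during visiting; otherwise defer the cell to a list that is processed once the transitive closure is complete. Schematically, with invented types:

    #include <vector>

    // Hypothetical weak cell: points at a value whose liveness may be unknown.
    struct WeakCell { void* value; bool value_is_marked; };

    static std::vector<WeakCell*> deferred;      // processed after marking
    static std::vector<void**> recorded_slots;   // handled in the main pause

    static void VisitWeakCell(WeakCell* cell) {
      if (cell->value_is_marked) {
        recorded_slots.push_back(&cell->value);  // cheap: liveness already known
      } else {
        deferred.push_back(cell);                // decide once marking finishes
      }
    }

    int main() {
      WeakCell live{nullptr, true}, unknown{nullptr, false};
      VisitWeakCell(&live);
      VisitWeakCell(&unknown);
    }
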
@@ -516,9 +539,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
Heap* heap = map->GetHeap();
JSArrayBuffer::JSArrayBufferIterateBody<StaticVisitor>(heap, object);
- if (!JSArrayBuffer::cast(object)->is_external()) {
- heap->RegisterLiveArrayBuffer(false,
- JSArrayBuffer::cast(object)->backing_store());
+ if (!JSArrayBuffer::cast(object)->is_external() &&
+ !heap->InNewSpace(object)) {
+ heap->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object));
}
}
@@ -544,6 +567,16 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(Map* map,
template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitBytecodeArray(
+ Map* map, HeapObject* object) {
+ StaticVisitor::VisitPointers(
+ map->GetHeap(), object,
+ HeapObject::RawField(object, BytecodeArray::kConstantPoolOffset),
+ HeapObject::RawField(object, BytecodeArray::kHeaderSize));
+}
+
+
+template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
Map* map) {
Object* raw_transitions = map->raw_transitions();
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 602d37b805..902a96a644 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -137,6 +137,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_MESSAGE_OBJECT_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
+ case JS_ITERATOR_RESULT_TYPE:
return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
instance_size, has_unboxed_fields);
@@ -225,6 +226,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case JS_MAP_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
+ case JS_ITERATOR_RESULT_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
case JS_REGEXP_TYPE:
@@ -271,13 +273,15 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
case SYMBOL_TYPE:
Symbol::BodyDescriptor::IterateBody(this, v);
break;
+ case BYTECODE_ARRAY_TYPE:
+ reinterpret_cast<BytecodeArray*>(this)->BytecodeArrayIterateBody(v);
+ break;
case HEAP_NUMBER_TYPE:
case MUTABLE_HEAP_NUMBER_TYPE:
case SIMD128_VALUE_TYPE:
case FILLER_TYPE:
case BYTE_ARRAY_TYPE:
- case BYTECODE_ARRAY_TYPE:
case FREE_SPACE_TYPE:
break;
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index 5b150cf199..1eba88731b 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -6,6 +6,7 @@
#define V8_OBJECTS_VISITING_H_
#include "src/allocation.h"
+#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/layout-descriptor.h"
@@ -99,7 +100,6 @@ class StaticVisitorBase : public AllStatic {
kVisitDataObject = kVisitDataObject2,
kVisitJSObject = kVisitJSObject2,
kVisitStruct = kVisitStruct2,
- kMinObjectSizeInWords = 2
};
// Visitor ID should fit in one byte.
@@ -121,15 +121,15 @@ class StaticVisitorBase : public AllStatic {
DCHECK((base == kVisitDataObject) || (base == kVisitStruct) ||
(base == kVisitJSObject));
DCHECK(IsAligned(object_size, kPointerSize));
- DCHECK(kMinObjectSizeInWords * kPointerSize <= object_size);
+ DCHECK(Heap::kMinObjectSizeInWords * kPointerSize <= object_size);
DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
DCHECK(!has_unboxed_fields || (base == kVisitJSObject));
if (has_unboxed_fields) return generic;
- int visitor_id =
- Min(base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords,
- static_cast<int>(generic));
+ int visitor_id = Min(
+ base + (object_size >> kPointerSizeLog2) - Heap::kMinObjectSizeInWords,
+ static_cast<int>(generic));
return static_cast<VisitorId>(visitor_id);
}
@@ -171,8 +171,7 @@ class VisitorDispatchTable {
template <typename Visitor, StaticVisitorBase::VisitorId base,
StaticVisitorBase::VisitorId generic>
void RegisterSpecializations() {
- STATIC_ASSERT((generic - base + StaticVisitorBase::kMinObjectSizeInWords) ==
- 10);
+ STATIC_ASSERT((generic - base + Heap::kMinObjectSizeInWords) == 10);
RegisterSpecialization<Visitor, base, generic, 2>();
RegisterSpecialization<Visitor, base, generic, 3>();
RegisterSpecialization<Visitor, base, generic, 4>();
@@ -317,10 +316,6 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
}
- INLINE(static int VisitBytecodeArray(Map* map, HeapObject* object)) {
- return reinterpret_cast<BytecodeArray*>(object)->BytecodeArraySize();
- }
-
INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) {
int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
return FixedDoubleArray::SizeFor(length);
@@ -351,6 +346,7 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
INLINE(static int VisitJSDataView(Map* map, HeapObject* object));
+ INLINE(static int VisitBytecodeArray(Map* map, HeapObject* object));
class DataObjectVisitor {
public:
@@ -435,6 +431,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object));
INLINE(static void VisitJSDataView(Map* map, HeapObject* object));
INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
+ INLINE(static void VisitBytecodeArray(Map* map, HeapObject* object));
// Mark pointers in a Map and its TransitionArray together, possibly
// treating transitions or back pointers weak.
diff --git a/deps/v8/src/heap/scavenge-job.cc b/deps/v8/src/heap/scavenge-job.cc
new file mode 100644
index 0000000000..c3804436fb
--- /dev/null
+++ b/deps/v8/src/heap/scavenge-job.cc
@@ -0,0 +1,116 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/scavenge-job.h"
+
+#include "src/base/platform/time.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+
+const double ScavengeJob::kMaxAllocationLimitAsFractionOfNewSpace = 0.8;
+
+void ScavengeJob::IdleTask::RunInternal(double deadline_in_seconds) {
+ Heap* heap = isolate_->heap();
+ double deadline_in_ms =
+ deadline_in_seconds *
+ static_cast<double>(base::Time::kMillisecondsPerSecond);
+ double start_ms = heap->MonotonicallyIncreasingTimeInMs();
+ double idle_time_in_ms = deadline_in_ms - start_ms;
+ size_t scavenge_speed_in_bytes_per_ms =
+ static_cast<size_t>(heap->tracer()->ScavengeSpeedInBytesPerMillisecond());
+ size_t new_space_size = heap->new_space()->Size();
+ size_t new_space_capacity = heap->new_space()->Capacity();
+
+ job_->NotifyIdleTask();
+
+ if (ReachedIdleAllocationLimit(scavenge_speed_in_bytes_per_ms, new_space_size,
+ new_space_capacity)) {
+ if (EnoughIdleTimeForScavenge(
+ idle_time_in_ms, scavenge_speed_in_bytes_per_ms, new_space_size)) {
+ heap->CollectGarbage(NEW_SPACE, "idle task: scavenge");
+ } else {
+ // Immediately request another idle task that can get a larger idle time.
+ job_->RescheduleIdleTask(heap);
+ }
+ }
+}
+
+
+bool ScavengeJob::ReachedIdleAllocationLimit(
+ size_t scavenge_speed_in_bytes_per_ms, size_t new_space_size,
+ size_t new_space_capacity) {
+ if (scavenge_speed_in_bytes_per_ms == 0) {
+ scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
+ }
+
+ // Set the allocation limit to the number of bytes we can scavenge in an
+ // average idle task.
+ size_t allocation_limit = kAverageIdleTimeMs * scavenge_speed_in_bytes_per_ms;
+
+ // Keep the limit smaller than the new space capacity.
+ allocation_limit =
+ Min(allocation_limit,
+ static_cast<size_t>(new_space_capacity *
+ kMaxAllocationLimitAsFractionOfNewSpace));
+ // Adjust the limit to take into account bytes that will be allocated until
+ // the next check.
+ allocation_limit = allocation_limit < kBytesAllocatedBeforeNextIdleTask
+ ? 0
+ : allocation_limit - kBytesAllocatedBeforeNextIdleTask;
+ // Keep the limit large enough to avoid scavenges in tiny new space.
+ allocation_limit = Max(allocation_limit, kMinAllocationLimit);
+
+ return allocation_limit <= new_space_size;
+}
+
+
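
With the constants declared in scavenge-job.h, ReachedIdleAllocationLimit works out to concrete numbers. A small check, assuming the documented defaults (5 ms average idle time, 256 KB/ms initial scavenge speed, 512 KB budget and floor, 0.8 capacity cap) and an 8 MB new space:

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t KB = 1024;
      std::size_t speed = 256 * KB;          // bytes scavenged per ms
      std::size_t capacity = 8 * 1024 * KB;  // 8 MB new space
      std::size_t limit = 5 * speed;         // kAverageIdleTimeMs * speed = 1280 KB
      // Keep the limit under 80% of capacity (6553 KB here, so no change).
      limit = std::min(limit, static_cast<std::size_t>(capacity * 0.8));
      std::size_t budget = 512 * KB;         // kBytesAllocatedBeforeNextIdleTask
      limit = limit < budget ? 0 : limit - budget;  // 768 KB
      limit = std::max(limit, 512 * KB);            // kMinAllocationLimit floor
      std::printf("scavenge once new space exceeds %zu KB\n", limit / KB);  // 768
    }
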
+bool ScavengeJob::EnoughIdleTimeForScavenge(
+ double idle_time_in_ms, size_t scavenge_speed_in_bytes_per_ms,
+ size_t new_space_size) {
+ if (scavenge_speed_in_bytes_per_ms == 0) {
+ scavenge_speed_in_bytes_per_ms = kInitialScavengeSpeedInBytesPerMs;
+ }
+ return new_space_size <= idle_time_in_ms * scavenge_speed_in_bytes_per_ms;
+}
+
+
+void ScavengeJob::RescheduleIdleTask(Heap* heap) {
+ // Make sure that we don't reschedule more than one time.
+ // Otherwise, we might spam the scheduler with idle tasks.
+ if (!idle_task_rescheduled_) {
+ ScheduleIdleTask(heap);
+ idle_task_rescheduled_ = true;
+ }
+}
+
+
+void ScavengeJob::ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated) {
+ bytes_allocated_since_the_last_task_ += bytes_allocated;
+ if (bytes_allocated_since_the_last_task_ >=
+ static_cast<int>(kBytesAllocatedBeforeNextIdleTask)) {
+ ScheduleIdleTask(heap);
+ bytes_allocated_since_the_last_task_ = 0;
+ idle_task_rescheduled_ = false;
+ }
+}
+
+
+void ScavengeJob::ScheduleIdleTask(Heap* heap) {
+ if (!idle_task_pending_) {
+ v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
+ if (V8::GetCurrentPlatform()->IdleTasksEnabled(isolate)) {
+ idle_task_pending_ = true;
+ auto task = new IdleTask(heap->isolate(), this);
+ V8::GetCurrentPlatform()->CallIdleOnForegroundThread(isolate, task);
+ }
+ }
+}
+}
+}  // namespace v8::internal
diff --git a/deps/v8/src/heap/scavenge-job.h b/deps/v8/src/heap/scavenge-job.h
new file mode 100644
index 0000000000..c9e508ec52
--- /dev/null
+++ b/deps/v8/src/heap/scavenge-job.h
@@ -0,0 +1,80 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SCAVENGE_JOB_H_
+#define V8_HEAP_SCAVENGE_JOB_H_
+
+#include "src/cancelable-task.h"
+#include "src/heap/gc-tracer.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Isolate;
+
+
+// This class posts idle tasks and performs scavenges in the idle tasks.
+class ScavengeJob {
+ public:
+ class IdleTask : public CancelableIdleTask {
+ public:
+ explicit IdleTask(Isolate* isolate, ScavengeJob* job)
+ : CancelableIdleTask(isolate), job_(job) {}
+ // CancelableIdleTask overrides.
+ void RunInternal(double deadline_in_seconds) override;
+
+ private:
+ ScavengeJob* job_;
+ };
+
+ ScavengeJob()
+ : idle_task_pending_(false),
+ idle_task_rescheduled_(false),
+ bytes_allocated_since_the_last_task_(0) {}
+
+ // Posts an idle task if the cumulative bytes allocated since the last
+ // idle task exceed kBytesAllocatedBeforeNextIdleTask.
+ void ScheduleIdleTaskIfNeeded(Heap* heap, int bytes_allocated);
+
+ // Posts an idle task ignoring the bytes allocated, but makes sure
+ // that the new idle task cannot reschedule again.
+ // This prevents infinite rescheduling.
+ void RescheduleIdleTask(Heap* heap);
+
+ bool IdleTaskPending() { return idle_task_pending_; }
+ void NotifyIdleTask() { idle_task_pending_ = false; }
+ bool IdleTaskRescheduled() { return idle_task_rescheduled_; }
+
+ static bool ReachedIdleAllocationLimit(size_t scavenge_speed_in_bytes_per_ms,
+ size_t new_space_size,
+ size_t new_space_capacity);
+
+ static bool EnoughIdleTimeForScavenge(double idle_time_ms,
+ size_t scavenge_speed_in_bytes_per_ms,
+ size_t new_space_size);
+
+ // If we haven't recorded any scavenger events yet, we use a conservative
+ // lower bound for the scavenger speed.
+ static const int kInitialScavengeSpeedInBytesPerMs = 256 * KB;
+ // Estimate of the average idle time that an idle task gets.
+ static const int kAverageIdleTimeMs = 5;
+ // The number of bytes to be allocated in new space before the next idle
+ // task is posted.
+ static const size_t kBytesAllocatedBeforeNextIdleTask = 512 * KB;
+ // The minimum size of allocated new space objects to trigger a scavenge.
+ static const size_t kMinAllocationLimit = 512 * KB;
+ // The allocation limit cannot exceed this fraction of the new space capacity.
+ static const double kMaxAllocationLimitAsFractionOfNewSpace;
+
+ private:
+ void ScheduleIdleTask(Heap* heap);
+ bool idle_task_pending_;
+ bool idle_task_rescheduled_;
+ int bytes_allocated_since_the_last_task_;
+};
+}
+}  // namespace v8::internal
+
+#endif // V8_HEAP_SCAVENGE_JOB_H_
diff --git a/deps/v8/src/heap/scavenger-inl.h b/deps/v8/src/heap/scavenger-inl.h
new file mode 100644
index 0000000000..6ac64f2eb6
--- /dev/null
+++ b/deps/v8/src/heap/scavenger-inl.h
@@ -0,0 +1,51 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SCAVENGER_INL_H_
+#define V8_HEAP_SCAVENGER_INL_H_
+
+#include "src/heap/scavenger.h"
+
+namespace v8 {
+namespace internal {
+
+void Scavenger::ScavengeObject(HeapObject** p, HeapObject* object) {
+ DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
+
+ // We use the first word (where the map pointer usually is) of a heap
+ // object to record the forwarding pointer. A forwarding pointer can
+ // point to an old space, the code space, or the to space of the new
+ // generation.
+ MapWord first_word = object->map_word();
+
+ // If the first word is a forwarding address, the object has already been
+ // copied.
+ if (first_word.IsForwardingAddress()) {
+ HeapObject* dest = first_word.ToForwardingAddress();
+ DCHECK(object->GetIsolate()->heap()->InFromSpace(*p));
+ *p = dest;
+ return;
+ }
+
+ Heap::UpdateAllocationSiteFeedback(object, Heap::IGNORE_SCRATCHPAD_SLOT);
+
+ // AllocationMementos are unrooted and shouldn't survive a scavenge.
+ DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
+ // Call the slow part of scavenge object.
+ return ScavengeObjectSlow(p, object);
+}
+
+
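
The map-word trick above is the heart of a copying collector: an object's first word is either its map (not yet copied) or a forwarding pointer to the copy, so every later slot pointing at the old location can be redirected. A toy version with plain structs; note it tags the forwarding word with a low bit, which only approximates how V8's MapWord distinguishes the two cases:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    struct Obj {
      std::uintptr_t first_word;  // map pointer, or forwarding address once copied
      int payload;
    };

    static const std::uintptr_t kForwardBit = 1;

    static bool IsForwarding(std::uintptr_t w) { return (w & kForwardBit) != 0; }

    static Obj* Evacuate(Obj* from, Obj* to) {
      *to = *from;  // copy the contents
      from->first_word = reinterpret_cast<std::uintptr_t>(to) | kForwardBit;
      return to;
    }

    // Visiting a slot: follow the forwarding pointer if the object moved.
    static void UpdateSlot(Obj** slot, Obj* to) {
      Obj* o = *slot;
      if (IsForwarding(o->first_word)) {
        *slot = reinterpret_cast<Obj*>(o->first_word & ~kForwardBit);
      } else {
        *slot = Evacuate(o, to);  // first visit copies and installs forwarding
      }
    }

    int main() {
      Obj from{0x1000, 42}, to_space[1];
      Obj* slot_a = &from;
      Obj* slot_b = &from;
      UpdateSlot(&slot_a, to_space);  // copies the object
      UpdateSlot(&slot_b, to_space);  // follows the forwarding pointer
      assert(slot_a == slot_b && slot_a->payload == 42);
      std::printf("both slots point at the copy\n");
    }
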
+// static
+void StaticScavengeVisitor::VisitPointer(Heap* heap, Object** p) {
+ Object* object = *p;
+ if (!heap->InNewSpace(object)) return;
+ Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ reinterpret_cast<HeapObject*>(object));
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_SCAVENGER_INL_H_
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
new file mode 100644
index 0000000000..74ed665c3f
--- /dev/null
+++ b/deps/v8/src/heap/scavenger.cc
@@ -0,0 +1,496 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/scavenger.h"
+
+#include "src/contexts.h"
+#include "src/heap/heap.h"
+#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/scavenger-inl.h"
+#include "src/isolate.h"
+#include "src/log.h"
+#include "src/profiler/cpu-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+enum LoggingAndProfiling {
+ LOGGING_AND_PROFILING_ENABLED,
+ LOGGING_AND_PROFILING_DISABLED
+};
+
+
+enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
+
+
+template <MarksHandling marks_handling,
+ LoggingAndProfiling logging_and_profiling_mode>
+class ScavengingVisitor : public StaticVisitorBase {
+ public:
+ static void Initialize() {
+ table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
+ table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
+ table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
+ table_.Register(kVisitByteArray, &EvacuateByteArray);
+ table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+ table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
+ table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
+ table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
+ table_.Register(kVisitJSArrayBuffer, &EvacuateJSArrayBuffer);
+
+ table_.Register(
+ kVisitNativeContext,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ Context::kSize>);
+
+ table_.Register(
+ kVisitConsString,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ ConsString::kSize>);
+
+ table_.Register(
+ kVisitSlicedString,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ SlicedString::kSize>);
+
+ table_.Register(
+ kVisitSymbol,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ Symbol::kSize>);
+
+ table_.Register(
+ kVisitSharedFunctionInfo,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ SharedFunctionInfo::kSize>);
+
+ table_.Register(kVisitJSWeakCollection,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+ table_.Register(kVisitJSTypedArray,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+ table_.Register(kVisitJSDataView,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+ table_.Register(kVisitJSRegExp,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+ if (marks_handling == IGNORE_MARKS) {
+ table_.Register(
+ kVisitJSFunction,
+ &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ JSFunction::kSize>);
+ } else {
+ table_.Register(kVisitJSFunction, &EvacuateJSFunction);
+ }
+
+ table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
+ kVisitDataObject, kVisitDataObjectGeneric>();
+
+ table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
+ kVisitJSObject, kVisitJSObjectGeneric>();
+
+ table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
+ kVisitStruct, kVisitStructGeneric>();
+ }
+
+ static VisitorDispatchTable<ScavengingCallback>* GetTable() {
+ return &table_;
+ }
+
+ private:
+ enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
+
+ static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
+ bool should_record = false;
+#ifdef DEBUG
+ should_record = FLAG_heap_stats;
+#endif
+ should_record = should_record || FLAG_log_gc;
+ if (should_record) {
+ if (heap->new_space()->Contains(obj)) {
+ heap->new_space()->RecordAllocation(obj);
+ } else {
+ heap->new_space()->RecordPromotion(obj);
+ }
+ }
+ }
+
+ // Helper function used by CopyObject to copy a source object to an
+ // allocated target object and update the forwarding pointer in the source
+ // object. Returns the target object.
+ INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
+ HeapObject* target, int size)) {
+ // If we migrate into to-space, then the to-space top pointer should be
+ // right after the target object. Incorporate double alignment
+ // over-allocation.
+ DCHECK(!heap->InToSpace(target) ||
+ target->address() + size == heap->new_space()->top() ||
+ target->address() + size + kPointerSize == heap->new_space()->top());
+
+ // Make sure that we do not overwrite the promotion queue which is at
+ // the end of to-space.
+ DCHECK(!heap->InToSpace(target) ||
+ heap->promotion_queue()->IsBelowPromotionQueue(
+ heap->new_space()->top()));
+
+ // Copy the content of source to target.
+ heap->CopyBlock(target->address(), source->address(), size);
+
+ // Set the forwarding address.
+ source->set_map_word(MapWord::FromForwardingAddress(target));
+
+ if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
+ // Update NewSpace stats if necessary.
+ RecordCopiedObject(heap, target);
+ heap->OnMoveEvent(target, source, size);
+ }
+
+ if (marks_handling == TRANSFER_MARKS) {
+ if (Marking::TransferColor(source, target)) {
+ MemoryChunk::IncrementLiveBytesFromGC(target, size);
+ }
+ }
+ }
+
+ template <AllocationAlignment alignment>
+ static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size) {
+ Heap* heap = map->GetHeap();
+
+ DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
+ AllocationResult allocation =
+ heap->new_space()->AllocateRaw(object_size, alignment);
+
+ HeapObject* target = NULL; // Initialization to please compiler.
+ if (allocation.To(&target)) {
+ // Order is important here: Set the promotion limit before storing a
+ // filler for double alignment or migrating the object. Otherwise we
+ // may end up overwriting promotion queue entries when we migrate the
+ // object.
+ heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+
+ MigrateObject(heap, object, target, object_size);
+
+ // Update slot to new target.
+ *slot = target;
+
+ heap->IncrementSemiSpaceCopiedObjectSize(object_size);
+ return true;
+ }
+ return false;
+ }
+
+
+ template <ObjectContents object_contents, AllocationAlignment alignment>
+ static inline bool PromoteObject(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size) {
+ Heap* heap = map->GetHeap();
+
+ AllocationResult allocation =
+ heap->old_space()->AllocateRaw(object_size, alignment);
+
+ HeapObject* target = NULL; // Initialization to please compiler.
+ if (allocation.To(&target)) {
+ MigrateObject(heap, object, target, object_size);
+
+ // Update slot to new target.
+ *slot = target;
+
+ if (object_contents == POINTER_OBJECT) {
+ if (map->instance_type() == JS_FUNCTION_TYPE) {
+ heap->promotion_queue()->insert(target,
+ JSFunction::kNonWeakFieldsEndOffset);
+ } else {
+ heap->promotion_queue()->insert(target, object_size);
+ }
+ }
+ heap->IncrementPromotedObjectsSize(object_size);
+ return true;
+ }
+ return false;
+ }
+
+
+ template <ObjectContents object_contents, AllocationAlignment alignment>
+ static inline void EvacuateObject(Map* map, HeapObject** slot,
+ HeapObject* object, int object_size) {
+ SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+ SLOW_DCHECK(object->Size() == object_size);
+ Heap* heap = map->GetHeap();
+
+ if (!heap->ShouldBePromoted(object->address(), object_size)) {
+ // A semi-space copy may fail due to fragmentation. In that case, we
+ // try to promote the object.
+ if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
+ return;
+ }
+ }
+
+ if (PromoteObject<object_contents, alignment>(map, slot, object,
+ object_size)) {
+ return;
+ }
+
+ // If promotion failed, we try to copy the object to the other semi-space.
+ if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
+
+ UNREACHABLE();
+ }
+
+
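
EvacuateObject above is a three-step fallback: try a semi-space copy (unless the object should be promoted by age), then promotion to old space, then the semi-space again before giving up. The shape of that cascade, abstracted with hypothetical allocation stubs:

    #include <cstdio>

    // Hypothetical stand-ins; in V8 these are allocation attempts that can
    // fail under fragmentation or when a space is full.
    static bool new_space_has_room = false;
    static bool old_space_has_room = true;

    static bool TryCopyToNewSpace() { return new_space_has_room; }
    static bool TryPromoteToOldSpace() { return old_space_has_room; }

    static void Evacuate(bool should_be_promoted) {
      if (!should_be_promoted && TryCopyToNewSpace()) {
        std::puts("copied within new space");
        return;
      }
      if (TryPromoteToOldSpace()) {
        std::puts("promoted to old space");
        return;
      }
      if (TryCopyToNewSpace()) {  // last resort, as in the code above
        std::puts("copied after failed promotion");
        return;
      }
      std::puts("unreachable: one of the attempts must succeed");
    }

    int main() { Evacuate(/*should_be_promoted=*/true); }
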
+ static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+ JSFunction::kSize>(map, slot, object);
+
+ MapWord map_word = object->map_word();
+ DCHECK(map_word.IsForwardingAddress());
+ HeapObject* target = map_word.ToForwardingAddress();
+
+ MarkBit mark_bit = Marking::MarkBitFrom(target);
+ if (Marking::IsBlack(mark_bit)) {
+ // This object is black and it might not be rescanned by marker.
+ // We should explicitly record code entry slot for compaction because
+ // promotion queue processing (IterateAndMarkPointersToFromSpace) will
+ // miss it as it is not HeapObject-tagged.
+ Address code_entry_slot =
+ target->address() + JSFunction::kCodeEntryOffset;
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+ map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
+ target, code_entry_slot, code);
+ }
+ }
+
+
+ static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+ EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+ object_size);
+ }
+
+
+ static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
+ int object_size = FixedDoubleArray::SizeFor(length);
+ EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
+ }
+
+
+ static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+ EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
+
+ MapWord map_word = object->map_word();
+ DCHECK(map_word.IsForwardingAddress());
+ FixedTypedArrayBase* target =
+ reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
+ if (target->base_pointer() != Smi::FromInt(0))
+ target->set_base_pointer(target, SKIP_WRITE_BARRIER);
+ }
+
+
+ static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
+ EvacuateObject<DATA_OBJECT, kDoubleAligned>(map, slot, object, object_size);
+
+ MapWord map_word = object->map_word();
+ DCHECK(map_word.IsForwardingAddress());
+ FixedTypedArrayBase* target =
+ reinterpret_cast<FixedTypedArrayBase*>(map_word.ToForwardingAddress());
+ if (target->base_pointer() != Smi::FromInt(0))
+ target->set_base_pointer(target, SKIP_WRITE_BARRIER);
+ }
+
+
+ static inline void EvacuateJSArrayBuffer(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object);
+
+ Heap* heap = map->GetHeap();
+ MapWord map_word = object->map_word();
+ DCHECK(map_word.IsForwardingAddress());
+ HeapObject* target = map_word.ToForwardingAddress();
+ if (!heap->InNewSpace(target)) {
+ heap->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
+ }
+ }
+
+
+ static inline void EvacuateByteArray(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
+ EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
+ }
+
+
+ static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ int object_size = SeqOneByteString::cast(object)
+ ->SeqOneByteStringSize(map->instance_type());
+ EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
+ }
+
+
+ static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ int object_size = SeqTwoByteString::cast(object)
+ ->SeqTwoByteStringSize(map->instance_type());
+ EvacuateObject<DATA_OBJECT, kWordAligned>(map, slot, object, object_size);
+ }
+
+
+ static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ DCHECK(IsShortcutCandidate(map->instance_type()));
+
+ Heap* heap = map->GetHeap();
+
+ if (marks_handling == IGNORE_MARKS &&
+ ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
+ HeapObject* first =
+ HeapObject::cast(ConsString::cast(object)->unchecked_first());
+
+ *slot = first;
+
+ if (!heap->InNewSpace(first)) {
+ object->set_map_word(MapWord::FromForwardingAddress(first));
+ return;
+ }
+
+ MapWord first_word = first->map_word();
+ if (first_word.IsForwardingAddress()) {
+ HeapObject* target = first_word.ToForwardingAddress();
+
+ *slot = target;
+ object->set_map_word(MapWord::FromForwardingAddress(target));
+ return;
+ }
+
+ Scavenger::ScavengeObjectSlow(slot, first);
+ object->set_map_word(MapWord::FromForwardingAddress(*slot));
+ return;
+ }
+
+ int object_size = ConsString::kSize;
+ EvacuateObject<POINTER_OBJECT, kWordAligned>(map, slot, object,
+ object_size);
+ }
+
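// [Editor's sketch, not part of the upstream patch] Shortcutting relies on
// the invariant that a cons string whose second part is the empty string is
// semantically equal to its first part, so the scavenger may forward the
// slot straight to the first part instead of evacuating the wrapper:
//
//   ConsString* cons = ConsString::cast(object);
//   if (cons->unchecked_second() == heap->empty_string()) {
//     *slot = cons->unchecked_first();  // skip the wrapper object entirely
//   }
//
// With TRANSFER_MARKS the wrapper is evacuated like a regular ConsString;
// see SelectScavengingVisitorsTable below for the compacting case.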
+ template <ObjectContents object_contents>
+ class ObjectEvacuationStrategy {
+ public:
+ template <int object_size>
+ static inline void VisitSpecialized(Map* map, HeapObject** slot,
+ HeapObject* object) {
+ EvacuateObject<object_contents, kWordAligned>(map, slot, object,
+ object_size);
+ }
+
+ static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
+ int object_size = map->instance_size();
+ EvacuateObject<object_contents, kWordAligned>(map, slot, object,
+ object_size);
+ }
+ };
+
+ static VisitorDispatchTable<ScavengingCallback> table_;
+};
+
+
+template <MarksHandling marks_handling,
+ LoggingAndProfiling logging_and_profiling_mode>
+VisitorDispatchTable<ScavengingCallback>
+ ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
+
+
+// static
+void Scavenger::Initialize() {
+ ScavengingVisitor<TRANSFER_MARKS,
+ LOGGING_AND_PROFILING_DISABLED>::Initialize();
+ ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
+ ScavengingVisitor<TRANSFER_MARKS,
+ LOGGING_AND_PROFILING_ENABLED>::Initialize();
+ ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
+}
+
+
+// static
+void Scavenger::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
+ SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
+ MapWord first_word = object->map_word();
+ SLOW_DCHECK(!first_word.IsForwardingAddress());
+ Map* map = first_word.ToMap();
+ Scavenger* scavenger = map->GetHeap()->scavenge_collector_;
+ scavenger->scavenging_visitors_table_.GetVisitor(map)(map, p, object);
+}
+
+
+void Scavenger::SelectScavengingVisitorsTable() {
+ bool logging_and_profiling =
+ FLAG_verify_predictable || isolate()->logger()->is_logging() ||
+ isolate()->cpu_profiler()->is_profiling() ||
+ (isolate()->heap_profiler() != NULL &&
+ isolate()->heap_profiler()->is_tracking_object_moves());
+
+ if (!heap()->incremental_marking()->IsMarking()) {
+ if (!logging_and_profiling) {
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<IGNORE_MARKS,
+ LOGGING_AND_PROFILING_DISABLED>::GetTable());
+ } else {
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<IGNORE_MARKS,
+ LOGGING_AND_PROFILING_ENABLED>::GetTable());
+ }
+ } else {
+ if (!logging_and_profiling) {
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<TRANSFER_MARKS,
+ LOGGING_AND_PROFILING_DISABLED>::GetTable());
+ } else {
+ scavenging_visitors_table_.CopyFrom(
+ ScavengingVisitor<TRANSFER_MARKS,
+ LOGGING_AND_PROFILING_ENABLED>::GetTable());
+ }
+
+ if (heap()->incremental_marking()->IsCompacting()) {
+ // When compacting, forbid short-circuiting of cons strings.
+ // Scavenging code relies on the fact that new space objects
+ // can't be evacuated into evacuation candidates, but
+ // short-circuiting violates this assumption.
+ scavenging_visitors_table_.Register(
+ StaticVisitorBase::kVisitShortcutCandidate,
+ scavenging_visitors_table_.GetVisitorById(
+ StaticVisitorBase::kVisitConsString));
+ }
+ }
+}
+
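// [Editor's note on the selection logic above] Scavenger::Initialize sets up
// four static dispatch tables; SelectScavengingVisitorsTable picks one as a
// pure function of heap state:
//
//   marking off, no logging -> ScavengingVisitor<IGNORE_MARKS,   DISABLED>
//   marking off, logging    -> ScavengingVisitor<IGNORE_MARKS,   ENABLED>
//   marking on,  no logging -> ScavengingVisitor<TRANSFER_MARKS, DISABLED>
//   marking on,  logging    -> ScavengingVisitor<TRANSFER_MARKS, ENABLED>
//
// and, when the incremental marker is also compacting, additionally remaps
// kVisitShortcutCandidate to the plain kVisitConsString visitor.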
+
+Isolate* Scavenger::isolate() { return heap()->isolate(); }
+
+
+void ScavengeVisitor::VisitPointer(Object** p) { ScavengePointer(p); }
+
+
+void ScavengeVisitor::VisitPointers(Object** start, Object** end) {
+ // Copy all HeapObject pointers in [start, end)
+ for (Object** p = start; p < end; p++) ScavengePointer(p);
+}
+
+
+void ScavengeVisitor::ScavengePointer(Object** p) {
+ Object* object = *p;
+ if (!heap_->InNewSpace(object)) return;
+ Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p),
+ reinterpret_cast<HeapObject*>(object));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
new file mode 100644
index 0000000000..44da98c86c
--- /dev/null
+++ b/deps/v8/src/heap/scavenger.h
@@ -0,0 +1,72 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SCAVENGER_H_
+#define V8_HEAP_SCAVENGER_H_
+
+#include "src/heap/objects-visiting.h"
+
+namespace v8 {
+namespace internal {
+
+typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
+ HeapObject* object);
+
+class Scavenger {
+ public:
+ explicit Scavenger(Heap* heap) : heap_(heap) {}
+
+ // Initializes static visitor dispatch tables.
+ static void Initialize();
+
+ // Callback function passed to Heap::Iterate etc. Copies an object if
+ // necessary; the object might be promoted to an old space. The caller must
+ // ensure the precondition that the object is (a) a heap object and (b) in
+ // the heap's from space.
+ static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+
+ // Slow part of {ScavengeObject} above.
+ static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
+
+ // Chooses an appropriate static visitor table depending on the current state
+ // of the heap (i.e. incremental marking, logging and profiling).
+ void SelectScavengingVisitorsTable();
+
+ Isolate* isolate();
+ Heap* heap() { return heap_; }
+
+ private:
+ Heap* heap_;
+ VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+};
+
+
+// Helper class for turning the scavenger into an object visitor that also
+// filters out non-HeapObjects and objects that do not reside in new space.
+class ScavengeVisitor : public ObjectVisitor {
+ public:
+ explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
+
+ void VisitPointer(Object** p);
+ void VisitPointers(Object** start, Object** end);
+
+ private:
+ inline void ScavengePointer(Object** p);
+
+ Heap* heap_;
+};
+
+
+// Helper class for turning the scavenger into an object visitor that also
+// filters out non-HeapObjects and objects that do not reside in new space.
+class StaticScavengeVisitor
+ : public StaticNewSpaceVisitor<StaticScavengeVisitor> {
+ public:
+ static inline void VisitPointer(Heap* heap, Object** p);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_SCAVENGER_H_
diff --git a/deps/v8/src/heap/slots-buffer.cc b/deps/v8/src/heap/slots-buffer.cc
new file mode 100644
index 0000000000..3f145e6e2e
--- /dev/null
+++ b/deps/v8/src/heap/slots-buffer.cc
@@ -0,0 +1,161 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/slots-buffer.h"
+
+#include "src/assembler.h"
+#include "src/heap/heap.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
+ return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
+}
+
+
+bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
+ SlotsBuffer** buffer_address, SlotType type,
+ Address addr, AdditionMode mode) {
+ SlotsBuffer* buffer = *buffer_address;
+ if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
+ if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+ allocator->DeallocateChain(buffer_address);
+ return false;
+ }
+ buffer = allocator->AllocateBuffer(buffer);
+ *buffer_address = buffer;
+ }
+ DCHECK(buffer->HasSpaceForTypedSlot());
+ buffer->Add(reinterpret_cast<ObjectSlot>(type));
+ buffer->Add(reinterpret_cast<ObjectSlot>(addr));
+ return true;
+}
+
+
+void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
+ // Remove entries by replacing them with an old-space slot containing a smi
+ // that is located in an unmovable page.
+ const ObjectSlot kRemovedEntry = HeapObject::RawField(
+ heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
+ DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
+ ->NeverEvacuate());
+
+ while (buffer != NULL) {
+ SlotsBuffer::ObjectSlot* slots = buffer->slots_;
+ intptr_t slots_count = buffer->idx_;
+
+ for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
+ ObjectSlot slot = slots[slot_idx];
+ if (!IsTypedSlot(slot)) {
+ Object* object = *slot;
+ // A slot is invalid when it currently:
+ // - does not point to a heap object (i.e. holds a smi),
+ // - points to a heap object in new space,
+ // - is not located within a live heap object at a valid pointer slot, or
+ // - points to a heap object that is not on an evacuation candidate.
+ if (!object->IsHeapObject() || heap->InNewSpace(object) ||
+ !heap->mark_compact_collector()->IsSlotInLiveObject(
+ reinterpret_cast<Address>(slot)) ||
+ !Page::FromAddress(reinterpret_cast<Address>(object))
+ ->IsEvacuationCandidate()) {
+ // TODO(hpayer): Instead of replacing slots with kRemovedEntry we
+ // could shrink the slots buffer in-place.
+ slots[slot_idx] = kRemovedEntry;
+ }
+ } else {
+ ++slot_idx;
+ DCHECK(slot_idx < slots_count);
+ }
+ }
+ buffer = buffer->next();
+ }
+}
+
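// [Editor's note] kRemovedEntry is deliberately harmless if a dead entry is
// later processed by a pointer-updating pass: it addresses the length field
// of the canonical empty fixed array, which (a) holds a smi, so update code
// skips it, and (b) sits on a NeverEvacuate page, so the slot address itself
// stays valid. A consumer can thus treat removed and live entries uniformly:
//
//   Object* obj = *slot;
//   if (obj->IsHeapObject()) { /* relocate */ }  // smi entry -> no-op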
+
+void SlotsBuffer::RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
+ Address start_slot, Address end_slot) {
+ // Remove entries by replacing them with an old-space slot containing a smi
+ // that is located in an unmovable page.
+ const ObjectSlot kRemovedEntry = HeapObject::RawField(
+ heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
+ DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
+ ->NeverEvacuate());
+
+ while (buffer != NULL) {
+ SlotsBuffer::ObjectSlot* slots = buffer->slots_;
+ intptr_t slots_count = buffer->idx_;
+ bool is_typed_slot = false;
+
+ for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
+ ObjectSlot slot = slots[slot_idx];
+ if (!IsTypedSlot(slot)) {
+ Address slot_address = reinterpret_cast<Address>(slot);
+ if (slot_address >= start_slot && slot_address < end_slot) {
+ // TODO(hpayer): Instead of replacing slots with kRemovedEntry we
+ // could shrink the slots buffer in-place.
+ slots[slot_idx] = kRemovedEntry;
+ if (is_typed_slot) {
+ slots[slot_idx - 1] = kRemovedEntry;
+ }
+ }
+ is_typed_slot = false;
+ } else {
+ is_typed_slot = true;
+ DCHECK(slot_idx < slots_count);
+ }
+ }
+ buffer = buffer->next();
+ }
+}
+
+
+void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) {
+ while (buffer != NULL) {
+ SlotsBuffer::ObjectSlot* slots = buffer->slots_;
+ intptr_t slots_count = buffer->idx_;
+
+ for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
+ ObjectSlot slot = slots[slot_idx];
+ if (!IsTypedSlot(slot)) {
+ Object* object = *slot;
+ if (object->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(object);
+ CHECK(!heap->InNewSpace(object));
+ heap->mark_compact_collector()->VerifyIsSlotInLiveObject(
+ reinterpret_cast<Address>(slot), heap_object);
+ }
+ } else {
+ ++slot_idx;
+ DCHECK(slot_idx < slots_count);
+ }
+ }
+ buffer = buffer->next();
+ }
+}
+
+
+SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
+ return new SlotsBuffer(next_buffer);
+}
+
+
+void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
+ delete buffer;
+}
+
+
+void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
+ SlotsBuffer* buffer = *buffer_address;
+ while (buffer != NULL) {
+ SlotsBuffer* next_buffer = buffer->next();
+ DeallocateBuffer(buffer);
+ buffer = next_buffer;
+ }
+ *buffer_address = NULL;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/slots-buffer.h b/deps/v8/src/heap/slots-buffer.h
new file mode 100644
index 0000000000..dc6c922963
--- /dev/null
+++ b/deps/v8/src/heap/slots-buffer.h
@@ -0,0 +1,175 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SLOTS_BUFFER_H_
+#define V8_HEAP_SLOTS_BUFFER_H_
+
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class SlotsBuffer;
+
+
+// SlotsBufferAllocator manages the allocation and deallocation of slots buffer
+// chunks and links them together. Slots buffer chunks are always created by the
+// SlotsBufferAllocator.
+class SlotsBufferAllocator {
+ public:
+ SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
+ void DeallocateBuffer(SlotsBuffer* buffer);
+
+ void DeallocateChain(SlotsBuffer** buffer_address);
+};
+
+
+// SlotsBuffer records a sequence of slots that has to be updated
+// after live objects were relocated from evacuation candidates.
+// All slots are either untyped or typed:
+// - Untyped slots are expected to contain a tagged object pointer.
+// They are recorded by an address.
+// - Typed slots are expected to contain an encoded pointer to a heap
+// object where the way of encoding depends on the type of the slot.
+// They are recorded as a pair (SlotType, slot address).
+// We assume that the zero page is never mapped; this allows us to distinguish
+// untyped slots from typed slots during iteration by a simple comparison:
+// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
+// it is the first element of a typed slot's pair.
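// [Editor's sketch of the encoding just described, not part of the patch]
// A consumer walking the raw entry array distinguishes the two kinds of
// entries with a single comparison:
//
//   for (intptr_t i = 0; i < static_cast<intptr_t>(buffer->Size()); i++) {
//     SlotsBuffer::ObjectSlot entry = buffer->Get(i);
//     if (reinterpret_cast<uintptr_t>(entry) < NUMBER_OF_SLOT_TYPES) {
//       SlotType type =
//           static_cast<SlotType>(reinterpret_cast<uintptr_t>(entry));
//       Address addr = reinterpret_cast<Address>(buffer->Get(++i));
//       // ... decode the typed slot (type, addr) ...
//     } else {
//       // ... entry is itself the address of an untyped slot ...
//     }
//   }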
+class SlotsBuffer {
+ public:
+ typedef Object** ObjectSlot;
+
+ explicit SlotsBuffer(SlotsBuffer* next_buffer)
+ : idx_(0), chain_length_(1), next_(next_buffer) {
+ if (next_ != NULL) {
+ chain_length_ = next_->chain_length_ + 1;
+ }
+ }
+
+ ~SlotsBuffer() {}
+
+ void Add(ObjectSlot slot) {
+ DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
+#ifdef DEBUG
+ if (slot >= reinterpret_cast<ObjectSlot>(NUMBER_OF_SLOT_TYPES)) {
+ DCHECK_NOT_NULL(*slot);
+ }
+#endif
+ slots_[idx_++] = slot;
+ }
+
+ ObjectSlot Get(intptr_t i) {
+ DCHECK(i >= 0 && i < kNumberOfElements);
+ return slots_[i];
+ }
+
+ size_t Size() {
+ DCHECK(idx_ <= kNumberOfElements);
+ return idx_;
+ }
+
+ enum SlotType {
+ EMBEDDED_OBJECT_SLOT,
+ OBJECT_SLOT,
+ RELOCATED_CODE_OBJECT,
+ CELL_TARGET_SLOT,
+ CODE_TARGET_SLOT,
+ CODE_ENTRY_SLOT,
+ DEBUG_TARGET_SLOT,
+ NUMBER_OF_SLOT_TYPES
+ };
+
+ static const char* SlotTypeToString(SlotType type) {
+ switch (type) {
+ case EMBEDDED_OBJECT_SLOT:
+ return "EMBEDDED_OBJECT_SLOT";
+ case OBJECT_SLOT:
+ return "OBJECT_SLOT";
+ case RELOCATED_CODE_OBJECT:
+ return "RELOCATED_CODE_OBJECT";
+ case CELL_TARGET_SLOT:
+ return "CELL_TARGET_SLOT";
+ case CODE_TARGET_SLOT:
+ return "CODE_TARGET_SLOT";
+ case CODE_ENTRY_SLOT:
+ return "CODE_ENTRY_SLOT";
+ case DEBUG_TARGET_SLOT:
+ return "DEBUG_TARGET_SLOT";
+ case NUMBER_OF_SLOT_TYPES:
+ return "NUMBER_OF_SLOT_TYPES";
+ }
+ return "UNKNOWN SlotType";
+ }
+
+ SlotsBuffer* next() { return next_; }
+
+ static int SizeOfChain(SlotsBuffer* buffer) {
+ if (buffer == NULL) return 0;
+ return static_cast<int>(buffer->idx_ +
+ (buffer->chain_length_ - 1) * kNumberOfElements);
+ }
+
+ inline bool IsFull() { return idx_ == kNumberOfElements; }
+
+ inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
+
+ enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
+
+ static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
+ return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
+ }
+
+ INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
+ SlotsBuffer** buffer_address, ObjectSlot slot,
+ AdditionMode mode)) {
+ SlotsBuffer* buffer = *buffer_address;
+ if (buffer == NULL || buffer->IsFull()) {
+ if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+ allocator->DeallocateChain(buffer_address);
+ return false;
+ }
+ buffer = allocator->AllocateBuffer(buffer);
+ *buffer_address = buffer;
+ }
+ buffer->Add(slot);
+ return true;
+ }
+
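// [Editor's note] A hypothetical recording site would pass the allocator and
// the per-chunk buffer head, treating a false return as overflow, i.e. the
// page has to be rescanned during evacuation instead:
//
//   if (!SlotsBuffer::AddTo(&allocator_, chunk->slots_buffer_address(),
//                           slot, SlotsBuffer::FAIL_ON_OVERFLOW)) {
//     chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
//   }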
+ static bool IsTypedSlot(ObjectSlot slot);
+
+ static bool AddTo(SlotsBufferAllocator* allocator,
+ SlotsBuffer** buffer_address, SlotType type, Address addr,
+ AdditionMode mode);
+
+ // Eliminates all stale entries from the slots buffer, i.e., slots that
+ // are no longer part of live objects. This method must be called after
+ // marking, when the whole transitive closure is known, and before
+ // sweeping, while the mark bits are still intact.
+ static void RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer);
+
+ // Eliminate all slots that are within the given address range.
+ static void RemoveObjectSlots(Heap* heap, SlotsBuffer* buffer,
+ Address start_slot, Address end_slot);
+
+ // Ensures that there are no invalid slots in the chain of slots buffers.
+ static void VerifySlots(Heap* heap, SlotsBuffer* buffer);
+
+ static const int kNumberOfElements = 1021;
+
+ private:
+ static const int kChainLengthThreshold = 15;
+
+ intptr_t idx_;
+ intptr_t chain_length_;
+ SlotsBuffer* next_;
+ ObjectSlot slots_[kNumberOfElements];
+};
+
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_SLOTS_BUFFER_H_
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 762558e11b..a12ed6f296 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -6,9 +6,9 @@
#define V8_HEAP_SPACES_INL_H_
#include "src/heap/spaces.h"
-#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/msan.h"
+#include "src/profiler/heap-profiler.h"
#include "src/v8memory.h"
namespace v8 {
@@ -356,6 +356,13 @@ AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
}
+AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
+ int size_in_bytes) {
+ base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
+ return AllocateRawUnaligned(size_in_bytes);
+}
+
+
// Raw allocation.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
AllocationAlignment alignment) {
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index e66fd3944c..cd8a72951c 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -7,7 +7,7 @@
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
-#include "src/heap/mark-compact.h"
+#include "src/heap/slots-buffer.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"
@@ -80,8 +80,7 @@ CodeRange::CodeRange(Isolate* isolate)
code_range_(NULL),
free_list_(0),
allocation_list_(0),
- current_allocation_block_index_(0),
- emergency_block_() {}
+ current_allocation_block_index_(0) {}
bool CodeRange::SetUp(size_t requested) {
@@ -140,7 +139,6 @@ bool CodeRange::SetUp(size_t requested) {
current_allocation_block_index_ = 0;
LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
- ReserveEmergencyBlock();
return true;
}
@@ -198,7 +196,10 @@ bool CodeRange::GetNextAllocationBlock(size_t requested) {
Address CodeRange::AllocateRawMemory(const size_t requested_size,
const size_t commit_size,
size_t* allocated) {
- DCHECK(commit_size <= requested_size);
+ // requested_size includes the guard pages while commit_size does not. Make
+ // sure callers know about this invariant.
+ CHECK_LE(commit_size,
+ requested_size - 2 * MemoryAllocator::CodePageGuardSize());
FreeBlock current;
if (!ReserveBlock(requested_size, &current)) {
*allocated = 0;
@@ -229,6 +230,7 @@ bool CodeRange::UncommitRawMemory(Address start, size_t length) {
void CodeRange::FreeRawMemory(Address address, size_t length) {
DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
+ base::LockGuard<base::Mutex> guard(&code_range_mutex_);
free_list_.Add(FreeBlock(address, length));
code_range_->Uncommit(address, length);
}
@@ -237,12 +239,14 @@ void CodeRange::FreeRawMemory(Address address, size_t length) {
void CodeRange::TearDown() {
delete code_range_; // Frees all memory in the virtual memory range.
code_range_ = NULL;
+ base::LockGuard<base::Mutex> guard(&code_range_mutex_);
free_list_.Free();
allocation_list_.Free();
}
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
+ base::LockGuard<base::Mutex> guard(&code_range_mutex_);
DCHECK(allocation_list_.length() == 0 ||
current_allocation_block_index_ < allocation_list_.length());
if (allocation_list_.length() == 0 ||
@@ -264,24 +268,9 @@ bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
}
-void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
-
-
-void CodeRange::ReserveEmergencyBlock() {
- const size_t requested_size = MemoryAllocator::CodePageAreaSize();
- if (emergency_block_.size == 0) {
- ReserveBlock(requested_size, &emergency_block_);
- } else {
- DCHECK(emergency_block_.size >= requested_size);
- }
-}
-
-
-void CodeRange::ReleaseEmergencyBlock() {
- if (emergency_block_.size != 0) {
- ReleaseBlock(&emergency_block_);
- emergency_block_.size = 0;
- }
+void CodeRange::ReleaseBlock(const FreeBlock* block) {
+ base::LockGuard<base::Mutex> guard(&code_range_mutex_);
+ free_list_.Add(*block);
}
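// [Editor's note] Together, the hunks above put every mutation of free_list_
// and allocation_list_ (FreeRawMemory, TearDown, ReserveBlock, ReleaseBlock)
// under code_range_mutex_, which the parallel-compaction changes elsewhere
// in this patch rely on.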
@@ -313,7 +302,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
void MemoryAllocator::TearDown() {
// Check that spaces were torn down before MemoryAllocator.
- DCHECK(size_ == 0);
+ DCHECK(size_.Value() == 0);
// TODO(gc) this will be true again when we fix FreeMemory.
// DCHECK(size_executable_ == 0);
capacity_ = 0;
@@ -332,26 +321,30 @@ bool MemoryAllocator::CommitMemory(Address base, size_t size,
}
-void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
- Executability executable) {
- // TODO(gc) make code_range part of memory allocator?
- DCHECK(reservation->IsReserved());
- size_t size = reservation->size();
- DCHECK(size_ >= size);
- size_ -= size;
+void MemoryAllocator::FreeNewSpaceMemory(Address addr,
+ base::VirtualMemory* reservation,
+ Executability executable) {
+ LOG(isolate_, DeleteEvent("NewSpace", addr));
+ DCHECK(reservation->IsReserved());
+ const intptr_t size = static_cast<intptr_t>(reservation->size());
+ DCHECK(size_.Value() >= size);
+ size_.Increment(-size);
isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+ FreeMemory(reservation, NOT_EXECUTABLE);
+}
- if (executable == EXECUTABLE) {
- DCHECK(size_executable_ >= size);
- size_executable_ -= size;
- }
+
+void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
+ Executability executable) {
+ // TODO(gc) make code_range part of memory allocator?
// Code which is part of the code-range does not have its own VirtualMemory.
DCHECK(isolate_->code_range() == NULL ||
!isolate_->code_range()->contains(
static_cast<Address>(reservation->address())));
DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
- !isolate_->code_range()->valid() || size <= Page::kPageSize);
+ !isolate_->code_range()->valid() ||
+ reservation->size() <= Page::kPageSize);
reservation->Release();
}
@@ -360,15 +353,6 @@ void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
void MemoryAllocator::FreeMemory(Address base, size_t size,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
- DCHECK(size_ >= size);
- size_ -= size;
-
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
- if (executable == EXECUTABLE) {
- DCHECK(size_executable_ >= size);
- size_executable_ -= size;
- }
if (isolate_->code_range() != NULL &&
isolate_->code_range()->contains(static_cast<Address>(base))) {
DCHECK(executable == EXECUTABLE);
@@ -388,7 +372,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
base::VirtualMemory reservation(size, alignment);
if (!reservation.IsReserved()) return NULL;
- size_ += reservation.size();
+ size_.Increment(static_cast<intptr_t>(reservation.size()));
Address base =
RoundUp(static_cast<Address>(reservation.address()), alignment);
controller->TakeControl(&reservation);
@@ -444,8 +428,6 @@ NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
MemoryChunk* chunk =
MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
area_end, NOT_EXECUTABLE, semi_space);
- chunk->set_next_chunk(NULL);
- chunk->set_prev_chunk(NULL);
chunk->initialize_scan_on_scavenge(true);
bool in_to_space = (semi_space->id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
@@ -486,8 +468,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
chunk->skip_list_ = NULL;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
- chunk->high_water_mark_ = static_cast<int>(area_start - base);
- chunk->set_parallel_sweeping(SWEEPING_DONE);
+ chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
+ chunk->parallel_sweeping_state().SetValue(kSweepingDone);
+ chunk->parallel_compaction_state().SetValue(kCompactingDone);
chunk->mutex_ = NULL;
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
@@ -498,6 +481,8 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
chunk->SetFlag(WAS_SWEPT);
+ chunk->set_next_chunk(nullptr);
+ chunk->set_prev_chunk(nullptr);
DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
@@ -632,7 +617,8 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
CodePageGuardSize();
// Check executable memory limit.
- if (size_executable_ + chunk_size > capacity_executable_) {
+ if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
+ capacity_executable_) {
LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
"V8 Executable Allocation capacity exceeded"));
return NULL;
@@ -656,16 +642,16 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
DCHECK(
IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
if (base == NULL) return NULL;
- size_ += chunk_size;
+ size_.Increment(static_cast<intptr_t>(chunk_size));
// Update executable memory size.
- size_executable_ += chunk_size;
+ size_executable_.Increment(static_cast<intptr_t>(chunk_size));
} else {
base = AllocateAlignedMemory(chunk_size, commit_size,
MemoryChunk::kAlignment, executable,
&reservation);
if (base == NULL) return NULL;
// Update executable memory size.
- size_executable_ += reservation.size();
+ size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
}
if (Heap::ShouldZapGarbage()) {
@@ -725,9 +711,7 @@ void Page::ResetFreeListStatistics() {
Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
Executability executable) {
MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
-
if (chunk == NULL) return NULL;
-
return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}
@@ -742,7 +726,8 @@ LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
}
-void MemoryAllocator::Free(MemoryChunk* chunk) {
+void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
+ DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
if (chunk->owner() != NULL) {
ObjectSpace space =
@@ -753,9 +738,29 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
- delete chunk->slots_buffer();
- delete chunk->skip_list();
- delete chunk->mutex();
+ intptr_t size;
+ base::VirtualMemory* reservation = chunk->reserved_memory();
+ if (reservation->IsReserved()) {
+ size = static_cast<intptr_t>(reservation->size());
+ } else {
+ size = static_cast<intptr_t>(chunk->size());
+ }
+ DCHECK(size_.Value() >= size);
+ size_.Increment(-size);
+ isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+ if (chunk->executable() == EXECUTABLE) {
+ DCHECK(size_executable_.Value() >= size);
+ size_executable_.Increment(-size);
+ }
+
+ chunk->SetFlag(MemoryChunk::PRE_FREED);
+}
+
+
+void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
+ DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+ chunk->ReleaseAllocatedMemory();
base::VirtualMemory* reservation = chunk->reserved_memory();
if (reservation->IsReserved()) {
@@ -766,6 +771,12 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
}
+void MemoryAllocator::Free(MemoryChunk* chunk) {
+ PreFreeMemory(chunk);
+ PerformFreeMemory(chunk);
+}
+
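// [Editor's note] Splitting Free() into PreFreeMemory/PerformFreeMemory lets
// the accounting (counters, size_, the PRE_FREED flag) happen eagerly while
// the actual release of the chunk's memory can be deferred, e.g. for pages
// queued via Heap::QueueMemoryChunkForFree; Free() simply runs both phases
// back to back.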
+
bool MemoryAllocator::CommitBlock(Address start, size_t size,
Executability executable) {
if (!CommitMemory(start, size, executable)) return false;
@@ -840,13 +851,14 @@ void MemoryAllocator::RemoveMemoryAllocationCallback(
#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
- float pct = static_cast<float>(capacity_ - size_) / capacity_;
+ intptr_t size = Size();
+ float pct = static_cast<float>(capacity_ - size) / capacity_;
PrintF(" capacity: %" V8_PTR_PREFIX
"d"
", used: %" V8_PTR_PREFIX
"d"
", available: %%%d\n\n",
- capacity_, size_, static_cast<int>(pct * 100));
+ capacity_, size, static_cast<int>(pct * 100));
}
#endif
@@ -918,6 +930,13 @@ void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
}
+void MemoryChunk::ReleaseAllocatedMemory() {
+ delete slots_buffer_;
+ delete skip_list_;
+ delete mutex_;
+}
+
+
// -----------------------------------------------------------------------------
// PagedSpace implementation
@@ -936,8 +955,7 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
: Space(heap, space, executable),
free_list_(this),
unswept_free_bytes_(0),
- end_of_unswept_pages_(NULL),
- emergency_memory_(NULL) {
+ end_of_unswept_pages_(NULL) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
@@ -965,6 +983,51 @@ void PagedSpace::TearDown() {
}
+void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
+ DCHECK(identity() == other->identity());
+ // Destroy the linear allocation space of {other}. This is needed
+ // (a) to avoid wasting the memory and
+ // (b) to keep the rest of the chunk in an iterable state (a filler is
+ // needed).
+ other->EmptyAllocationInfo();
+
+ // Move over the free list. Concatenate makes sure that the source free list
+ // gets properly reset after moving over all nodes.
+ intptr_t freed_bytes = free_list_.Concatenate(other->free_list());
+
+ // Moved memory is not recorded as allocated memory, but rather increases and
+ // decreases capacity of the corresponding spaces. Used size and waste size
+ // are maintained by the receiving space upon allocating and freeing blocks.
+ other->accounting_stats_.DecreaseCapacity(freed_bytes);
+ accounting_stats_.IncreaseCapacity(freed_bytes);
+}
+
+
+void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
+ // Unmerged fields:
+ // area_size_
+ // allocation_info_
+ // end_of_unswept_pages_
+ // unswept_free_bytes_
+ // anchor_
+
+ MoveOverFreeMemory(other);
+
+ // Update and clear accounting statistics.
+ accounting_stats_.Merge(other->accounting_stats_);
+ other->accounting_stats_.Reset();
+
+ // Move over pages.
+ PageIterator it(other);
+ Page* p = nullptr;
+ while (it.has_next()) {
+ p = it.next();
+ p->Unlink();
+ p->set_owner(this);
+ p->InsertAfter(anchor_.prev_page());
+ }
+}
+
+
size_t PagedSpace::CommittedPhysicalMemory() {
if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -1006,29 +1069,27 @@ Object* PagedSpace::FindObject(Address addr) {
}
-bool PagedSpace::CanExpand() {
+bool PagedSpace::CanExpand(size_t size) {
DCHECK(heap()->mark_compact_collector()->is_compacting() ||
Capacity() <= heap()->MaxOldGenerationSize());
- DCHECK(heap()->CommittedOldGenerationMemory() <=
- heap()->MaxOldGenerationSize() +
- PagedSpace::MaxEmergencyMemoryAllocated());
- // Are we going to exceed capacity for this space?
- if (!heap()->CanExpandOldGeneration(Page::kPageSize)) return false;
+ // Are we going to exceed capacity for this space? At this point we can be
+ // way over the maximum size because of AlwaysAllocate scopes and large
+ // objects.
+ if (!heap()->CanExpandOldGeneration(static_cast<int>(size))) return false;
return true;
}
bool PagedSpace::Expand() {
- if (!CanExpand()) return false;
-
intptr_t size = AreaSize();
-
- if (anchor_.next_page() == &anchor_) {
+ if (snapshotable() && !HasPages()) {
size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
}
+ if (!CanExpand(size)) return false;
+
Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
executable());
if (p == NULL) return false;
@@ -1037,9 +1098,6 @@ bool PagedSpace::Expand() {
if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
- DCHECK(heap()->CommittedOldGenerationMemory() <=
- heap()->MaxOldGenerationSize() +
- PagedSpace::MaxEmergencyMemoryAllocated());
p->InsertAfter(anchor_.prev_page());
@@ -1109,51 +1167,6 @@ void PagedSpace::ReleasePage(Page* page) {
}
-intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
- // New space and large object space.
- static const int spaces_without_emergency_memory = 2;
- static const int spaces_with_emergency_memory =
- LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
- return Page::kPageSize * spaces_with_emergency_memory;
-}
-
-
-void PagedSpace::CreateEmergencyMemory() {
- if (identity() == CODE_SPACE) {
- // Make the emergency block available to the allocator.
- CodeRange* code_range = heap()->isolate()->code_range();
- if (code_range != NULL && code_range->valid()) {
- code_range->ReleaseEmergencyBlock();
- }
- DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
- }
- emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
- AreaSize(), AreaSize(), executable(), this);
-}
-
-
-void PagedSpace::FreeEmergencyMemory() {
- Page* page = static_cast<Page*>(emergency_memory_);
- DCHECK(page->LiveBytes() == 0);
- DCHECK(AreaSize() == page->area_size());
- DCHECK(!free_list_.ContainsPageFreeListItems(page));
- heap()->isolate()->memory_allocator()->Free(page);
- emergency_memory_ = NULL;
-}
-
-
-void PagedSpace::UseEmergencyMemory() {
- // Page::Initialize makes the chunk into a real page and adds it to the
- // accounting for this space. Unlike PagedSpace::Expand, we don't check
- // CanExpand first, so we can go over the limits a little here. That's OK,
- // because we are in the process of compacting which will free up at least as
- // much memory as it allocates.
- Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
- page->InsertAfter(anchor_.prev_page());
- emergency_memory_ = NULL;
-}
-
-
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
@@ -1284,11 +1297,9 @@ void NewSpace::TearDown() {
to_space_.TearDown();
from_space_.TearDown();
- LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
+ heap()->isolate()->memory_allocator()->FreeNewSpaceMemory(
+ chunk_base_, &reservation_, NOT_EXECUTABLE);
- DCHECK(reservation_.IsReserved());
- heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
- NOT_EXECUTABLE);
chunk_base_ = NULL;
chunk_size_ = 0;
}
@@ -1380,6 +1391,7 @@ void NewSpace::UpdateAllocationInfo() {
void NewSpace::ResetAllocationInfo() {
+ Address old_top = allocation_info_.top();
to_space_.Reset();
UpdateAllocationInfo();
pages_used_ = 0;
@@ -1388,6 +1400,7 @@ void NewSpace::ResetAllocationInfo() {
while (it.has_next()) {
Bitmap::Clear(it.next());
}
+ InlineAllocationStep(old_top, allocation_info_.top());
}
@@ -1397,7 +1410,7 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
allocation_info_.set_limit(Min(new_top, high));
- } else if (inline_allocation_limit_step() == 0) {
+ } else if (inline_allocation_limit_step_ == 0) {
// Normal limit is the end of the current page.
allocation_info_.set_limit(to_space_.page_high());
} else {
@@ -1466,13 +1479,9 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
return false;
}
- // Do a step for the bytes allocated on the last page.
- int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated,
- IncrementalMarking::GC_VIA_STACK_GUARD);
- old_top = allocation_info_.top();
- top_on_previous_step_ = old_top;
+ InlineAllocationStep(old_top, allocation_info_.top());
+ old_top = allocation_info_.top();
high = to_space_.page_high();
filler_size = Heap::GetFillToAlign(old_top, alignment);
aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -1482,19 +1491,27 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
if (allocation_info_.limit() < high) {
// Either the limit has been lowered because linear allocation was disabled
- // or because incremental marking wants to get a chance to do a step. Set
- // the new limit accordingly.
+ // or because incremental marking wants to get a chance to do a step,
+ // or because the idle scavenge job wants to get a chance to post a task.
+ // Set the new limit accordingly.
Address new_top = old_top + aligned_size_in_bytes;
- int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated,
- IncrementalMarking::GC_VIA_STACK_GUARD);
+ InlineAllocationStep(new_top, new_top);
UpdateInlineAllocationLimit(aligned_size_in_bytes);
- top_on_previous_step_ = new_top;
}
return true;
}
+void NewSpace::InlineAllocationStep(Address top, Address new_top) {
+ if (top_on_previous_step_) {
+ int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
+ heap()->ScheduleIdleScavengeIfNeeded(bytes_allocated);
+ heap()->incremental_marking()->Step(bytes_allocated,
+ IncrementalMarking::GC_VIA_STACK_GUARD);
+ top_on_previous_step_ = new_top;
+ }
+}
+
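// [Editor's note] top_on_previous_step_ acts as a watermark: each step
// reports only the bytes allocated since the previous step. For example, a
// step observing top == 0x1040 after a previous watermark of 0x1000 reports
// 0x40 bytes to the idle-scavenge scheduler and the incremental marker, then
// advances the watermark to new_top.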
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
@@ -2051,9 +2068,10 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
if (category->top() != NULL) {
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
- // reverse order.
- base::LockGuard<base::Mutex> target_lock_guard(mutex());
- base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
+ // reverse order. Furthermore, we only lock if the PagedSpace containing
+ // the free list is known to be globally available, i.e., not local.
+ if (!this->owner()->owner()->is_local()) mutex()->Lock();
+ if (!category->owner()->owner()->is_local()) category->mutex()->Lock();
DCHECK(category->end_ != NULL);
free_bytes = category->available();
if (end_ == NULL) {
@@ -2065,6 +2083,8 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
base::NoBarrier_Store(&top_, category->top_);
available_ += category->available();
category->Reset();
+ if (!category->owner()->owner()->is_local()) category->mutex()->Unlock();
+ if (!this->owner()->owner()->is_local()) mutex()->Unlock();
}
return free_bytes;
}
@@ -2149,7 +2169,6 @@ FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
- DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
free_space->set_next(top());
set_top(free_space);
if (end_ == NULL) {
@@ -2173,7 +2192,13 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
}
-FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
+FreeList::FreeList(PagedSpace* owner)
+ : owner_(owner),
+ heap_(owner->heap()),
+ small_list_(this),
+ medium_list_(this),
+ large_list_(this),
+ huge_list_(this) {
Reset();
}
@@ -2204,7 +2229,7 @@ int FreeList::Free(Address start, int size_in_bytes) {
Page* page = Page::FromAddress(start);
// Early return to drop too-small blocks on the floor.
- if (size_in_bytes < kSmallListMin) {
+ if (size_in_bytes <= kSmallListMin) {
page->add_non_available_small_blocks(size_in_bytes);
return size_in_bytes;
}
@@ -2356,16 +2381,14 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
owner_->Free(owner_->top(), old_linear_size);
+ owner_->SetTopAndLimit(nullptr, nullptr);
owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
old_linear_size);
int new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
- if (new_node == NULL) {
- owner_->SetTopAndLimit(NULL, NULL);
- return NULL;
- }
+ if (new_node == nullptr) return nullptr;
int bytes_left = new_node_size - size_in_bytes;
DCHECK(bytes_left >= 0);
@@ -2409,10 +2432,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// linear allocation area.
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
new_node->address() + new_node_size);
- } else {
- // TODO(gc) Try not freeing linear allocation region when bytes_left
- // are zero.
- owner_->SetTopAndLimit(NULL, NULL);
}
return new_node;
@@ -2457,7 +2476,7 @@ intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
FreeSpace* cur = top();
while (cur != NULL) {
- DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
+ DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
sum += cur->nobarrier_size();
cur = cur->next();
}
@@ -2523,7 +2542,10 @@ intptr_t PagedSpace::SizeOfObjects() {
DCHECK(!FLAG_concurrent_sweeping ||
heap()->mark_compact_collector()->sweeping_in_progress() ||
(unswept_free_bytes_ == 0));
- return Size() - unswept_free_bytes_ - (limit() - top());
+ const intptr_t size = Size() - unswept_free_bytes_ - (limit() - top());
+ DCHECK_GE(size, 0);
+ USE(size);
+ return size;
}
@@ -2616,7 +2638,8 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Try to expand the space and allocate in the new next page.
if (Expand()) {
- DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
+ DCHECK((CountTotalPages() > 1) ||
+ (size_in_bytes <= free_list_.available()));
return free_list_.Allocate(size_in_bytes);
}
@@ -2933,19 +2956,27 @@ LargePage* LargeObjectSpace::FindPage(Address a) {
}
+void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
+ LargePage* current = first_page_;
+ while (current != NULL) {
+ HeapObject* object = current->GetObject();
+ MarkBit mark_bit = Marking::MarkBitFrom(object);
+ DCHECK(Marking::IsBlackOrGrey(mark_bit));
+ Marking::BlackToWhite(mark_bit);
+ Page::FromAddress(object->address())->ResetProgressBar();
+ Page::FromAddress(object->address())->ResetLiveBytes();
+ current = current->next_page();
+ }
+}
+
+
void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* previous = NULL;
LargePage* current = first_page_;
while (current != NULL) {
HeapObject* object = current->GetObject();
- // Can this large page contain pointers to non-trivial objects. No other
- // pointer object is this big.
- bool is_pointer_object = object->IsFixedArray();
MarkBit mark_bit = Marking::MarkBitFrom(object);
if (Marking::IsBlackOrGrey(mark_bit)) {
- Marking::BlackToWhite(mark_bit);
- Page::FromAddress(object->address())->ResetProgressBar();
- Page::FromAddress(object->address())->ResetLiveBytes();
previous = current;
current = current->next_page();
} else {
@@ -2976,14 +3007,9 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
static_cast<uint32_t>(key));
}
- if (is_pointer_object) {
- heap()->QueueMemoryChunkForFree(page);
- } else {
- heap()->isolate()->memory_allocator()->Free(page);
- }
+ heap()->QueueMemoryChunkForFree(page);
}
}
- heap()->FreeQueuedChunks();
}
@@ -2999,6 +3025,11 @@ bool LargeObjectSpace::Contains(HeapObject* object) {
}
+bool LargeObjectSpace::Contains(Address address) {
+ return FindPage(address) != NULL;
+}
+
+
#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 2ea2e909aa..95e3b7c602 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -6,6 +6,7 @@
#define V8_HEAP_SPACES_H_
#include "src/allocation.h"
+#include "src/atomic-utils.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
@@ -35,7 +36,7 @@ class Isolate;
// area.
//
// There is a separate large object space for objects larger than
-// Page::kMaxHeapObjectSize, so that they do not have to move during
+// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
@@ -91,13 +92,13 @@ class Isolate;
#define DCHECK_MAP_PAGE_INDEX(index) \
DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
-
-class PagedSpace;
-class MemoryAllocator;
class AllocationInfo;
-class Space;
+class CompactionSpace;
class FreeList;
+class MemoryAllocator;
class MemoryChunk;
+class PagedSpace;
+class Space;
class MarkBit {
public:
@@ -164,6 +165,10 @@ class Bitmap {
return index >> kBitsPerCellLog2;
}
+ V8_INLINE static uint32_t IndexInCell(uint32_t index) {
+ return index & kBitIndexMask;
+ }
+
INLINE(static uint32_t CellToIndex(uint32_t index)) {
return index << kBitsPerCellLog2;
}
@@ -183,7 +188,7 @@ class Bitmap {
}
inline MarkBit MarkBitFromIndex(uint32_t index) {
- MarkBit::CellType mask = 1 << (index & kBitIndexMask);
+ MarkBit::CellType mask = 1u << IndexInCell(index);
MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
return MarkBit(cell, mask);
}
@@ -255,6 +260,23 @@ class Bitmap {
}
return true;
}
+
+ // Clears all bits starting from {cell_base_index} up to and excluding
+ // {index}. Note that {cell_base_index} is required to be cell aligned.
+ void ClearRange(uint32_t cell_base_index, uint32_t index) {
+ DCHECK_EQ(IndexInCell(cell_base_index), 0);
+ DCHECK_GE(index, cell_base_index);
+ uint32_t start_cell_index = IndexToCell(cell_base_index);
+ uint32_t end_cell_index = IndexToCell(index);
+ DCHECK_GE(end_cell_index, start_cell_index);
+ // Clear all cells till the cell containing the last index.
+ for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
+ cells()[i] = 0;
+ }
+ // Clear all bits in the last cell till the last bit before index.
+ uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
+ cells()[end_cell_index] &= clear_mask;
+ }
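// [Editor's example] With 32-bit cells, ClearRange(32, 70) zeroes cell 1
// (bits 32..63) in the loop, then masks cell 2 with ~((1u << 6) - 1),
// clearing bits 64..69 while leaving bit 70 and above untouched.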
};
@@ -267,37 +289,190 @@ class SlotsBuffer;
// any heap object.
class MemoryChunk {
public:
+ enum MemoryChunkFlags {
+ IS_EXECUTABLE,
+ ABOUT_TO_BE_FREED,
+ POINTERS_TO_HERE_ARE_INTERESTING,
+ POINTERS_FROM_HERE_ARE_INTERESTING,
+ SCAN_ON_SCAVENGE,
+ IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
+ IN_TO_SPACE, // All pages in new space have one of these two set.
+ NEW_SPACE_BELOW_AGE_MARK,
+ EVACUATION_CANDIDATE,
+ RESCAN_ON_EVACUATION,
+ NEVER_EVACUATE, // May contain immortal immutables.
+ POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC.
+
+ // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
+ // otherwise marking bits are still intact.
+ WAS_SWEPT,
+
+ // Large objects can have a progress bar in their page header. These objects
+ // are scanned in increments and will be kept black while being scanned.
+ // Even if the mutator writes to them, they will be kept black, and a
+ // white-to-grey transition is performed on the value.
+ HAS_PROGRESS_BAR,
+
+ // This flag is intended to be used for testing. Works only when both
+ // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
+ // are set. It forces the page to become an evacuation candidate at next
+ // candidates selection cycle.
+ FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
+
+ // The memory chunk is already logically freed, however the actual freeing
+ // still has to be performed.
+ PRE_FREED,
+
+ // Last flag, keep at bottom.
+ NUM_MEMORY_CHUNK_FLAGS
+ };
+
+ // |kCompactingDone|: Initial compaction state of a |MemoryChunk|.
+ // |kCompactingInProgress|: Parallel compaction is currently in progress.
+ // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
+ // be finalized.
+ // |kCompactingAborted|: Parallel compaction has been aborted, which should
+ // for now only happen in OOM scenarios.
+ enum ParallelCompactingState {
+ kCompactingDone,
+ kCompactingInProgress,
+ kCompactingFinalize,
+ kCompactingAborted,
+ };
+
+ // |kSweepingDone|: The page state when sweeping is complete or sweeping must
+ // not be performed on that page.
+ // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will
+ // not touch the page memory anymore.
+ // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
+ // |kSweepingPending|: This page is ready for parallel sweeping.
+ enum ParallelSweepingState {
+ kSweepingDone,
+ kSweepingFinalize,
+ kSweepingInProgress,
+ kSweepingPending
+ };
+
+ // Every n write barrier invocations we go to runtime even though
+ // we could have handled it in generated code. This lets us check
+ // whether we have hit the limit and should do some more marking.
+ static const int kWriteBarrierCounterGranularity = 500;
+
+ static const int kPointersToHereAreInterestingMask =
+ 1 << POINTERS_TO_HERE_ARE_INTERESTING;
+
+ static const int kPointersFromHereAreInterestingMask =
+ 1 << POINTERS_FROM_HERE_ARE_INTERESTING;
+
+ static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
+
+ static const int kSkipEvacuationSlotsRecordingMask =
+ (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
+ (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
+
+ static const intptr_t kAlignment =
+ (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+ static const intptr_t kAlignmentMask = kAlignment - 1;
+
+ static const intptr_t kSizeOffset = 0;
+
+ static const intptr_t kLiveBytesOffset =
+ kSizeOffset + kPointerSize // size_t size
+ + kIntptrSize // intptr_t flags_
+ + kPointerSize // Address area_start_
+ + kPointerSize // Address area_end_
+ + 2 * kPointerSize // base::VirtualMemory reservation_
+ + kPointerSize // Address owner_
+ + kPointerSize // Heap* heap_
+ + kIntSize; // int store_buffer_counter_
+
+ static const size_t kSlotsBufferOffset =
+ kLiveBytesOffset + kIntSize; // int live_byte_count_
+
+ static const size_t kWriteBarrierCounterOffset =
+ kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_;
+ + kPointerSize; // SkipList* skip_list_;
+
+ static const size_t kMinHeaderSize =
+ kWriteBarrierCounterOffset +
+ kIntptrSize // intptr_t write_barrier_counter_
+ + kIntSize // int progress_bar_
+ + kPointerSize // AtomicValue high_water_mark_
+ + kPointerSize // base::Mutex* mutex_
+ + kPointerSize // base::AtomicWord parallel_sweeping_
+ + kPointerSize // AtomicValue parallel_compaction_
+ + 5 * kPointerSize // AtomicNumber free-list statistics
+ + kPointerSize // AtomicValue next_chunk_
+ + kPointerSize; // AtomicValue prev_chunk_
+
+ // We add some more space to the computed header size to account for missing
+ // alignment requirements in our computation.
+ // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
+ static const size_t kHeaderSize = kMinHeaderSize + kIntSize;
+
+ static const int kBodyOffset =
+ CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
+
+ // The start offset of the object area in a page. Aligned to both maps and
+ // code alignment to be suitable for both. Also aligned to 32 words because
+ // the marking bitmap is arranged in 32 bit chunks.
+ static const int kObjectStartAlignment = 32 * kPointerSize;
+ static const int kObjectStartOffset =
+ kBodyOffset - 1 +
+ (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
+
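// [Editor's note] kObjectStartOffset is the usual round-up-to-a-multiple
// idiom: the smallest multiple of kObjectStartAlignment that is >=
// kBodyOffset. E.g. for kBodyOffset == 300 and a 128-byte alignment
// (32 * 4 on 32-bit), it yields 299 + (128 - 299 % 128) = 384 == 3 * 128.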
+ static const int kFlagsOffset = kPointerSize;
+
+ static void IncrementLiveBytesFromMutator(HeapObject* object, int by);
+
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
}
+
static const MemoryChunk* FromAddress(const byte* a) {
return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
~kAlignmentMask);
}
+ static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
+ MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
+ }
+
// Only works for addresses in pointer spaces, not data or code spaces.
static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
+ static inline uint32_t FastAddressToMarkbitIndex(Address addr) {
+ const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
+ return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
+ }
+
+ static inline void UpdateHighWaterMark(Address mark) {
+ if (mark == nullptr) return;
+ // Need to subtract one from the mark because when a chunk is full the
+ // top points to the next address after the chunk, which effectively belongs
+ // to another chunk. See the comment to Page::FromAllocationTop.
+ MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
+ intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
+ intptr_t old_mark = 0;
+ do {
+ old_mark = chunk->high_water_mark_.Value();
+ } while ((new_mark > old_mark) &&
+ !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
+ }
+
Address address() { return reinterpret_cast<Address>(this); }
bool is_valid() { return address() != NULL; }
- MemoryChunk* next_chunk() const {
- return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
- }
+ MemoryChunk* next_chunk() { return next_chunk_.Value(); }
- MemoryChunk* prev_chunk() const {
- return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
- }
+ MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
- void set_next_chunk(MemoryChunk* next) {
- base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
- }
+ void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
- void set_prev_chunk(MemoryChunk* prev) {
- base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
- }
+ void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
Space* owner() const {
if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
@@ -318,8 +493,6 @@ class MemoryChunk {
base::VirtualMemory* reserved_memory() { return &reservation_; }
- void InitializeReservedMemory() { reservation_.Reset(); }
-
void set_reserved_memory(base::VirtualMemory* reservation) {
DCHECK_NOT_NULL(reservation);
reservation_.TakeControl(reservation);
@@ -351,59 +524,6 @@ class MemoryChunk {
return addr >= area_start() && addr <= area_end();
}
- // Every n write barrier invocations we go to runtime even though
- // we could have handled it in generated code. This lets us check
- // whether we have hit the limit and should do some more marking.
- static const int kWriteBarrierCounterGranularity = 500;
-
- enum MemoryChunkFlags {
- IS_EXECUTABLE,
- ABOUT_TO_BE_FREED,
- POINTERS_TO_HERE_ARE_INTERESTING,
- POINTERS_FROM_HERE_ARE_INTERESTING,
- SCAN_ON_SCAVENGE,
- IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
- IN_TO_SPACE, // All pages in new space has one of these two set.
- NEW_SPACE_BELOW_AGE_MARK,
- EVACUATION_CANDIDATE,
- RESCAN_ON_EVACUATION,
- NEVER_EVACUATE, // May contain immortal immutables.
- POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC.
-
- // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
- // otherwise marking bits are still intact.
- WAS_SWEPT,
-
- // Large objects can have a progress bar in their page header. These object
- // are scanned in increments and will be kept black while being scanned.
- // Even if the mutator writes to them they will be kept black and a white
- // to grey transition is performed in the value.
- HAS_PROGRESS_BAR,
-
- // This flag is intended to be used for testing. Works only when both
- // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
- // are set. It forces the page to become an evacuation candidate at next
- // candidates selection cycle.
- FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
-
- // Last flag, keep at bottom.
- NUM_MEMORY_CHUNK_FLAGS
- };
-
-
- static const int kPointersToHereAreInterestingMask =
- 1 << POINTERS_TO_HERE_ARE_INTERESTING;
-
- static const int kPointersFromHereAreInterestingMask =
- 1 << POINTERS_FROM_HERE_ARE_INTERESTING;
-
- static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
-
- static const int kSkipEvacuationSlotsRecordingMask =
- (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
- (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
-
-
void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
@@ -430,27 +550,12 @@ class MemoryChunk {
// Return all current flags.
intptr_t GetFlags() { return flags_; }
-
- // SWEEPING_DONE - The page state when sweeping is complete or sweeping must
- // not be performed on that page.
- // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
- // not touch the page memory anymore.
- // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
- // SWEEPING_PENDING - This page is ready for parallel sweeping.
- enum ParallelSweepingState {
- SWEEPING_DONE,
- SWEEPING_FINALIZE,
- SWEEPING_IN_PROGRESS,
- SWEEPING_PENDING
- };
-
- ParallelSweepingState parallel_sweeping() {
- return static_cast<ParallelSweepingState>(
- base::Acquire_Load(&parallel_sweeping_));
+ AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
+ return parallel_sweeping_;
}
- void set_parallel_sweeping(ParallelSweepingState state) {
- base::Release_Store(&parallel_sweeping_, state);
+ AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
+ return parallel_compaction_;
}
bool TryLock() { return mutex_->TryLock(); }
@@ -466,7 +571,9 @@ class MemoryChunk {
DCHECK(SweepingCompleted());
}
- bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
+ bool SweepingCompleted() {
+ return parallel_sweeping_state().Value() <= kSweepingFinalize;
+ }
// Manage live byte count (count of bytes known to be live,
// because they are marked black).
@@ -523,49 +630,6 @@ class MemoryChunk {
progress_bar();
}
- static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
- MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
- }
-
- static void IncrementLiveBytesFromMutator(HeapObject* object, int by);
-
- static const intptr_t kAlignment =
- (static_cast<uintptr_t>(1) << kPageSizeBits);
-
- static const intptr_t kAlignmentMask = kAlignment - 1;
-
- static const intptr_t kSizeOffset = 0;
-
- static const intptr_t kLiveBytesOffset =
- kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize +
- kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize;
-
- static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
-
- static const size_t kWriteBarrierCounterOffset =
- kSlotsBufferOffset + kPointerSize + kPointerSize;
-
- static const size_t kHeaderSize = kWriteBarrierCounterOffset +
- kPointerSize + // write_barrier_counter_
- kIntSize + // progress_bar_
- kIntSize + // high_water_mark_
- kPointerSize + // mutex_ page lock
- kPointerSize + // parallel_sweeping_
- 5 * kPointerSize + // free list statistics
- kPointerSize + // next_chunk_
- kPointerSize; // prev_chunk_
-
- static const int kBodyOffset =
- CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
-
- // The start offset of the object area in a page. Aligned to both maps and
- // code alignment to be suitable for both. Also aligned to 32 words because
- // the marking bitmap is arranged in 32 bit chunks.
- static const int kObjectStartAlignment = 32 * kPointerSize;
- static const int kObjectStartOffset =
- kBodyOffset - 1 +
- (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
-
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
@@ -587,7 +651,6 @@ class MemoryChunk {
bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
- // ---------------------------------------------------------------------
// Markbits support
inline Bitmap* markbits() {
@@ -600,12 +663,6 @@ class MemoryChunk {
return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
}
- inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
- const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
-
- return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
- }
-
inline Address MarkbitIndexToAddress(uint32_t index) {
return this->address() + (index << kPointerSizeLog2);
}
@@ -615,8 +672,6 @@ class MemoryChunk {
inline Heap* heap() const { return heap_; }
- static const int kFlagsOffset = kPointerSize;
-
bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
@@ -655,21 +710,16 @@ class MemoryChunk {
bool CommitArea(size_t requested);
// Approximate amount of physical memory committed for this chunk.
- size_t CommittedPhysicalMemory() { return high_water_mark_; }
+ size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
- static inline void UpdateHighWaterMark(Address mark) {
- if (mark == NULL) return;
- // Need to subtract one from the mark because when a chunk is full the
- // top points to the next address after the chunk, which effectively belongs
- // to another chunk. See the comment to Page::FromAllocationTop.
- MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
- int new_mark = static_cast<int>(mark - chunk->address());
- if (new_mark > chunk->high_water_mark_) {
- chunk->high_water_mark_ = new_mark;
- }
- }
+ // Should be called when memory chunk is about to be freed.
+ void ReleaseAllocatedMemory();
protected:
+ static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
+ Address area_start, Address area_end,
+ Executability executable, Space* owner);
+
size_t size_;
intptr_t flags_;
@@ -697,35 +747,32 @@ class MemoryChunk {
int progress_bar_;
// Assuming the initial allocation on a page is sequential,
// count the highest number of bytes ever allocated on the page.
- int high_water_mark_;
+ AtomicValue<intptr_t> high_water_mark_;
base::Mutex* mutex_;
- base::AtomicWord parallel_sweeping_;
+ AtomicValue<ParallelSweepingState> parallel_sweeping_;
+ AtomicValue<ParallelCompactingState> parallel_compaction_;
// PagedSpace free-list statistics.
- int available_in_small_free_list_;
- int available_in_medium_free_list_;
- int available_in_large_free_list_;
- int available_in_huge_free_list_;
- int non_available_small_blocks_;
+ AtomicNumber<intptr_t> available_in_small_free_list_;
+ AtomicNumber<intptr_t> available_in_medium_free_list_;
+ AtomicNumber<intptr_t> available_in_large_free_list_;
+ AtomicNumber<intptr_t> available_in_huge_free_list_;
+ AtomicNumber<intptr_t> non_available_small_blocks_;
- static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
- Address area_start, Address area_end,
- Executability executable, Space* owner);
-
- private:
// next_chunk_ holds a pointer of type MemoryChunk
- base::AtomicWord next_chunk_;
+ AtomicValue<MemoryChunk*> next_chunk_;
// prev_chunk_ holds a pointer of type MemoryChunk
- base::AtomicWord prev_chunk_;
+ AtomicValue<MemoryChunk*> prev_chunk_;
+
+ private:
+ void InitializeReservedMemory() { reservation_.Reset(); }
friend class MemoryAllocator;
+ friend class MemoryChunkValidator;
};
-STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
-
-
// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
@@ -808,21 +855,22 @@ class Page : public MemoryChunk {
void ResetFreeListStatistics();
int LiveBytesFromFreeList() {
- return area_size() - non_available_small_blocks_ -
- available_in_small_free_list_ - available_in_medium_free_list_ -
- available_in_large_free_list_ - available_in_huge_free_list_;
+ return static_cast<int>(
+ area_size() - non_available_small_blocks() -
+ available_in_small_free_list() - available_in_medium_free_list() -
+ available_in_large_free_list() - available_in_huge_free_list());
}
-#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
- type name() { return name##_; } \
- void set_##name(type name) { name##_ = name; } \
- void add_##name(type name) { name##_ += name; }
+#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
+ type name() { return name##_.Value(); } \
+ void set_##name(type name) { name##_.SetValue(name); } \
+ void add_##name(type name) { name##_.Increment(name); }
- FRAGMENTATION_STATS_ACCESSORS(int, non_available_small_blocks)
- FRAGMENTATION_STATS_ACCESSORS(int, available_in_small_free_list)
- FRAGMENTATION_STATS_ACCESSORS(int, available_in_medium_free_list)
- FRAGMENTATION_STATS_ACCESSORS(int, available_in_large_free_list)
- FRAGMENTATION_STATS_ACCESSORS(int, available_in_huge_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
#undef FRAGMENTATION_STATS_ACCESSORS
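The macro above stamps out a getter, a setter, and an incrementer per statistic; switching the backing type to AtomicNumber<intptr_t> lets sweeper threads bump the counters without a lock. A minimal sketch of the same pattern over std::atomic, which stands in for V8's AtomicNumber here (the Stats class and field name are hypothetical):

    #include <atomic>
    #include <cstdint>

    class Stats {
     public:
    // Generates name(), set_name(v), and add_name(v) over an atomic counter.
    #define STATS_ACCESSORS(type, name)                            \
      type name() { return name##_.load(); }                       \
      void set_##name(type value) { name##_.store(value); }        \
      void add_##name(type value) { name##_.fetch_add(value); }

      STATS_ACCESSORS(intptr_t, available_in_small_free_list)
    #undef STATS_ACCESSORS

     private:
      std::atomic<intptr_t> available_in_small_free_list_{0};
    };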
@@ -834,14 +882,11 @@ class Page : public MemoryChunk {
};
-STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
-
-
class LargePage : public MemoryChunk {
public:
HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
- inline LargePage* next_page() const {
+ inline LargePage* next_page() {
return static_cast<LargePage*>(next_chunk());
}
@@ -853,7 +898,6 @@ class LargePage : public MemoryChunk {
friend class MemoryAllocator;
};
-STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
@@ -907,6 +951,23 @@ class Space : public Malloced {
};
+class MemoryChunkValidator {
+ // Computed offsets should match the compiler generated ones.
+ STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
+ STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
+ offsetof(MemoryChunk, live_byte_count_));
+ STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset ==
+ offsetof(MemoryChunk, slots_buffer_));
+ STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
+ offsetof(MemoryChunk, write_barrier_counter_));
+
+ // Validate our estimates on the header size.
+ STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+ STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+ STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
+};
+
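MemoryChunkValidator bundles compile-time checks that the hand-computed header offsets match what the compiler actually lays out, so a drifting field order fails the build instead of corrupting the heap. A self-contained sketch of the pattern, with hypothetical names and offsets that assume a common platform layout:

    #include <cstddef>
    #include <cstdint>

    struct Header {
      size_t size;
      intptr_t flags;
    };

    // Hand-computed offsets; valid where intptr_t follows size_t with no
    // padding, as on typical platforms.
    constexpr size_t kSizeOffset = 0;
    constexpr size_t kFlagsOffset = sizeof(size_t);

    static_assert(offsetof(Header, size) == kSizeOffset,
                  "computed offset must match compiler layout");
    static_assert(offsetof(Header, flags) == kFlagsOffset,
                  "computed offset must match compiler layout");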
+
// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
@@ -924,10 +985,6 @@ class CodeRange {
// Returns false on failure.
bool SetUp(size_t requested_size);
- // Frees the range of virtual memory, and frees the data structures used to
- // manage it.
- void TearDown();
-
bool valid() { return code_range_ != NULL; }
Address start() {
DCHECK(valid());
@@ -953,10 +1010,11 @@ class CodeRange {
bool UncommitRawMemory(Address start, size_t length);
void FreeRawMemory(Address buf, size_t length);
- void ReserveEmergencyBlock();
- void ReleaseEmergencyBlock();
-
private:
+ // Frees the range of virtual memory, and frees the data structures used to
+ // manage it.
+ void TearDown();
+
Isolate* isolate_;
// The reserved range of virtual memory that all code objects are put in.
@@ -980,21 +1038,20 @@ class CodeRange {
size_t size;
};
+ // The global mutex guards free_list_ and allocation_list_ as GC threads may
+ // access both lists concurrently with the main thread.
+ base::Mutex code_range_mutex_;
+
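Every touch of free_list_ and allocation_list_ is expected to happen under this mutex. A minimal sketch of the guarded-access pattern, with std::mutex and std::lock_guard standing in for base::Mutex and base::LockGuard, and a hypothetical Block type:

    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct Block { void* start; size_t size; };

    class RangeSketch {
     public:
      void AddFreeBlock(const Block& b) {
        // Analogous to base::LockGuard<base::Mutex>: held for the scope
        // of the list mutation.
        std::lock_guard<std::mutex> guard(mutex_);
        free_list_.push_back(b);
      }

     private:
      std::mutex mutex_;
      std::vector<Block> free_list_;
    };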
// Freed blocks of memory are added to the free list. When the allocation
// list is exhausted, the free list is sorted and merged to make the new
// allocation list.
List<FreeBlock> free_list_;
+
// Memory is allocated from the free blocks on the allocation list.
// The block at current_allocation_block_index_ is the current block.
List<FreeBlock> allocation_list_;
int current_allocation_block_index_;
- // Emergency block guarantees that we can always allocate a page for
- // evacuation candidates when code space is compacted. Emergency block is
- // reserved immediately after GC and is released immediately before
- // allocating a page for evacuation.
- FreeBlock emergency_block_;
-
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
@@ -1079,23 +1136,36 @@ class MemoryAllocator {
LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
Executability executable);
- void Free(MemoryChunk* chunk);
+ // PreFree logically frees the object, i.e., it takes care of the size
+ // bookkeeping and calls the allocation callback.
+ void PreFreeMemory(MemoryChunk* chunk);
- // Returns the maximum available bytes of heaps.
- intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+ // FreeMemory can be called concurrently when PreFree was executed before.
+ void PerformFreeMemory(MemoryChunk* chunk);
+
+ // Free is a wrapper method, which calls PreFree and PerformFreeMemory
+ // together.
+ void Free(MemoryChunk* chunk);
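Splitting the free into two phases lets the cheap bookkeeping run on the owning thread while the actual release of memory can proceed concurrently. A sketch of that shape under those assumptions; Chunk and the std::free call are illustrative stand-ins, not V8's types:

    #include <atomic>
    #include <cstdint>
    #include <cstdlib>

    struct Chunk {
      void* base;
      intptr_t size;
    };

    class AllocatorSketch {
     public:
      // Phase 1: logical free; adjust accounting on the owning thread.
      void PreFree(Chunk* chunk) { size_.fetch_sub(chunk->size); }
      // Phase 2: actually release the memory; safe to run on another
      // thread once PreFree has happened.
      void PerformFree(Chunk* chunk) { std::free(chunk->base); }
      // Convenience wrapper doing both phases back to back.
      void Free(Chunk* chunk) {
        PreFree(chunk);
        PerformFree(chunk);
      }

     private:
      std::atomic<intptr_t> size_{0};
    };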
// Returns allocated spaces in bytes.
- intptr_t Size() { return size_; }
+ intptr_t Size() { return size_.Value(); }
+
+ // Returns allocated executable spaces in bytes.
+ intptr_t SizeExecutable() { return size_executable_.Value(); }
+
+ // Returns the maximum available bytes of heaps.
+ intptr_t Available() {
+ intptr_t size = Size();
+ return capacity_ < size ? 0 : capacity_ - size;
+ }
// Returns the maximum available executable bytes of heaps.
intptr_t AvailableExecutable() {
- if (capacity_executable_ < size_executable_) return 0;
- return capacity_executable_ - size_executable_;
+ intptr_t executable_size = SizeExecutable();
+ if (capacity_executable_ < executable_size) return 0;
+ return capacity_executable_ - executable_size;
}
- // Returns allocated executable spaces in bytes.
- intptr_t SizeExecutable() { return size_executable_; }
-
// Returns maximum available bytes that the old space can have.
intptr_t MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
@@ -1103,9 +1173,9 @@ class MemoryAllocator {
// Returns an indication of whether a pointer is in a space that has
// been allocated by this MemoryAllocator.
- V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
- return address < lowest_ever_allocated_ ||
- address >= highest_ever_allocated_;
+ V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
+ return address < lowest_ever_allocated_.Value() ||
+ address >= highest_ever_allocated_.Value();
}
#ifdef DEBUG
@@ -1128,6 +1198,8 @@ class MemoryAllocator {
bool CommitMemory(Address addr, size_t size, Executability executable);
+ void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
+ Executability executable);
void FreeMemory(base::VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
@@ -1183,22 +1255,22 @@ class MemoryAllocator {
Isolate* isolate_;
// Maximum space size in bytes.
- size_t capacity_;
+ intptr_t capacity_;
// Maximum subset of capacity_ that can be executable
- size_t capacity_executable_;
+ intptr_t capacity_executable_;
// Allocated space size in bytes.
- size_t size_;
+ AtomicNumber<intptr_t> size_;
// Allocated executable space size in bytes.
- size_t size_executable_;
+ AtomicNumber<intptr_t> size_executable_;
// We keep the lowest and highest addresses allocated as a quick way
// of determining that pointers are outside the heap. The estimate is
// conservative, i.e. not all addresses in 'allocated' space are allocated
// to our heap. The range is [lowest, highest[, inclusive on the low end
// and exclusive on the high end.
- void* lowest_ever_allocated_;
- void* highest_ever_allocated_;
+ AtomicValue<void*> lowest_ever_allocated_;
+ AtomicValue<void*> highest_ever_allocated_;
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -1221,8 +1293,16 @@ class MemoryAllocator {
PagedSpace* owner);
void UpdateAllocatedSpaceLimits(void* low, void* high) {
- lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
- highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+ // The use of atomic primitives does not guarantee correctness (wrt.
+ // desired semantics) by default. The loop here ensures that we update the
+ // values only if they did not change in between.
+ void* ptr = nullptr;
+ do {
+ ptr = lowest_ever_allocated_.Value();
+ } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
+ do {
+ ptr = highest_ever_allocated_.Value();
+ } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
}
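The retry loop is the standard lock-free way to maintain a monotonic bound: re-read, test, and try to swap, until either the swap succeeds or the bound no longer needs moving. The same pattern over std::atomic, which stands in for AtomicValue and TrySetValue here:

    #include <atomic>
    #include <cstdint>

    void UpdateMax(std::atomic<intptr_t>& max, intptr_t candidate) {
      intptr_t current = max.load();
      // Retry while our candidate is still larger and another thread keeps
      // changing the value under us; compare_exchange_weak reloads
      // `current` with the fresh value on failure.
      while (candidate > current &&
             !max.compare_exchange_weak(current, candidate)) {
      }
    }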
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
@@ -1443,6 +1523,23 @@ class AllocationStats BASE_EMBEDDED {
waste_ += size_in_bytes;
}
+ // Merge {other} into {this}.
+ void Merge(const AllocationStats& other) {
+ capacity_ += other.capacity_;
+ size_ += other.size_;
+ waste_ += other.waste_;
+ if (other.max_capacity_ > max_capacity_) {
+ max_capacity_ = other.max_capacity_;
+ }
+ }
+
+ void DecreaseCapacity(intptr_t size_in_bytes) {
+ capacity_ -= size_in_bytes;
+ DCHECK_GE(capacity_, 0);
+ }
+
+ void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
+
private:
intptr_t capacity_;
intptr_t max_capacity_;
@@ -1458,7 +1555,8 @@ class AllocationStats BASE_EMBEDDED {
// the end element of the linked list of free memory blocks.
class FreeListCategory {
public:
- FreeListCategory() : top_(0), end_(NULL), available_(0) {}
+ explicit FreeListCategory(FreeList* owner)
+ : top_(0), end_(NULL), available_(0), owner_(owner) {}
intptr_t Concatenate(FreeListCategory* category);
@@ -1498,6 +1596,8 @@ class FreeListCategory {
int FreeListLength();
#endif
+ FreeList* owner() { return owner_; }
+
private:
// top_ points to the top FreeSpace* in the free list category.
base::AtomicWord top_;
@@ -1506,6 +1606,8 @@ class FreeListCategory {
// Total available bytes in all blocks of this free list category.
int available_;
+
+ FreeList* owner_;
};
@@ -1558,7 +1660,7 @@ class FreeList {
// This method returns how much memory can be allocated after freeing
// maximum_freed memory.
static inline int GuaranteedAllocatable(int maximum_freed) {
- if (maximum_freed < kSmallListMin) {
+ if (maximum_freed <= kSmallListMin) {
return 0;
} else if (maximum_freed <= kSmallListMax) {
return kSmallAllocationMax;
@@ -1598,24 +1700,25 @@ class FreeList {
FreeListCategory* large_list() { return &large_list_; }
FreeListCategory* huge_list() { return &huge_list_; }
- static const int kSmallListMin = 0x20 * kPointerSize;
+ PagedSpace* owner() { return owner_; }
private:
// The size range of blocks, in bytes.
static const int kMinBlockSize = 3 * kPointerSize;
static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
- FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
-
- PagedSpace* owner_;
- Heap* heap_;
-
+ static const int kSmallListMin = 0x1f * kPointerSize;
static const int kSmallListMax = 0xff * kPointerSize;
static const int kMediumListMax = 0x7ff * kPointerSize;
static const int kLargeListMax = 0x3fff * kPointerSize;
- static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
+ static const int kSmallAllocationMax = kSmallListMin;
static const int kMediumAllocationMax = kSmallListMax;
static const int kLargeAllocationMax = kMediumListMax;
+
+ FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+
+ PagedSpace* owner_;
+ Heap* heap_;
FreeListCategory small_list_;
FreeListCategory medium_list_;
FreeListCategory large_list_;
@@ -1673,7 +1776,7 @@ class PagedSpace : public Space {
// Creates a space with an id.
PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
- virtual ~PagedSpace() {}
+ virtual ~PagedSpace() { TearDown(); }
// Set up the space using the given address range of virtual memory (from
// the memory allocator's initial chunk) if possible. If the block of
@@ -1685,10 +1788,6 @@ class PagedSpace : public Space {
// subsequently torn down.
bool HasBeenSetUp();
- // Cleans up the space, frees all pages in this space except those belonging
- // to the initial chunk, uncommits addresses in the initial chunk.
- void TearDown();
-
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
inline bool Contains(HeapObject* o);
@@ -1777,6 +1876,9 @@ class PagedSpace : public Space {
MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
int size_in_bytes);
+ MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
+ int size_in_bytes);
+
// Allocate the requested number of bytes in the space double aligned if
// possible, return a failure object if not.
MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
@@ -1888,7 +1990,7 @@ class PagedSpace : public Space {
void EvictEvacuationCandidatesFromFreeLists();
- bool CanExpand();
+ bool CanExpand(size_t size);
// Returns the number of total pages in this space.
int CountTotalPages();
@@ -1896,44 +1998,26 @@ class PagedSpace : public Space {
// Return size of allocatable area on a page in this space.
inline int AreaSize() { return area_size_; }
- void CreateEmergencyMemory();
- void FreeEmergencyMemory();
- void UseEmergencyMemory();
- intptr_t MaxEmergencyMemoryAllocated();
-
- bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
-
- protected:
- FreeList* free_list() { return &free_list_; }
+ // Merges {other} into the current space. Note that this modifies {other},
+ // e.g., removes its bump pointer area and resets statistics.
+ void MergeCompactionSpace(CompactionSpace* other);
- int area_size_;
-
- // Accounting information for this space.
- AllocationStats accounting_stats_;
-
- // The dummy page that anchors the double linked list of pages.
- Page anchor_;
+ void MoveOverFreeMemory(PagedSpace* other);
- // The space's free list.
- FreeList free_list_;
+ virtual bool is_local() { return false; }
- // Normal allocation information.
- AllocationInfo allocation_info_;
+ protected:
+ // PagedSpaces that should be included in snapshots have different, i.e.,
+ // smaller, initial pages.
+ virtual bool snapshotable() { return true; }
- // The number of free bytes which could be reclaimed by advancing the
- // concurrent sweeper threads.
- intptr_t unswept_free_bytes_;
+ FreeList* free_list() { return &free_list_; }
- // The sweeper threads iterate over the list of pointer and data space pages
- // and sweep these pages concurrently. They will stop sweeping after the
- // end_of_unswept_pages_ page.
- Page* end_of_unswept_pages_;
+ bool HasPages() { return anchor_.next_page() != &anchor_; }
- // Emergency memory is the memory of a full page for a given space, allocated
- // conservatively before evacuating a page. If compaction fails due to out
- // of memory error the emergency memory can be used to complete compaction.
- // If not used, the emergency memory is released after compaction.
- MemoryChunk* emergency_memory_;
+ // Cleans up the space, frees all pages in this space except those belonging
+ // to the initial chunk, uncommits addresses in the initial chunk.
+ void TearDown();
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate requested number of pages from OS, or if the hard heap
@@ -1959,8 +2043,34 @@ class PagedSpace : public Space {
// Slow path of AllocateRaw. This function is space-dependent.
MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
- friend class PageIterator;
+ int area_size_;
+
+ // Accounting information for this space.
+ AllocationStats accounting_stats_;
+
+ // The dummy page that anchors the double linked list of pages.
+ Page anchor_;
+
+ // The space's free list.
+ FreeList free_list_;
+
+ // Normal allocation information.
+ AllocationInfo allocation_info_;
+
+ // The number of free bytes which could be reclaimed by advancing the
+ // concurrent sweeper threads.
+ intptr_t unswept_free_bytes_;
+
+ // The sweeper threads iterate over the list of pointer and data space pages
+ // and sweep these pages concurrently. They will stop sweeping after the
+ // end_of_unswept_pages_ page.
+ Page* end_of_unswept_pages_;
+
+ // Mutex guarding any concurrent access to the space.
+ base::Mutex space_mutex_;
+
friend class MarkCompactCollector;
+ friend class PageIterator;
};
@@ -2016,13 +2126,13 @@ class NewSpacePage : public MemoryChunk {
static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
- inline NewSpacePage* next_page() const {
+ inline NewSpacePage* next_page() {
return static_cast<NewSpacePage*>(next_chunk());
}
inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
- inline NewSpacePage* prev_page() const {
+ inline NewSpacePage* prev_page() {
return static_cast<NewSpacePage*>(prev_chunk());
}
@@ -2337,7 +2447,8 @@ class NewSpace : public Space {
to_space_(heap, kToSpace),
from_space_(heap, kFromSpace),
reservation_(),
- inline_allocation_limit_step_(0) {}
+ inline_allocation_limit_step_(0),
+ top_on_previous_step_(0) {}
// Sets up the new space using the given chunk.
bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
@@ -2460,11 +2571,6 @@ class NewSpace : public Space {
return allocation_info_.top();
}
- void set_top(Address top) {
- DCHECK(to_space_.current_page()->ContainsLimit(top));
- allocation_info_.set_top(top);
- }
-
// Return the address of the allocation pointer limit in the active semispace.
Address limit() {
DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
@@ -2519,7 +2625,7 @@ class NewSpace : public Space {
void LowerInlineAllocationLimit(intptr_t step) {
inline_allocation_limit_step_ = step;
UpdateInlineAllocationLimit(0);
- top_on_previous_step_ = allocation_info_.top();
+ top_on_previous_step_ = step ? allocation_info_.top() : 0;
}
// Get the extent of the inactive semispace (for use as a marking stack,
@@ -2631,9 +2737,65 @@ class NewSpace : public Space {
bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
+ // If we are doing inline allocation in steps, this method performs the 'step'
+ // operation. Right now incremental marking is the only consumer of inline
+ // allocation steps. top is the memory address of the bump pointer at the last
+ // inline allocation (i.e. it determines the number of bytes actually
+ // allocated since the last step.) new_top is the address of the bump pointer
+ // where the next byte is going to be allocated from. top and new_top may be
+ // different when we cross a page boundary or reset the space.
+ void InlineAllocationStep(Address top, Address new_top);
+
friend class SemiSpaceIterator;
};
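Per the comment above, a step reports the bytes handed out since the last step (top minus the recorded previous top) and then re-arms at new_top. A simplified sketch, assuming incremental marking is the observer; the Observer type and the bookkeeping around it are illustrative, not V8's code:

    #include <cstdint>

    using Address = uint8_t*;

    // Stand-in for incremental marking, today's only step consumer.
    struct Observer {
      void Step(intptr_t bytes_allocated) {
        // e.g. advance marking by an amount proportional to allocation.
        (void)bytes_allocated;
      }
    };

    struct NewSpaceSketch {
      Address top_on_previous_step_ = nullptr;
      Observer observer_;

      void InlineAllocationStep(Address top, Address new_top) {
        if (top_on_previous_step_ != nullptr) {
          // Bytes actually allocated since the last step.
          observer_.Step(top - top_on_previous_step_);
        }
        top_on_previous_step_ = new_top;  // next step measures from here
      }
    };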
+// -----------------------------------------------------------------------------
+// Compaction space that is used temporarily during compaction.
+
+class CompactionSpace : public PagedSpace {
+ public:
+ CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
+ : PagedSpace(heap, id, executable) {}
+
+ // Adds external memory starting at {start} of {size_in_bytes} to the space.
+ void AddExternalMemory(Address start, int size_in_bytes) {
+ IncreaseCapacity(size_in_bytes);
+ Free(start, size_in_bytes);
+ }
+
+ virtual bool is_local() { return true; }
+
+ protected:
+ // The space is temporary and not included in any snapshots.
+ virtual bool snapshotable() { return false; }
+};
+
+
+// A collection of |CompactionSpace|s used by a single compaction task.
+class CompactionSpaceCollection : public Malloced {
+ public:
+ explicit CompactionSpaceCollection(Heap* heap)
+ : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
+ code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
+
+ CompactionSpace* Get(AllocationSpace space) {
+ switch (space) {
+ case OLD_SPACE:
+ return &old_space_;
+ case CODE_SPACE:
+ return &code_space_;
+ default:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return nullptr;
+ }
+
+ private:
+ CompactionSpace old_space_;
+ CompactionSpace code_space_;
+};
+
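Giving each compaction task its own CompactionSpaceCollection means evacuation can allocate without contending on the main spaces' free lists, with the results merged back afterwards via MergeCompactionSpace. A hypothetical usage sketch; EvacuatePage and the task loop are invented for illustration and assume the declarations above:

    #include <vector>

    void EvacuatePage(Page* page, CompactionSpace* target);  // hypothetical

    void CompactionTask(Heap* heap, const std::vector<Page*>& pages) {
      CompactionSpaceCollection spaces(heap);  // task-local spaces
      for (Page* page : pages) {
        // Evacuated objects are allocated from the task-local space, so
        // no lock on the shared OLD_SPACE free list is needed.
        EvacuatePage(page, spaces.Get(OLD_SPACE));
      }
      // The main thread later folds the task-local space back into the
      // real one, e.g. via PagedSpace::MergeCompactionSpace.
    }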
// -----------------------------------------------------------------------------
// Old object space (includes the old space of objects and code space)
@@ -2693,9 +2855,9 @@ class MapSpace : public PagedSpace {
// -----------------------------------------------------------------------------
-// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
-// the large object space. A large object is allocated from OS heap with
-// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
+// Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and
+// managed by the large object space. A large object is allocated from OS
+// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset within a page.
// Large objects do not move during garbage collections.
@@ -2744,11 +2906,15 @@ class LargeObjectSpace : public Space {
// if such a page doesn't exist.
LargePage* FindPage(Address a);
+ // Clears the marking state of live objects.
+ void ClearMarkingStateOfLiveObjects();
+
// Frees unmarked objects.
void FreeUnmarkedObjects();
// Checks whether a heap object is in this space; O(1).
bool Contains(HeapObject* obj);
+ bool Contains(Address address);
// Checks whether the space is empty.
bool IsEmpty() { return first_page_ == NULL; }
diff --git a/deps/v8/src/heap/store-buffer-inl.h b/deps/v8/src/heap/store-buffer-inl.h
index f168fe0a59..1f3dda21d2 100644
--- a/deps/v8/src/heap/store-buffer-inl.h
+++ b/deps/v8/src/heap/store-buffer-inl.h
@@ -6,21 +6,17 @@
#define V8_STORE_BUFFER_INL_H_
#include "src/heap/heap.h"
+#include "src/heap/spaces-inl.h"
#include "src/heap/store-buffer.h"
namespace v8 {
namespace internal {
-Address StoreBuffer::TopAddress() {
- return reinterpret_cast<Address>(heap_->store_buffer_top_address());
-}
-
-
void StoreBuffer::Mark(Address addr) {
DCHECK(!heap_->code_space()->Contains(addr));
Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
*top++ = addr;
- heap_->public_set_store_buffer_top(top);
+ heap_->set_store_buffer_top(reinterpret_cast<Smi*>(top));
if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
DCHECK(top == limit_);
Compact();
@@ -30,6 +26,12 @@ void StoreBuffer::Mark(Address addr) {
}
+inline void StoreBuffer::MarkSynchronized(Address addr) {
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ Mark(addr);
+}
+
+
void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
if (store_buffer_rebuilding_enabled_) {
SLOW_DCHECK(!heap_->code_space()->Contains(addr) &&
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 900b8fe1b0..2ed9deccff 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -7,7 +7,10 @@
#include <algorithm>
#include "src/counters.h"
+#include "src/heap/incremental-marking.h"
#include "src/heap/store-buffer-inl.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
#include "src/v8.h"
namespace v8 {
@@ -86,7 +89,7 @@ void StoreBuffer::SetUp() {
false)) { // Not executable.
V8::FatalProcessOutOfMemory("StoreBuffer::SetUp");
}
- heap_->public_set_store_buffer_top(start_);
+ heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
hash_set_1_ = new uintptr_t[kHashSetLength];
hash_set_2_ = new uintptr_t[kHashSetLength];
@@ -103,7 +106,7 @@ void StoreBuffer::TearDown() {
delete[] hash_set_2_;
old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
start_ = limit_ = NULL;
- heap_->public_set_store_buffer_top(start_);
+ heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
}
@@ -492,6 +495,11 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
obj_address + FixedTypedArrayBase::kBasePointerOffset,
obj_address + FixedTypedArrayBase::kHeaderSize,
slot_callback);
+ } else if (heap_object->IsBytecodeArray()) {
+ FindPointersToNewSpaceInRegion(
+ obj_address + BytecodeArray::kConstantPoolOffset,
+ obj_address + BytecodeArray::kHeaderSize,
+ slot_callback);
} else if (heap_object->IsJSArrayBuffer()) {
FindPointersToNewSpaceInRegion(
obj_address +
@@ -545,7 +553,7 @@ void StoreBuffer::Compact() {
// There's no check of the limit in the loop below so we check here for
// the worst case (compaction doesn't eliminate any pointers).
DCHECK(top <= limit_);
- heap_->public_set_store_buffer_top(start_);
+ heap_->set_store_buffer_top(reinterpret_cast<Smi*>(start_));
EnsureSpace(top - start_);
DCHECK(may_move_store_buffer_entries_);
// Goes through the addresses in the store buffer attempting to remove
@@ -587,5 +595,56 @@ void StoreBuffer::Compact() {
}
heap_->isolate()->counters()->store_buffer_compactions()->Increment();
}
+
+
+void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
+ if (event == kStoreBufferStartScanningPagesEvent) {
+ start_of_current_page_ = NULL;
+ current_page_ = NULL;
+ } else if (event == kStoreBufferScanningPageEvent) {
+ if (current_page_ != NULL) {
+ // If this page already overflowed the store buffer during this iteration,
+ if (current_page_->scan_on_scavenge()) {
+ // then wipe out the entries that have been added for it.
+ store_buffer_->SetTop(start_of_current_page_);
+ } else if (store_buffer_->Top() - start_of_current_page_ >=
+ (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
+ // Did we find too many pointers in the previous page? The heuristic is
+ // that no page can take more than 1/5 of the remaining slots in the store
+ // buffer.
+ current_page_->set_scan_on_scavenge(true);
+ store_buffer_->SetTop(start_of_current_page_);
+ } else {
+ // In this case the page we scanned took a reasonable number of slots in
+ // the store buffer. It has now been rehabilitated and is no longer
+ // marked scan_on_scavenge.
+ DCHECK(!current_page_->scan_on_scavenge());
+ }
+ }
+ start_of_current_page_ = store_buffer_->Top();
+ current_page_ = page;
+ } else if (event == kStoreBufferFullEvent) {
+ // The current page overflowed the store buffer again. Wipe out its entries
+ // in the store buffer and mark it scan-on-scavenge again. This may happen
+ // several times while scanning.
+ if (current_page_ == NULL) {
+ // Store Buffer overflowed while scanning promoted objects. These are not
+ // in any particular page, though they are likely to be clustered by the
+ // allocation routines.
+ store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
+ } else {
+ // Store Buffer overflowed while scanning a particular old space page for
+ // pointers to new space.
+ DCHECK(current_page_ == page);
+ DCHECK(page != NULL);
+ current_page_->set_scan_on_scavenge(true);
+ DCHECK(start_of_current_page_ != store_buffer_->Top());
+ store_buffer_->SetTop(start_of_current_page_);
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
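The two thresholds in the callback are consistent even though they look different: the code flags a page when its entries reach a quarter of the slots still free, and since x >= r/4 is (up to rounding) the same as x >= (x + r)/5, that matches the comment's one fifth of the slots that remained before the page was scanned. A worked numeric check of the condition, with illustrative values:

    #include <cassert>

    int main() {
      long used_by_page = 300;  // store_buffer_->Top() - start_of_current_page_
      long still_free = 1000;   // store_buffer_->Limit() - store_buffer_->Top()
      // 300 >= (1000 >> 2) == 250, equivalently 300 >= 1300 / 5 == 260.
      bool flag_scan_on_scavenge = used_by_page >= (still_free >> 2);
      assert(flag_scan_on_scavenge);  // page is flagged, entries rewound
      return 0;
    }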
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 6c571fcdce..37a78eb075 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -30,14 +30,16 @@ class StoreBuffer {
static void StoreBufferOverflow(Isolate* isolate);
- inline Address TopAddress();
-
void SetUp();
void TearDown();
- // This is used by the mutator to enter addresses into the store buffer.
+ // This is used to add addresses to the store buffer non-concurrently.
inline void Mark(Address addr);
+ // This is used to add addresses to the store buffer when multiple threads
+ // may operate on the store buffer.
+ inline void MarkSynchronized(Address addr);
+
// This is used by the heap traversal to enter the addresses into the store
// buffer that should still be in the store buffer after GC. It enters
// addresses directly into the old buffer because the GC starts by wiping the
@@ -131,6 +133,9 @@ class StoreBuffer {
uintptr_t* hash_set_2_;
bool hash_sets_are_empty_;
+ // Used for synchronization of concurrent store buffer access.
+ base::Mutex mutex_;
+
void ClearFilteringHashSets();
bool SpaceAvailable(intptr_t space_needed);
@@ -162,6 +167,26 @@ class StoreBuffer {
};
+class StoreBufferRebuilder {
+ public:
+ explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
+ : store_buffer_(store_buffer) {}
+
+ void Callback(MemoryChunk* page, StoreBufferEvent event);
+
+ private:
+ StoreBuffer* store_buffer_;
+
+ // We record in this variable how full the store buffer was when we started
+ // iterating over the current page, finding pointers to new space. If the
+ // store buffer overflows again, we can exempt the page from the store buffer
+ // by rewinding to this point instead of having to search the store buffer.
+ Object*** start_of_current_page_;
+ // The current page we are scanning in the store buffer iterator.
+ MemoryChunk* current_page_;
+};
+
+
class StoreBufferRebuildScope {
public:
explicit StoreBufferRebuildScope(Heap* heap, StoreBuffer* store_buffer,
diff --git a/deps/v8/src/hydrogen-dce.cc b/deps/v8/src/hydrogen-dce.cc
index 1d5bb7841a..50a300bd94 100644
--- a/deps/v8/src/hydrogen-dce.cc
+++ b/deps/v8/src/hydrogen-dce.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/hydrogen-dce.h"
-#include "src/v8.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 770d710a3a..4482155fbe 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/hydrogen-instructions.h"
#include "src/base/bits.h"
#include "src/double.h"
@@ -74,33 +74,26 @@ void HValue::InferRepresentation(HInferRepresentationPhase* h_infer) {
Representation HValue::RepresentationFromUses() {
if (HasNoUses()) return Representation::None();
-
- // Array of use counts for each representation.
- int use_count[Representation::kNumRepresentations] = { 0 };
+ Representation result = Representation::None();
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
Representation rep = use->observed_input_representation(it.index());
- if (rep.IsNone()) continue;
+ result = result.generalize(rep);
+
if (FLAG_trace_representation) {
PrintF("#%d %s is used by #%d %s as %s%s\n",
id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
(use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
}
- use_count[rep.kind()] += 1;
}
- if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]);
- int tagged_count = use_count[Representation::kTagged];
- int double_count = use_count[Representation::kDouble];
- int int32_count = use_count[Representation::kInteger32];
- int smi_count = use_count[Representation::kSmi];
-
- if (tagged_count > 0) return Representation::Tagged();
- if (double_count > 0) return Representation::Double();
- if (int32_count > 0) return Representation::Integer32();
- if (smi_count > 0) return Representation::Smi();
+ if (IsPhi()) {
+ result = result.generalize(
+ HPhi::cast(this)->representation_from_indirect_uses());
+ }
- return Representation::None();
+ // External representations are dealt with separately.
+ return result.IsExternal() ? Representation::None() : result;
}
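The rewrite replaces four per-kind use counters with a single fold over the representation lattice: every observed use is joined in via generalize, and the priority order the old counter cascade encoded (tagged over double over int32 over smi) falls out of the lattice ordering. A toy sketch of that fold, assuming a chain-shaped lattice (V8's real lattice is richer):

    // Toy chain lattice: None < Smi < Integer32 < Double < Tagged.
    enum class Rep { kNone, kSmi, kInteger32, kDouble, kTagged };

    Rep Generalize(Rep a, Rep b) {
      return a < b ? b : a;  // valid join because the toy lattice is a chain
    }

    template <typename It>
    Rep FromUses(It begin, It end) {
      Rep result = Rep::kNone;
      for (It it = begin; it != end; ++it) {
        result = Generalize(result, *it);  // fold each observed use in
      }
      return result;
    }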
@@ -811,9 +804,8 @@ bool HInstruction::CanDeoptimize() {
case HValue::kHasInstanceTypeAndBranch:
case HValue::kInnerAllocatedObject:
case HValue::kInstanceOf:
- case HValue::kInstanceOfKnownGlobal:
case HValue::kIsConstructCallAndBranch:
- case HValue::kIsObjectAndBranch:
+ case HValue::kHasInPrototypeChainAndBranch:
case HValue::kIsSmiAndBranch:
case HValue::kIsStringAndBranch:
case HValue::kIsUndetectableAndBranch:
@@ -869,7 +861,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kDiv:
case HValue::kForInCacheArray:
case HValue::kForInPrepareMap:
- case HValue::kFunctionLiteral:
case HValue::kInvokeFunction:
case HValue::kLoadContextSlot:
case HValue::kLoadFunctionPrototype:
@@ -881,6 +872,7 @@ bool HInstruction::CanDeoptimize() {
case HValue::kMul:
case HValue::kOsrEntry:
case HValue::kPower:
+ case HValue::kPrologue:
case HValue::kRor:
case HValue::kSar:
case HValue::kSeqStringSetChar:
@@ -934,8 +926,7 @@ std::ostream& HCallJSFunction::PrintDataTo(std::ostream& os) const { // NOLINT
HCallJSFunction* HCallJSFunction::New(Isolate* isolate, Zone* zone,
HValue* context, HValue* function,
- int argument_count,
- bool pass_argument_count) {
+ int argument_count) {
bool has_stack_check = false;
if (function->IsConstant()) {
HConstant* fun_const = HConstant::cast(function);
@@ -946,9 +937,7 @@ HCallJSFunction* HCallJSFunction::New(Isolate* isolate, Zone* zone,
jsfun->code()->kind() == Code::OPTIMIZED_FUNCTION);
}
- return new(zone) HCallJSFunction(
- function, argument_count, pass_argument_count,
- has_stack_check);
+ return new (zone) HCallJSFunction(function, argument_count, has_stack_check);
}
@@ -1099,7 +1088,7 @@ std::ostream& HCallNewArray::PrintDataTo(std::ostream& os) const { // NOLINT
std::ostream& HCallRuntime::PrintDataTo(std::ostream& os) const { // NOLINT
- os << name()->ToCString().get() << " ";
+ os << function()->name << " ";
if (save_doubles() == kSaveFPRegs) os << "[save doubles] ";
return os << "#" << argument_count();
}
@@ -1306,7 +1295,9 @@ std::ostream& HTypeofIsAndBranch::PrintDataTo(
}
-static String* TypeOfString(HConstant* constant, Isolate* isolate) {
+namespace {
+
+String* TypeOfString(HConstant* constant, Isolate* isolate) {
Heap* heap = isolate->heap();
if (constant->HasNumberValue()) return heap->number_string();
if (constant->IsUndetectable()) return heap->undefined_string();
@@ -1337,14 +1328,14 @@ static String* TypeOfString(HConstant* constant, Isolate* isolate) {
UNREACHABLE();
return nullptr;
}
- case JS_FUNCTION_TYPE:
- case JS_FUNCTION_PROXY_TYPE:
- return heap->function_string();
default:
+ if (constant->IsCallable()) return heap->function_string();
return heap->object_string();
}
}
+} // namespace
+
bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
if (FLAG_fold_constants && value()->IsConstant()) {
@@ -1746,7 +1737,7 @@ std::ostream& HCheckInstanceType::PrintDataTo(
std::ostream& HCallStub::PrintDataTo(std::ostream& os) const { // NOLINT
- os << CodeStub::MajorName(major_key_, false) << " ";
+ os << CodeStub::MajorName(major_key_) << " ";
return HUnaryCall::PrintDataTo(os);
}
@@ -2484,11 +2475,8 @@ std::ostream& HPhi::PrintTo(std::ostream& os) const { // NOLINT
for (int i = 0; i < OperandCount(); ++i) {
os << " " << NameOf(OperandAt(i)) << " ";
}
- return os << " uses:" << UseCount() << "_"
- << smi_non_phi_uses() + smi_indirect_uses() << "s_"
- << int32_non_phi_uses() + int32_indirect_uses() << "i_"
- << double_non_phi_uses() + double_indirect_uses() << "d_"
- << tagged_non_phi_uses() + tagged_indirect_uses() << "t"
+ return os << " uses" << UseCount()
+ << representation_from_indirect_uses().Mnemonic() << " "
<< TypeOf(this) << "]";
}
@@ -2547,7 +2535,12 @@ void HPhi::InitRealUses(int phi_id) {
HValue* value = it.value();
if (!value->IsPhi()) {
Representation rep = value->observed_input_representation(it.index());
- non_phi_uses_[rep.kind()] += 1;
+ representation_from_non_phi_uses_ =
+ representation_from_non_phi_uses().generalize(rep);
+ if (rep.IsSmi() || rep.IsInteger32() || rep.IsDouble()) {
+ has_type_feedback_from_uses_ = true;
+ }
+
if (FLAG_trace_representation) {
PrintF("#%d Phi is used by real #%d %s as %s\n",
id(), value->id(), value->Mnemonic(), rep.Mnemonic());
@@ -2567,24 +2560,16 @@ void HPhi::InitRealUses(int phi_id) {
void HPhi::AddNonPhiUsesFrom(HPhi* other) {
if (FLAG_trace_representation) {
- PrintF("adding to #%d Phi uses of #%d Phi: s%d i%d d%d t%d\n",
- id(), other->id(),
- other->non_phi_uses_[Representation::kSmi],
- other->non_phi_uses_[Representation::kInteger32],
- other->non_phi_uses_[Representation::kDouble],
- other->non_phi_uses_[Representation::kTagged]);
+ PrintF(
+ "generalizing use representation '%s' of #%d Phi "
+ "with uses of #%d Phi '%s'\n",
+ representation_from_indirect_uses().Mnemonic(), id(), other->id(),
+ other->representation_from_non_phi_uses().Mnemonic());
}
- for (int i = 0; i < Representation::kNumRepresentations; i++) {
- indirect_uses_[i] += other->non_phi_uses_[i];
- }
-}
-
-
-void HPhi::AddIndirectUsesTo(int* dest) {
- for (int i = 0; i < Representation::kNumRepresentations; i++) {
- dest[i] += indirect_uses_[i];
- }
+ representation_from_indirect_uses_ =
+ representation_from_indirect_uses().generalize(
+ other->representation_from_non_phi_uses());
}
@@ -2718,15 +2703,15 @@ HConstant::HConstant(Handle<Object> object, Representation r)
: HTemplateInstruction<0>(HType::FromValue(object)),
object_(Unique<Object>::CreateUninitialized(object)),
object_map_(Handle<Map>::null()),
- bit_field_(HasStableMapValueField::encode(false) |
- HasSmiValueField::encode(false) |
- HasInt32ValueField::encode(false) |
- HasDoubleValueField::encode(false) |
- HasExternalReferenceValueField::encode(false) |
- IsNotInNewSpaceField::encode(true) |
- BooleanValueField::encode(object->BooleanValue()) |
- IsUndetectableField::encode(false) |
- InstanceTypeField::encode(kUnknownInstanceType)) {
+ bit_field_(
+ HasStableMapValueField::encode(false) |
+ HasSmiValueField::encode(false) | HasInt32ValueField::encode(false) |
+ HasDoubleValueField::encode(false) |
+ HasExternalReferenceValueField::encode(false) |
+ IsNotInNewSpaceField::encode(true) |
+ BooleanValueField::encode(object->BooleanValue()) |
+ IsUndetectableField::encode(false) | IsCallableField::encode(false) |
+ InstanceTypeField::encode(kUnknownInstanceType)) {
if (object->IsHeapObject()) {
Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
Isolate* isolate = heap_object->GetIsolate();
@@ -2736,6 +2721,7 @@ HConstant::HConstant(Handle<Object> object, Representation r)
bit_field_ = InstanceTypeField::update(bit_field_, map->instance_type());
bit_field_ =
IsUndetectableField::update(bit_field_, map->is_undetectable());
+ bit_field_ = IsCallableField::update(bit_field_, map->is_callable());
if (map->is_stable()) object_map_ = Unique<Map>::CreateImmovable(map);
bit_field_ = HasStableMapValueField::update(
bit_field_,
@@ -3273,29 +3259,6 @@ bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
}
-bool ConstantIsObject(HConstant* constant, Isolate* isolate) {
- if (constant->HasNumberValue()) return false;
- if (constant->GetUnique().IsKnownGlobal(isolate->heap()->null_value())) {
- return true;
- }
- if (constant->IsUndetectable()) return false;
- InstanceType type = constant->GetInstanceType();
- return (FIRST_NONCALLABLE_SPEC_OBJECT_TYPE <= type) &&
- (type <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
-}
-
-
-bool HIsObjectAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
- if (FLAG_fold_constants && value()->IsConstant()) {
- *block = ConstantIsObject(HConstant::cast(value()), isolate())
- ? FirstSuccessor() : SecondSuccessor();
- return true;
- }
- *block = NULL;
- return false;
-}
-
-
bool HIsStringAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
if (known_successor_index() != kNoKnownSuccessorIndex) {
*block = SuccessorAt(known_successor_index());
@@ -4034,7 +3997,7 @@ DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
HInstruction* HStringAdd::New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* left, HValue* right, Strength strength,
+ HValue* left, HValue* right,
PretenureFlag pretenure_flag,
StringAddFlags flags,
Handle<AllocationSite> allocation_site) {
@@ -4052,8 +4015,8 @@ HInstruction* HStringAdd::New(Isolate* isolate, Zone* zone, HValue* context,
}
}
}
- return new (zone) HStringAdd(context, left, right, strength, pretenure_flag,
- flags, allocation_site);
+ return new (zone)
+ HStringAdd(context, left, right, pretenure_flag, flags, allocation_site);
}
@@ -4445,13 +4408,13 @@ void HPhi::InferRepresentation(HInferRepresentationPhase* h_infer) {
Representation HPhi::RepresentationFromInputs() {
- bool has_type_feedback =
- smi_non_phi_uses() + int32_non_phi_uses() + double_non_phi_uses() > 0;
Representation r = representation();
for (int i = 0; i < OperandCount(); ++i) {
// Ignore conservative Tagged assumption of parameters if we have
// reason to believe that it's too conservative.
- if (has_type_feedback && OperandAt(i)->IsParameter()) continue;
+ if (has_type_feedback_from_uses() && OperandAt(i)->IsParameter()) {
+ continue;
+ }
r = r.generalize(OperandAt(i)->KnownOptimalRepresentation());
}
@@ -4626,7 +4589,7 @@ HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset,
HObjectAccess HObjectAccess::ForField(Handle<Map> map, int index,
Representation representation,
- Handle<String> name) {
+ Handle<Name> name) {
if (index < 0) {
// Negative property indices are in-object properties, indexed
// from the end of the fixed part of the object.
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 807a651029..9f5bc2099c 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -98,17 +98,15 @@ class LChunkBuilder;
V(ForceRepresentation) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
+ V(HasInPrototypeChainAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -132,6 +130,7 @@ class LChunkBuilder;
V(OsrEntry) \
V(Parameter) \
V(Power) \
+ V(Prologue) \
V(PushArguments) \
V(RegExpLiteral) \
V(Return) \
@@ -1285,6 +1284,18 @@ class HDebugBreak final : public HTemplateInstruction<0> {
};
+class HPrologue final : public HTemplateInstruction<0> {
+ public:
+ static HPrologue* New(Zone* zone) { return new (zone) HPrologue(); }
+
+ Representation RequiredInputRepresentation(int index) override {
+ return Representation::None();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(Prologue)
+};
+
+
class HGoto final : public HTemplateControlInstruction<1, 0> {
public:
explicit HGoto(HBasicBlock* target) {
@@ -2216,8 +2227,7 @@ class HBinaryCall : public HCall<2> {
class HCallJSFunction final : public HCall<1> {
public:
static HCallJSFunction* New(Isolate* isolate, Zone* zone, HValue* context,
- HValue* function, int argument_count,
- bool pass_argument_count);
+ HValue* function, int argument_count);
HValue* function() const { return OperandAt(0); }
@@ -2228,8 +2238,6 @@ class HCallJSFunction final : public HCall<1> {
return Representation::Tagged();
}
- bool pass_argument_count() const { return pass_argument_count_; }
-
bool HasStackCheck() final { return has_stack_check_; }
DECLARE_CONCRETE_INSTRUCTION(CallJSFunction)
@@ -2238,15 +2246,12 @@ class HCallJSFunction final : public HCall<1> {
// The argument count includes the receiver.
HCallJSFunction(HValue* function,
int argument_count,
- bool pass_argument_count,
bool has_stack_check)
: HCall<1>(argument_count),
- pass_argument_count_(pass_argument_count),
has_stack_check_(has_stack_check) {
SetOperandAt(0, function);
}
- bool pass_argument_count_;
bool has_stack_check_;
};
@@ -2473,16 +2478,13 @@ class HCallNewArray final : public HBinaryCall {
class HCallRuntime final : public HCall<1> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallRuntime,
- Handle<String>,
- const Runtime::Function*,
- int);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallRuntime,
+ const Runtime::Function*, int);
std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
HValue* context() { return OperandAt(0); }
const Runtime::Function* function() const { return c_function_; }
- Handle<String> name() const { return name_; }
SaveFPRegsMode save_doubles() const { return save_doubles_; }
void set_save_doubles(SaveFPRegsMode save_doubles) {
save_doubles_ = save_doubles;
@@ -2495,17 +2497,15 @@ class HCallRuntime final : public HCall<1> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime)
private:
- HCallRuntime(HValue* context,
- Handle<String> name,
- const Runtime::Function* c_function,
+ HCallRuntime(HValue* context, const Runtime::Function* c_function,
int argument_count)
- : HCall<1>(argument_count), c_function_(c_function), name_(name),
+ : HCall<1>(argument_count),
+ c_function_(c_function),
save_doubles_(kDontSaveFPRegs) {
SetOperandAt(0, context);
}
const Runtime::Function* c_function_;
- Handle<String> name_;
SaveFPRegsMode save_doubles_;
};
@@ -2635,7 +2635,12 @@ class HUnaryMathOperation final : public HTemplateInstruction<2> {
SetFlag(kAllowUndefinedAsNaN);
}
- bool IsDeletable() const override { return true; }
+ bool IsDeletable() const override {
+ // TODO(crankshaft): This should be true; however, the semantics of this
+ // instruction also include the ToNumber conversion that is mentioned in the
+ // spec, which is of course observable.
+ return false;
+ }
HValue* SimplifiedDividendForMathFloorOfDiv(HDiv* hdiv);
HValue* SimplifiedDivisorForMathFloorOfDiv(HDiv* hdiv);
@@ -3258,14 +3263,7 @@ class InductionVariableData final : public ZoneObject {
class HPhi final : public HValue {
public:
HPhi(int merged_index, Zone* zone)
- : inputs_(2, zone),
- merged_index_(merged_index),
- phi_id_(-1),
- induction_variable_data_(NULL) {
- for (int i = 0; i < Representation::kNumRepresentations; i++) {
- non_phi_uses_[i] = 0;
- indirect_uses_[i] = 0;
- }
+ : inputs_(2, zone), merged_index_(merged_index) {
DCHECK(merged_index >= 0 || merged_index == kInvalidMergedIndex);
SetFlag(kFlexibleRepresentation);
SetFlag(kAllowUndefinedAsNaN);
@@ -3318,32 +3316,15 @@ class HPhi final : public HValue {
void InitRealUses(int id);
void AddNonPhiUsesFrom(HPhi* other);
- void AddIndirectUsesTo(int* use_count);
- int tagged_non_phi_uses() const {
- return non_phi_uses_[Representation::kTagged];
- }
- int smi_non_phi_uses() const {
- return non_phi_uses_[Representation::kSmi];
+ Representation representation_from_indirect_uses() const {
+ return representation_from_indirect_uses_;
}
- int int32_non_phi_uses() const {
- return non_phi_uses_[Representation::kInteger32];
- }
- int double_non_phi_uses() const {
- return non_phi_uses_[Representation::kDouble];
- }
- int tagged_indirect_uses() const {
- return indirect_uses_[Representation::kTagged];
- }
- int smi_indirect_uses() const {
- return indirect_uses_[Representation::kSmi];
- }
- int int32_indirect_uses() const {
- return indirect_uses_[Representation::kInteger32];
- }
- int double_indirect_uses() const {
- return indirect_uses_[Representation::kDouble];
+
+ bool has_type_feedback_from_uses() const {
+ return has_type_feedback_from_uses_;
}
+
int phi_id() { return phi_id_; }
static HPhi* cast(HValue* value) {
@@ -3364,13 +3345,19 @@ class HPhi final : public HValue {
}
private:
+ Representation representation_from_non_phi_uses() const {
+ return representation_from_non_phi_uses_;
+ }
+
ZoneList<HValue*> inputs_;
- int merged_index_;
+ int merged_index_ = 0;
- int non_phi_uses_[Representation::kNumRepresentations];
- int indirect_uses_[Representation::kNumRepresentations];
- int phi_id_;
- InductionVariableData* induction_variable_data_;
+ int phi_id_ = -1;
+ InductionVariableData* induction_variable_data_ = nullptr;
+
+ Representation representation_from_indirect_uses_ = Representation::None();
+ Representation representation_from_non_phi_uses_ = Representation::None();
+ bool has_type_feedback_from_uses_ = false;
// TODO(titzer): we can't eliminate the receiver for generating backtraces
bool IsDeletable() const override { return !IsReceiver(); }
@@ -3629,6 +3616,7 @@ class HConstant final : public HTemplateInstruction<0> {
bool HasBooleanValue() const { return type_.IsBoolean(); }
bool BooleanValue() const { return BooleanValueField::decode(bit_field_); }
+ bool IsCallable() const { return IsCallableField::decode(bit_field_); }
bool IsUndetectable() const {
return IsUndetectableField::decode(bit_field_);
}
@@ -3761,9 +3749,10 @@ class HConstant final : public HTemplateInstruction<0> {
class IsNotInNewSpaceField : public BitField<bool, 5, 1> {};
class BooleanValueField : public BitField<bool, 6, 1> {};
class IsUndetectableField : public BitField<bool, 7, 1> {};
+ class IsCallableField : public BitField<bool, 8, 1> {};
static const InstanceType kUnknownInstanceType = FILLER_TYPE;
- class InstanceTypeField : public BitField<InstanceType, 8, 8> {};
+ class InstanceTypeField : public BitField<InstanceType, 16, 8> {};
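With IsCallableField taking bit 8, the instance type moves up to bits 16 through 23, leaving bits 9 through 15 spare for future flags. A sketch of the shift-and-mask arithmetic behind a BitField of this shape; the template below is a simplified stand-in for V8's BitField (which also bounds-checks in encode):

    #include <cstdint>

    template <typename T, int shift, int size>
    struct BitFieldSketch {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word >> shift) & ((1u << size) - 1u));
      }
      static uint32_t update(uint32_t word, T value) {
        const uint32_t mask = ((1u << size) - 1u) << shift;
        return (word & ~mask) | encode(value);
      }
    };

    // Mirrors the layout above: a one-bit flag at bit 8, and the 8-bit
    // instance type moved up to bits 16..23.
    using IsCallableSketch = BitFieldSketch<bool, 8, 1>;
    using InstanceTypeSketch = BitFieldSketch<uint8_t, 16, 8>;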
// If this is a numerical constant, object_ either points to the
// HeapObject the constant originated from or is null. If the
@@ -4454,28 +4443,6 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
};
-class HIsObjectAndBranch final : public HUnaryControlInstruction {
- public:
- DECLARE_INSTRUCTION_FACTORY_P1(HIsObjectAndBranch, HValue*);
- DECLARE_INSTRUCTION_FACTORY_P3(HIsObjectAndBranch, HValue*,
- HBasicBlock*, HBasicBlock*);
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- bool KnownSuccessorBlock(HBasicBlock** block) override;
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
-
- private:
- HIsObjectAndBranch(HValue* value,
- HBasicBlock* true_target = NULL,
- HBasicBlock* false_target = NULL)
- : HUnaryControlInstruction(value, true_target, false_target) {}
-};
-
-
class HIsStringAndBranch final : public HUnaryControlInstruction {
public:
DECLARE_INSTRUCTION_FACTORY_P1(HIsStringAndBranch, HValue*);
@@ -4559,34 +4526,28 @@ class HIsUndetectableAndBranch final : public HUnaryControlInstruction {
};
-class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
+class HStringCompareAndBranch final : public HTemplateControlInstruction<2, 3> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HStringCompareAndBranch,
HValue*,
HValue*,
Token::Value);
- HValue* context() { return OperandAt(0); }
- HValue* left() { return OperandAt(1); }
- HValue* right() { return OperandAt(2); }
+ HValue* context() const { return OperandAt(0); }
+ HValue* left() const { return OperandAt(1); }
+ HValue* right() const { return OperandAt(2); }
Token::Value token() const { return token_; }
- std::ostream& PrintDataTo(std::ostream& os) const override; // NOLINT
+ std::ostream& PrintDataTo(std::ostream& os) const final; // NOLINT
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- Representation GetInputRepresentation() const {
+ Representation RequiredInputRepresentation(int index) final {
return Representation::Tagged();
}
DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
private:
- HStringCompareAndBranch(HValue* context,
- HValue* left,
- HValue* right,
+ HStringCompareAndBranch(HValue* context, HValue* left, HValue* right,
Token::Value token)
: token_(token) {
DCHECK(Token::IsCompareOp(token));
@@ -4595,9 +4556,11 @@ class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
SetOperandAt(2, right);
set_representation(Representation::Tagged());
SetChangesFlag(kNewSpacePromotion);
+ SetDependsOnFlag(kStringChars);
+ SetDependsOnFlag(kStringLengths);
}
- Token::Value token_;
+ Token::Value const token_;
};
@@ -4760,34 +4723,32 @@ class HInstanceOf final : public HBinaryOperation {
};
-class HInstanceOfKnownGlobal final : public HTemplateInstruction<2> {
+class HHasInPrototypeChainAndBranch final
+ : public HTemplateControlInstruction<2, 2> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOfKnownGlobal,
- HValue*,
- Handle<JSFunction>);
+ DECLARE_INSTRUCTION_FACTORY_P2(HHasInPrototypeChainAndBranch, HValue*,
+ HValue*);
- HValue* context() { return OperandAt(0); }
- HValue* left() { return OperandAt(1); }
- Handle<JSFunction> function() { return function_; }
+ HValue* object() const { return OperandAt(0); }
+ HValue* prototype() const { return OperandAt(1); }
Representation RequiredInputRepresentation(int index) override {
return Representation::Tagged();
}
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal)
+ bool ObjectNeedsSmiCheck() const {
+ return !object()->type().IsHeapObject() &&
+ !object()->representation().IsHeapObject();
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch)
private:
- HInstanceOfKnownGlobal(HValue* context,
- HValue* left,
- Handle<JSFunction> right)
- : HTemplateInstruction<2>(HType::Boolean()), function_(right) {
- SetOperandAt(0, context);
- SetOperandAt(1, left);
- set_representation(Representation::Tagged());
- SetAllSideEffects();
+ HHasInPrototypeChainAndBranch(HValue* object, HValue* prototype) {
+ SetOperandAt(0, object);
+ SetOperandAt(1, prototype);
+ SetDependsOnFlag(kCalls);
}
-
- Handle<JSFunction> function_;
};
@@ -5951,9 +5912,7 @@ class HObjectAccess final {
return Representation::FromKind(RepresentationField::decode(value_));
}
- inline Handle<String> name() const {
- return name_;
- }
+ inline Handle<Name> name() const { return name_; }
inline bool immutable() const {
return ImmutableField::decode(value_);
@@ -5987,6 +5946,11 @@ class HObjectAccess final {
Representation::Integer32());
}
+ static HObjectAccess ForOddballToNumber(
+ Representation representation = Representation::Tagged()) {
+ return HObjectAccess(kInobject, Oddball::kToNumberOffset, representation);
+ }
+
static HObjectAccess ForOddballTypeOf() {
return HObjectAccess(kInobject, Oddball::kTypeOfOffset,
Representation::HeapObject());
@@ -6016,7 +5980,7 @@ class HObjectAccess final {
static HObjectAccess ForAllocationSiteList() {
return HObjectAccess(kExternalMemory, 0, Representation::Tagged(),
- Handle<String>::null(), false, false);
+ Handle<Name>::null(), false, false);
}
static HObjectAccess ForFixedArrayLength() {
@@ -6179,12 +6143,12 @@ class HObjectAccess final {
static HObjectAccess ForCounter() {
return HObjectAccess(kExternalMemory, 0, Representation::Integer32(),
- Handle<String>::null(), false, false);
+ Handle<Name>::null(), false, false);
}
static HObjectAccess ForExternalUInteger8() {
return HObjectAccess(kExternalMemory, 0, Representation::UInteger8(),
- Handle<String>::null(), false, false);
+ Handle<Name>::null(), false, false);
}
// Create an access to an offset in a fixed array header.
@@ -6220,7 +6184,7 @@ class HObjectAccess final {
// Create an access to a resolved field (in-object or backing store).
static HObjectAccess ForField(Handle<Map> map, int index,
Representation representation,
- Handle<String> name);
+ Handle<Name> name);
static HObjectAccess ForJSTypedArrayLength() {
return HObjectAccess::ForObservableJSObjectOffset(
@@ -6335,16 +6299,15 @@ class HObjectAccess final {
HObjectAccess(Portion portion, int offset,
Representation representation = Representation::Tagged(),
- Handle<String> name = Handle<String>::null(),
- bool immutable = false,
- bool existing_inobject_property = true)
- : value_(PortionField::encode(portion) |
- RepresentationField::encode(representation.kind()) |
- ImmutableField::encode(immutable ? 1 : 0) |
- ExistingInobjectPropertyField::encode(
- existing_inobject_property ? 1 : 0) |
- OffsetField::encode(offset)),
- name_(name) {
+ Handle<Name> name = Handle<Name>::null(),
+ bool immutable = false, bool existing_inobject_property = true)
+ : value_(PortionField::encode(portion) |
+ RepresentationField::encode(representation.kind()) |
+ ImmutableField::encode(immutable ? 1 : 0) |
+ ExistingInobjectPropertyField::encode(
+ existing_inobject_property ? 1 : 0) |
+ OffsetField::encode(offset)),
+ name_(name) {
// assert that the fields decode correctly
DCHECK(this->offset() == offset);
DCHECK(this->portion() == portion);
@@ -6361,7 +6324,7 @@ class HObjectAccess final {
class OffsetField : public BitField<int, 9, 23> {};
uint32_t value_; // encodes portion, representation, immutable, and offset
- Handle<String> name_;
+ Handle<Name> name_;
friend class HLoadNamedField;
friend class HStoreNamedField;
@@ -7390,8 +7353,7 @@ class HStringAdd final : public HBinaryOperation {
public:
static HInstruction* New(
Isolate* isolate, Zone* zone, HValue* context, HValue* left,
- HValue* right, Strength strength = Strength::WEAK,
- PretenureFlag pretenure_flag = NOT_TENURED,
+ HValue* right, PretenureFlag pretenure_flag = NOT_TENURED,
StringAddFlags flags = STRING_ADD_CHECK_BOTH,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
@@ -7413,16 +7375,21 @@ class HStringAdd final : public HBinaryOperation {
}
private:
- HStringAdd(HValue* context, HValue* left, HValue* right, Strength strength,
+ HStringAdd(HValue* context, HValue* left, HValue* right,
PretenureFlag pretenure_flag, StringAddFlags flags,
Handle<AllocationSite> allocation_site)
- : HBinaryOperation(context, left, right, strength, HType::String()),
+ : HBinaryOperation(context, left, right, Strength::WEAK, HType::String()),
flags_(flags),
pretenure_flag_(pretenure_flag) {
set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
+ if ((flags & STRING_ADD_CONVERT) == STRING_ADD_CONVERT) {
+ SetAllSideEffects();
+ ClearFlag(kUseGVN);
+ } else {
+ SetChangesFlag(kNewSpacePromotion);
+ SetFlag(kUseGVN);
+ }
SetDependsOnFlag(kMaps);
- SetChangesFlag(kNewSpacePromotion);
if (FLAG_trace_pretenuring) {
PrintF("HStringAdd with AllocationSite %p %s\n",
allocation_site.is_null()
@@ -7432,8 +7399,9 @@ class HStringAdd final : public HBinaryOperation {
}
}
- // No side-effects except possible allocation:
- bool IsDeletable() const override { return true; }
+ bool IsDeletable() const final {
+ return (flags_ & STRING_ADD_CONVERT) != STRING_ADD_CONVERT;
+ }
const StringAddFlags flags_;
const PretenureFlag pretenure_flag_;
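[The rewritten HStringAdd constructor gates GVN on the flags: a STRING_ADD_CONVERT add may run arbitrary user code while converting its operand, so it takes all side effects and becomes non-deletable, whereas the plain concat case stays pure enough to common. A toy value-numbering loop showing why the distinction matters — illustrative only:

#include <cassert>
#include <map>
#include <tuple>

struct Instr {
  int op, left, right;
  bool pure;  // only pure instructions may be commoned by GVN
};

// Returns an existing value number for a pure, identical instruction,
// or assigns a fresh one; impure instructions are never reused.
int ValueNumber(const Instr& in, int* next,
                std::map<std::tuple<int, int, int>, int>* table) {
  if (!in.pure) return (*next)++;
  auto key = std::make_tuple(in.op, in.left, in.right);
  auto it = table->find(key);
  if (it != table->end()) return it->second;
  return (*table)[key] = (*next)++;
}

int main() {
  std::map<std::tuple<int, int, int>, int> table;
  int next = 0;
  Instr concat{1, 10, 11, /*pure=*/true};    // STRING_ADD_CHECK_* case
  Instr convert{1, 10, 11, /*pure=*/false};  // STRING_ADD_CONVERT case
  int a = ValueNumber(concat, &next, &table);
  int b = ValueNumber(concat, &next, &table);
  int c = ValueNumber(convert, &next, &table);
  int d = ValueNumber(convert, &next, &table);
  assert(a == b);  // pure concat is commoned
  assert(c != d);  // converting add must not be
}
]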
@@ -7586,56 +7554,6 @@ class HRegExpLiteral final : public HMaterializedLiteral<1> {
};
-class HFunctionLiteral final : public HTemplateInstruction<1> {
- public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HFunctionLiteral,
- Handle<SharedFunctionInfo>,
- bool);
- HValue* context() { return OperandAt(0); }
-
- Representation RequiredInputRepresentation(int index) override {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral)
-
- Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
- bool pretenure() const { return PretenureField::decode(bit_field_); }
- bool has_no_literals() const {
- return HasNoLiteralsField::decode(bit_field_);
- }
- FunctionKind kind() const { return FunctionKindField::decode(bit_field_); }
- LanguageMode language_mode() const {
- return LanguageModeField::decode(bit_field_);
- }
-
- private:
- HFunctionLiteral(HValue* context, Handle<SharedFunctionInfo> shared,
- bool pretenure)
- : HTemplateInstruction<1>(HType::JSObject()),
- shared_info_(shared),
- bit_field_(FunctionKindField::encode(shared->kind()) |
- PretenureField::encode(pretenure) |
- HasNoLiteralsField::encode(shared->num_literals() == 0) |
- LanguageModeField::encode(shared->language_mode())) {
- SetOperandAt(0, context);
- set_representation(Representation::Tagged());
- SetChangesFlag(kNewSpacePromotion);
- }
-
- bool IsDeletable() const override { return true; }
-
- class FunctionKindField : public BitField<FunctionKind, 0, 8> {};
- class PretenureField : public BitField<bool, 8, 1> {};
- class HasNoLiteralsField : public BitField<bool, 9, 1> {};
- STATIC_ASSERT(LANGUAGE_END == 3);
- class LanguageModeField : public BitField<LanguageMode, 10, 2> {};
-
- Handle<SharedFunctionInfo> shared_info_;
- uint32_t bit_field_;
-};
-
-
class HTypeof final : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*);
diff --git a/deps/v8/src/hydrogen-sce.cc b/deps/v8/src/hydrogen-sce.cc
index 6944f7090e..235a94142d 100644
--- a/deps/v8/src/hydrogen-sce.cc
+++ b/deps/v8/src/hydrogen-sce.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/hydrogen-sce.h"
-#include "src/v8.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 8984a6e9f3..901e10721d 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -6,10 +6,9 @@
#include <sstream>
-#include "src/v8.h"
-
#include "src/allocation-site-scopes.h"
#include "src/ast-numbering.h"
+#include "src/code-factory.h"
#include "src/full-codegen/full-codegen.h"
#include "src/hydrogen-bce.h"
#include "src/hydrogen-bch.h"
@@ -37,6 +36,7 @@
#include "src/ic/ic.h"
// GetRootConstructor
#include "src/ic/ic-inl.h"
+#include "src/isolate-inl.h"
#include "src/lithium-allocator.h"
#include "src/parser.h"
#include "src/runtime/runtime.h"
@@ -1567,7 +1567,6 @@ void HGraphBuilder::BuildKeyedIndexCheck(HValue* key,
internalized.Else();
Add<HPushArguments>(key);
HValue* intern_key = Add<HCallRuntime>(
- isolate()->factory()->empty_string(),
Runtime::FunctionForId(Runtime::kInternalizeString), 1);
Push(intern_key);
@@ -1725,7 +1724,6 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
// TODO(jkummerow): walk the prototype chain instead.
Add<HPushArguments>(receiver, key);
Push(Add<HCallRuntime>(
- isolate()->factory()->empty_string(),
Runtime::FunctionForId(is_strong(language_mode)
? Runtime::kKeyedGetPropertyStrong
: Runtime::kKeyedGetProperty),
@@ -1788,7 +1786,6 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
details_compare.Else();
Add<HPushArguments>(receiver, key);
Push(Add<HCallRuntime>(
- isolate()->factory()->empty_string(),
Runtime::FunctionForId(is_strong(language_mode)
? Runtime::kKeyedGetPropertyStrong
: Runtime::kKeyedGetProperty),
@@ -1820,6 +1817,37 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
}
+HValue* HGraphBuilder::BuildCreateIterResultObject(HValue* value,
+ HValue* done) {
+ NoObservableSideEffectsScope scope(this);
+
+ // Allocate the JSIteratorResult object.
+ HValue* result =
+ Add<HAllocate>(Add<HConstant>(JSIteratorResult::kSize), HType::JSObject(),
+ NOT_TENURED, JS_ITERATOR_RESULT_TYPE);
+
+ // Initialize the JSIteratorResult object.
+ HValue* native_context = BuildGetNativeContext();
+ HValue* map = Add<HLoadNamedField>(
+ native_context, nullptr,
+ HObjectAccess::ForContextSlot(Context::ITERATOR_RESULT_MAP_INDEX));
+ Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
+ HValue* empty_fixed_array = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
+ Add<HStoreNamedField>(result, HObjectAccess::ForPropertiesPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(),
+ empty_fixed_array);
+ Add<HStoreNamedField>(result, HObjectAccess::ForObservableJSObjectOffset(
+ JSIteratorResult::kValueOffset),
+ value);
+ Add<HStoreNamedField>(result, HObjectAccess::ForObservableJSObjectOffset(
+ JSIteratorResult::kDoneOffset),
+ done);
+ STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+ return result;
+}
+
+
HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
HValue* index,
HValue* input) {
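[BuildCreateIterResultObject inlines the allocation and the five stores of a JSIteratorResult (map, properties, elements, value, done), with the STATIC_ASSERT pinning the object to five pointer-sized slots. A plain C++ stand-in for the layout being assumed — offsets are illustrative:

#include <cassert>
#include <cstddef>

// Stand-in for the five pointer-size slots the diff's STATIC_ASSERT
// pins down: map, properties, elements, value, done.
struct IterResult {
  void* map;
  void* properties;
  void* elements;
  void* value;
  void* done;
};

int main() {
  static_assert(sizeof(IterResult) == 5 * sizeof(void*),
                "layout must match JSIteratorResult::kSize");
  assert(offsetof(IterResult, value) == 3 * sizeof(void*));
  assert(offsetof(IterResult, done) == 4 * sizeof(void*));
}
]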
@@ -2022,7 +2050,6 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
// Cache miss, fallback to runtime.
Add<HPushArguments>(object);
Push(Add<HCallRuntime>(
- isolate()->factory()->empty_string(),
Runtime::FunctionForId(Runtime::kNumberToStringSkipCache),
1));
}
@@ -2438,8 +2465,7 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
{
// Fallback to the runtime to add the two strings.
Add<HPushArguments>(left, right);
- Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kStringAdd), 2));
+ Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kStringAdd), 2));
}
if_sameencodingandsequential.End();
}
@@ -3515,16 +3541,15 @@ HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
}
-HValue* HGraphBuilder::AddLoadJSBuiltin(Builtins::JavaScript builtin) {
+HValue* HGraphBuilder::AddLoadJSBuiltin(int context_index) {
HValue* global_object = Add<HLoadNamedField>(
context(), nullptr,
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
- GlobalObject::kBuiltinsOffset);
- HValue* builtins = Add<HLoadNamedField>(global_object, nullptr, access);
- HObjectAccess function_access = HObjectAccess::ForObservableJSObjectOffset(
- JSBuiltinsObject::OffsetOfFunctionWithId(builtin));
- return Add<HLoadNamedField>(builtins, nullptr, function_access);
+ GlobalObject::kNativeContextOffset);
+ HValue* native_context = Add<HLoadNamedField>(global_object, nullptr, access);
+ HObjectAccess function_access = HObjectAccess::ForContextSlot(context_index);
+ return Add<HLoadNamedField>(native_context, nullptr, function_access);
}
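[With the JSBuiltinsObject path gone, AddLoadJSBuiltin reaches the function in two loads: global object to native context, then native context to the requested slot. A sketch of that two-hop load under an assumed layout — the types here are invented for illustration:

#include <cassert>
#include <vector>

// Illustrative stand-ins: a "global object" that points at a native
// context, which is just an indexable array of slots.
struct NativeContext { std::vector<void*> slots; };
struct GlobalObject { NativeContext* native_context; };

void* LoadJSBuiltin(const GlobalObject* global, int context_index) {
  // Hop 1: global object -> native context (was -> builtins object).
  NativeContext* ctx = global->native_context;
  // Hop 2: native context -> function at the given slot.
  return ctx->slots[context_index];
}

int main() {
  int fn = 42;  // pretend this is a JSFunction
  NativeContext ctx{{nullptr, &fn}};
  GlobalObject global{&ctx};
  assert(LoadJSBuiltin(&global, 1) == &fn);
}
]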
@@ -3654,7 +3679,7 @@ HGraph::HGraph(CompilationInfo* info)
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
}
- start_environment_->set_ast_id(BailoutId::FunctionEntry());
+ start_environment_->set_ast_id(BailoutId::Prologue());
entry_block_ = CreateBasicBlock();
entry_block_->SetInitialEnvironment(start_environment_);
}
@@ -4416,12 +4441,6 @@ bool HOptimizedGraphBuilder::BuildGraph() {
return false;
}
- int slots = current_info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (current_info()->scope()->is_script_scope() && slots > 0) {
- Bailout(kScriptContext);
- return false;
- }
-
Scope* scope = current_info()->scope();
SetUpScope(scope);
@@ -4625,48 +4644,61 @@ HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
+ HEnvironment* prolog_env = environment();
+ int parameter_count = environment()->parameter_count();
+ ZoneList<HValue*> parameters(parameter_count, zone());
+ for (int i = 0; i < parameter_count; ++i) {
+ HInstruction* parameter = Add<HParameter>(static_cast<unsigned>(i));
+ parameters.Add(parameter, zone());
+ environment()->Bind(i, parameter);
+ }
+
+ HConstant* undefined_constant = graph()->GetConstantUndefined();
+ // Initialize specials and locals to undefined.
+ for (int i = parameter_count + 1; i < environment()->length(); ++i) {
+ environment()->Bind(i, undefined_constant);
+ }
+ Add<HPrologue>();
+
+ HEnvironment* initial_env = environment()->CopyWithoutHistory();
+ HBasicBlock* body_entry = CreateBasicBlock(initial_env);
+ GotoNoSimulate(body_entry);
+ set_current_block(body_entry);
+
+ // Initialize context of prolog environment to undefined.
+ prolog_env->BindContext(undefined_constant);
+
// First special is HContext.
HInstruction* context = Add<HContext>();
environment()->BindContext(context);
// Create an arguments object containing the initial parameters. Set the
// initial values of parameters including "this" having parameter index 0.
- DCHECK_EQ(scope->num_parameters() + 1, environment()->parameter_count());
- HArgumentsObject* arguments_object =
- New<HArgumentsObject>(environment()->parameter_count());
- for (int i = 0; i < environment()->parameter_count(); ++i) {
- HInstruction* parameter = Add<HParameter>(i);
+ DCHECK_EQ(scope->num_parameters() + 1, parameter_count);
+ HArgumentsObject* arguments_object = New<HArgumentsObject>(parameter_count);
+ for (int i = 0; i < parameter_count; ++i) {
+ HValue* parameter = parameters.at(i);
arguments_object->AddArgument(parameter, zone());
- environment()->Bind(i, parameter);
}
+
AddInstruction(arguments_object);
graph()->SetArgumentsObject(arguments_object);
- HConstant* undefined_constant = graph()->GetConstantUndefined();
- // Initialize specials and locals to undefined.
- for (int i = environment()->parameter_count() + 1;
- i < environment()->length();
- ++i) {
- environment()->Bind(i, undefined_constant);
- }
-
// Handle the arguments and arguments shadow variables specially (they do
// not have declarations).
if (scope->arguments() != NULL) {
- environment()->Bind(scope->arguments(),
- graph()->GetArgumentsObject());
- }
-
- int rest_index;
- Variable* rest = scope->rest_parameter(&rest_index);
- if (rest) {
- return Bailout(kRestParameter);
+ environment()->Bind(scope->arguments(), graph()->GetArgumentsObject());
}
if (scope->this_function_var() != nullptr ||
scope->new_target_var() != nullptr) {
return Bailout(kSuperReference);
}
+
+ // Trace the call.
+ if (FLAG_trace && top_info()->IsOptimizing()) {
+ Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kTraceEnter), 0);
+ }
}
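[The new SetUpScope binds parameters and undefined-initializes locals before emitting HPrologue, then opens the function body in a fresh block seeded with a history-free copy of that environment. A toy model of the bind-then-snapshot ordering, with the slot layout heavily simplified:

#include <cassert>
#include <vector>

struct Env {
  std::vector<int> slots;  // parameters first, then specials/locals
  Env Copy() const { return *this; }
};

int main() {
  const int kParams = 2, kLocals = 3, kUndefined = -1;
  Env env{std::vector<int>(kParams + 1 + kLocals, 0)};
  // Bind parameters (indices 0..kParams-1), as the new code does
  // before HPrologue is emitted.
  for (int i = 0; i < kParams; ++i) env.slots[i] = 100 + i;
  // Specials and locals start out undefined.
  for (int i = kParams + 1; i < static_cast<int>(env.slots.size()); ++i)
    env.slots[i] = kUndefined;
  // The body-entry block gets a history-free copy of this environment.
  Env body_entry = env.Copy();
  assert(body_entry.slots[0] == 100);
  assert(body_entry.slots.back() == kUndefined);
}
]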
@@ -4690,7 +4722,7 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
{ BreakAndContinueScope push(&break_info, this);
if (scope != NULL) {
- if (scope->ContextLocalCount() > 0) {
+ if (scope->NeedsContext()) {
// Load the function object.
Scope* declaration_scope = scope->DeclarationScope();
HInstruction* function;
@@ -4758,6 +4790,12 @@ void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
}
+void HOptimizedGraphBuilder::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* stmt) {
+ Visit(stmt->statement());
+}
+
+
void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
@@ -5301,8 +5339,7 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
map = graph()->GetConstant1();
Runtime::FunctionId function_id = Runtime::kGetPropertyNamesFast;
Add<HPushArguments>(enumerable);
- array = Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(function_id), 1);
+ array = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 1);
Push(array);
Add<HSimulate>(stmt->EnumId());
Drop(1);
@@ -5357,8 +5394,7 @@ void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
} else {
Add<HPushArguments>(enumerable, key);
Runtime::FunctionId function_id = Runtime::kForInFilter;
- key = Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(function_id), 2);
+ key = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 2);
Push(key);
Add<HSimulate>(stmt->FilterId());
key = Pop();
@@ -5444,8 +5480,25 @@ void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
expr, current_info()->script(), top_info());
// We also have a stack overflow if the recursive compilation did.
if (HasStackOverflow()) return;
- HFunctionLiteral* instr =
- New<HFunctionLiteral>(shared_info, expr->pretenure());
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ HConstant* shared_info_value = Add<HConstant>(shared_info);
+ HInstruction* instr;
+ if (!expr->pretenure() && shared_info->num_literals() == 0) {
+ FastNewClosureStub stub(isolate(), shared_info->language_mode(),
+ shared_info->kind());
+ FastNewClosureDescriptor descriptor(isolate());
+ HValue* values[] = {context(), shared_info_value};
+ HConstant* stub_value = Add<HConstant>(stub.GetCode());
+ instr = New<HCallWithDescriptor>(stub_value, 0, descriptor,
+ Vector<HValue*>(values, arraysize(values)),
+ NORMAL_CALL);
+ } else {
+ Add<HPushArguments>(shared_info_value);
+ Runtime::FunctionId function_id =
+ expr->pretenure() ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure;
+ instr = New<HCallRuntime>(Runtime::FunctionForId(function_id), 1);
+ }
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5716,7 +5769,7 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
- Handle<FixedArray> literals(closure->literals());
+ Handle<LiteralsArray> literals(closure->literals());
HRegExpLiteral* instr = New<HRegExpLiteral>(literals,
expr->pattern(),
expr->flags(),
@@ -5812,8 +5865,8 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
// Check whether to use fast or slow deep-copying for boilerplate.
int max_properties = kMaxFastLiteralProperties;
- Handle<Object> literals_cell(closure->literals()->get(expr->literal_index()),
- isolate());
+ Handle<Object> literals_cell(
+ closure->literals()->literal(expr->literal_index()), isolate());
Handle<AllocationSite> site;
Handle<JSObject> boilerplate;
if (!literals_cell->IsUndefined()) {
@@ -5831,7 +5884,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
site_context.ExitScope(site, boilerplate);
} else {
NoObservableSideEffectsScope no_effects(this);
- Handle<FixedArray> closure_literals(closure->literals(), isolate());
+ Handle<LiteralsArray> closure_literals(closure->literals(), isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
int literal_index = expr->literal_index();
int flags = expr->ComputeFlags(true);
@@ -5842,15 +5895,12 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Add<HConstant>(flags));
Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral;
- literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(function_id),
- 4);
+ literal = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 4);
}
// The object is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
Push(literal);
- int store_slot_index = 0;
for (int i = 0; i < expr->properties()->length(); i++) {
ObjectLiteral::Property* property = expr->properties()->at(i);
if (property->is_computed_name()) return Bailout(kComputedPropertyName);
@@ -5874,7 +5924,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<Map> map = property->GetReceiverType();
Handle<String> name = key->AsPropertyName();
HValue* store;
- FeedbackVectorICSlot slot = expr->GetNthSlot(store_slot_index++);
+ FeedbackVectorICSlot slot = property->GetSlot();
if (map.is_null()) {
// If we don't know the monomorphic type, do a generic store.
CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot, literal,
@@ -5902,8 +5952,7 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
if (FunctionLiteral::NeedsHomeObject(property->value())) {
Handle<Symbol> sym = isolate()->factory()->home_object_symbol();
HInstruction* store_home = BuildNamedGeneric(
- STORE, NULL, expr->GetNthSlot(store_slot_index++), value, sym,
- literal);
+ STORE, NULL, property->GetSlot(1), value, sym, literal);
AddInstruction(store_home);
DCHECK(store_home->HasObservableSideEffects());
Add<HSimulate>(property->value()->id(), REMOVABLE_SIMULATE);
@@ -5922,9 +5971,6 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
}
}
- // Crankshaft may not consume all the slots because it doesn't emit accessors.
- DCHECK(!FLAG_vector_stores || store_slot_index <= expr->slot_count());
-
if (expr->has_function()) {
// Return the result of the transformation to fast properties
// instead of the original since this operation changes the map
@@ -5949,9 +5995,10 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HInstruction* literal;
Handle<AllocationSite> site;
- Handle<FixedArray> literals(environment()->closure()->literals(), isolate());
+ Handle<LiteralsArray> literals(environment()->closure()->literals(),
+ isolate());
bool uninitialized = false;
- Handle<Object> literals_cell(literals->get(expr->literal_index()),
+ Handle<Object> literals_cell(literals->literal(expr->literal_index()),
isolate());
Handle<JSObject> boilerplate_object;
if (literals_cell->IsUndefined()) {
@@ -5971,7 +6018,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
return Bailout(kArrayBoilerplateCreationFailed);
}
creation_context.ExitScope(site, boilerplate_object);
- literals->set(expr->literal_index(), *site);
+ literals->set_literal(expr->literal_index(), *site);
if (boilerplate_object->elements()->map() ==
isolate()->heap()->fixed_cow_array_map()) {
@@ -6013,9 +6060,7 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
Add<HConstant>(flags));
Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral;
- literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(function_id),
- 4);
+ literal = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 4);
// Register to deopt if the boilerplate ElementsKind changes.
top_info()->dependencies()->AssumeTransitionStable(site);
@@ -6366,8 +6411,8 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
bool HOptimizedGraphBuilder::PropertyAccessInfo::IsIntegerIndexedExotic() {
InstanceType instance_type = map_->instance_type();
- return instance_type == JS_TYPED_ARRAY_TYPE &&
- IsSpecialIndex(isolate()->unicode_cache(), *name_);
+ return instance_type == JS_TYPED_ARRAY_TYPE && name_->IsString() &&
+ IsSpecialIndex(isolate()->unicode_cache(), String::cast(*name_));
}
@@ -6375,7 +6420,8 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
if (!CanInlinePropertyAccess(map_)) return false;
if (IsJSObjectFieldAccessor()) return IsLoad();
if (IsJSArrayBufferViewFieldAccessor()) return IsLoad();
- if (map_->function_with_prototype() && !map_->has_non_instance_prototype() &&
+ if (map_->IsJSFunctionMap() && map_->is_constructor() &&
+ !map_->has_non_instance_prototype() &&
name_.is_identical_to(isolate()->factory()->prototype_string())) {
return IsLoad();
}
@@ -6489,7 +6535,7 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
}
if (info->name().is_identical_to(isolate()->factory()->prototype_string()) &&
- info->map()->function_with_prototype()) {
+ info->map()->IsJSFunctionMap() && info->map()->is_constructor()) {
DCHECK(!info->map()->has_non_instance_prototype());
return New<HLoadFunctionPrototype>(checked_object);
}
@@ -6504,7 +6550,6 @@ HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
DCHECK(info->IsLoad());
if (is_strong(function_language_mode())) {
return New<HCallRuntime>(
- isolate()->factory()->empty_string(),
Runtime::FunctionForId(Runtime::kThrowStrongModeImplicitConversion),
0);
} else {
@@ -7160,8 +7205,7 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
HValue* value = environment()->Pop();
if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
Add<HPushArguments>(value);
- Add<HCallRuntime>(isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kThrow), 1);
+ Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kThrow), 1);
Add<HSimulate>(expr->id());
// If the throw definitely exits the function, we can finish with a dummy
@@ -7233,7 +7277,8 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
return result;
} else {
if (FLAG_vector_stores &&
- current_feedback_vector()->GetKind(slot) == Code::KEYED_STORE_IC) {
+ current_feedback_vector()->GetKind(slot) ==
+ FeedbackVectorSlotKind::KEYED_STORE_IC) {
// It's possible that a keyed store of a constant string was converted
// to a named store. Here, at the last minute, we need to make sure to
// use a generic Keyed Store if we are using the type vector, because
@@ -7328,7 +7373,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
PrototypeIterator iter(map);
JSObject* holder = NULL;
while (!iter.IsAtEnd()) {
- holder = JSObject::cast(*PrototypeIterator::GetCurrent(iter));
+ holder = *PrototypeIterator::GetCurrent<JSObject>(iter);
iter.Advance();
}
DCHECK(holder && holder->IsJSObject());
@@ -7410,11 +7455,45 @@ HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
ElementsKind consolidated_elements_kind = has_seen_holey_elements
? GetHoleyElementsKind(most_general_consolidated_map->elements_kind())
: most_general_consolidated_map->elements_kind();
+ LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE;
+ if (has_seen_holey_elements) {
+ // Make sure that all of the maps we are handling have the initial array
+ // prototype.
+ bool saw_non_array_prototype = false;
+ for (int i = 0; i < maps->length(); ++i) {
+ Handle<Map> map = maps->at(i);
+ if (map->prototype() != *isolate()->initial_array_prototype()) {
+ // We can't guarantee that loading the hole is safe. The prototype may
+ // have an element at this position.
+ saw_non_array_prototype = true;
+ break;
+ }
+ }
+
+ if (!saw_non_array_prototype) {
+ Handle<Map> holey_map = handle(
+ isolate()->get_initial_js_array_map(consolidated_elements_kind));
+ load_mode = BuildKeyedHoleMode(holey_map);
+ if (load_mode != NEVER_RETURN_HOLE) {
+ for (int i = 0; i < maps->length(); ++i) {
+ Handle<Map> map = maps->at(i);
+ // The prototype check was already done for the holey map in
+ // BuildKeyedHoleMode.
+ if (!map.is_identical_to(holey_map)) {
+ Handle<JSObject> prototype(JSObject::cast(map->prototype()),
+ isolate());
+ Handle<JSObject> object_prototype =
+ isolate()->initial_object_prototype();
+ BuildCheckPrototypeMaps(prototype, object_prototype);
+ }
+ }
+ }
+ }
+ }
HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
checked_object, key, val,
most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
- consolidated_elements_kind,
- LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
+ consolidated_elements_kind, LOAD, load_mode, STANDARD_STORE);
return instr;
}
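[Returning the hole from a consolidated load is only sound if no prototype can supply an element at a holey index, which is why every handled map must have the initial Array prototype before load_mode is relaxed. The guard, reduced to its essentials:

#include <cassert>
#include <vector>

struct Map { const void* prototype; };

// Returning the hole is only safe when no prototype can shadow a
// holey index with a real element; the diff checks the initial
// array prototype for exactly that reason.
bool CanReturnHole(const std::vector<Map>& maps,
                   const void* initial_array_prototype) {
  for (const Map& m : maps)
    if (m.prototype != initial_array_prototype) return false;
  return true;
}

int main() {
  int array_proto, other_proto;
  std::vector<Map> ok{{&array_proto}, {&array_proto}};
  std::vector<Map> bad{{&array_proto}, {&other_proto}};
  assert(CanReturnHole(ok, &array_proto));
  assert(!CanReturnHole(bad, &array_proto));
}
]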
@@ -7857,15 +7936,13 @@ HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
PrototypeIterator::START_AT_RECEIVER);
while (holder.is_null() ||
!PrototypeIterator::GetCurrent(iter).is_identical_to(holder)) {
- BuildConstantMapCheck(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)));
+ BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter));
iter.Advance();
if (iter.IsAtEnd()) {
return NULL;
}
}
- return BuildConstantMapCheck(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)));
+ return BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter));
}
@@ -7878,9 +7955,9 @@ void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
}
-HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(
- HValue* fun, int argument_count, bool pass_argument_count) {
- return New<HCallJSFunction>(fun, argument_count, pass_argument_count);
+HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(HValue* fun,
+ int argument_count) {
+ return New<HCallJSFunction>(fun, argument_count);
}
@@ -7918,7 +7995,7 @@ HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
if (jsfun.is_identical_to(current_info()->closure())) {
graph()->MarkRecursive();
}
- return NewPlainFunctionCall(target, argument_count, dont_adapt_arguments);
+ return NewPlainFunctionCall(target, argument_count);
} else {
HValue* param_count_value = Add<HConstant>(formal_parameter_count);
HValue* context = Add<HLoadNamedField>(
@@ -8337,7 +8414,9 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
// Type-check the inlined function.
DCHECK(target_shared->has_deoptimization_support());
- AstTyper::Run(&target_info);
+ AstTyper(target_info.isolate(), target_info.zone(), target_info.closure(),
+ target_info.scope(), target_info.osr_ast_id(), target_info.literal())
+ .Run();
int inlining_id = 0;
if (top_info()->is_tracking_positions()) {
@@ -8603,9 +8682,10 @@ bool HOptimizedGraphBuilder::CanInlineArrayResizeOperation(
return !receiver_map.is_null() &&
receiver_map->instance_type() == JS_ARRAY_TYPE &&
IsFastElementsKind(receiver_map->elements_kind()) &&
- !receiver_map->is_dictionary_map() &&
- !IsReadOnlyLengthDescriptor(receiver_map) &&
- !receiver_map->is_observed() && receiver_map->is_extensible();
+ !receiver_map->is_dictionary_map() && !receiver_map->is_observed() &&
+ receiver_map->is_extensible() &&
+ (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
+ !IsReadOnlyLengthDescriptor(receiver_map);
}
@@ -8936,7 +9016,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
if_inline.Else();
{
Add<HPushArguments>(receiver);
- result = Add<HCallJSFunction>(function, 1, true);
+ result = Add<HCallJSFunction>(function, 1);
if (!ast_context()->IsEffect()) Push(result);
}
if_inline.End();
@@ -9838,16 +9918,6 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
// Allocate an instance of the implicit receiver object.
HValue* size_in_bytes = Add<HConstant>(instance_size);
HAllocationMode allocation_mode;
- if (FLAG_pretenuring_call_new) {
- if (FLAG_allocation_site_pretenuring) {
- // Try to use pretenuring feedback.
- Handle<AllocationSite> allocation_site = expr->allocation_site();
- allocation_mode = HAllocationMode(allocation_site);
- // Take a dependency on allocation site.
- top_info()->dependencies()->AssumeTenuringDecision(allocation_site);
- }
- }
-
HAllocate* receiver = BuildAllocate(
size_in_bytes, HType::JSObject(), JS_OBJECT_TYPE, allocation_mode);
receiver->set_known_initial_map(initial_map);
@@ -10286,7 +10356,7 @@ void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
Push(byte_length);
CHECK_ALIVE(VisitForValue(arguments->at(kInitializeArg)));
PushArgumentsFromEnvironment(kArgsLength);
- Add<HCallRuntime>(expr->name(), expr->function(), kArgsLength);
+ Add<HCallRuntime>(expr->function(), kArgsLength);
}
}
byte_offset_smi.End();
@@ -10377,11 +10447,10 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
FOR_EACH_HYDROGEN_INTRINSIC(CALL_INTRINSIC_GENERATOR)
#undef CALL_INTRINSIC_GENERATOR
default: {
- Handle<String> name = expr->name();
int argument_count = expr->arguments()->length();
CHECK_ALIVE(VisitExpressions(expr->arguments()));
PushArgumentsFromEnvironment(argument_count);
- HCallRuntime* call = New<HCallRuntime>(name, function, argument_count);
+ HCallRuntime* call = New<HCallRuntime>(function, argument_count);
return ast_context()->ReturnInstruction(call, expr->id());
}
}
@@ -10412,7 +10481,6 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
HValue* obj = Pop();
Add<HPushArguments>(obj, key);
HInstruction* instr = New<HCallRuntime>(
- isolate()->factory()->empty_string(),
Runtime::FunctionForId(is_strict(function_language_mode())
? Runtime::kDeleteProperty_Strict
: Runtime::kDeleteProperty_Sloppy),
@@ -10927,9 +10995,9 @@ HValue* HGraphBuilder::BuildBinaryOperation(
left = BuildNumberToString(left, left_type);
} else if (!left_type->Is(Type::String())) {
DCHECK(right_type->Is(Type::String()));
- HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_RIGHT);
- Add<HPushArguments>(left, right);
- return AddUncasted<HInvokeFunction>(function, 2);
+ return AddUncasted<HStringAdd>(
+ left, right, allocation_mode.GetPretenureMode(),
+ STRING_ADD_CONVERT_LEFT, allocation_mode.feedback_site());
}
// Convert right argument as necessary.
@@ -10938,9 +11006,9 @@ HValue* HGraphBuilder::BuildBinaryOperation(
right = BuildNumberToString(right, right_type);
} else if (!right_type->Is(Type::String())) {
DCHECK(left_type->Is(Type::String()));
- HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_LEFT);
- Add<HPushArguments>(left, right);
- return AddUncasted<HInvokeFunction>(function, 2);
+ return AddUncasted<HStringAdd>(
+ left, right, allocation_mode.GetPretenureMode(),
+ STRING_ADD_CONVERT_RIGHT, allocation_mode.feedback_site());
}
}
@@ -10957,7 +11025,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
if (!right_string.is_null() && right_string->length() == 0) return left;
if (!left_string.is_null() && !right_string.is_null()) {
return AddUncasted<HStringAdd>(
- left, right, strength, allocation_mode.GetPretenureMode(),
+ left, right, allocation_mode.GetPretenureMode(),
STRING_ADD_CHECK_NONE, allocation_mode.feedback_site());
}
@@ -10986,8 +11054,8 @@ HValue* HGraphBuilder::BuildBinaryOperation(
// Fallback to using the string add stub.
return AddUncasted<HStringAdd>(
- left, right, strength, allocation_mode.GetPretenureMode(),
- STRING_ADD_CHECK_NONE, allocation_mode.feedback_site());
+ left, right, allocation_mode.GetPretenureMode(), STRING_ADD_CHECK_NONE,
+ allocation_mode.feedback_site());
}
if (graph()->info()->IsStub()) {
@@ -11005,10 +11073,57 @@ HValue* HGraphBuilder::BuildBinaryOperation(
// inline several instructions (including the two pushes) for every tagged
// operation in optimized code, which is more expensive, than a stub call.
if (graph()->info()->IsStub() && is_non_primitive) {
- HValue* function =
- AddLoadJSBuiltin(BinaryOpIC::TokenToJSBuiltin(op, strength));
+ Runtime::FunctionId function_id;
+ switch (op) {
+ default:
+ UNREACHABLE();
+ case Token::ADD:
+ function_id =
+ is_strong(strength) ? Runtime::kAdd_Strong : Runtime::kAdd;
+ break;
+ case Token::SUB:
+ function_id = is_strong(strength) ? Runtime::kSubtract_Strong
+ : Runtime::kSubtract;
+ break;
+ case Token::MUL:
+ function_id = is_strong(strength) ? Runtime::kMultiply_Strong
+ : Runtime::kMultiply;
+ break;
+ case Token::DIV:
+ function_id =
+ is_strong(strength) ? Runtime::kDivide_Strong : Runtime::kDivide;
+ break;
+ case Token::MOD:
+ function_id =
+ is_strong(strength) ? Runtime::kModulus_Strong : Runtime::kModulus;
+ break;
+ case Token::BIT_OR:
+ function_id = is_strong(strength) ? Runtime::kBitwiseOr_Strong
+ : Runtime::kBitwiseOr;
+ break;
+ case Token::BIT_AND:
+ function_id = is_strong(strength) ? Runtime::kBitwiseAnd_Strong
+ : Runtime::kBitwiseAnd;
+ break;
+ case Token::BIT_XOR:
+ function_id = is_strong(strength) ? Runtime::kBitwiseXor_Strong
+ : Runtime::kBitwiseXor;
+ break;
+ case Token::SAR:
+ function_id = is_strong(strength) ? Runtime::kShiftRight_Strong
+ : Runtime::kShiftRight;
+ break;
+ case Token::SHR:
+ function_id = is_strong(strength) ? Runtime::kShiftRightLogical_Strong
+ : Runtime::kShiftRightLogical;
+ break;
+ case Token::SHL:
+ function_id = is_strong(strength) ? Runtime::kShiftLeft_Strong
+ : Runtime::kShiftLeft;
+ break;
+ }
Add<HPushArguments>(left, right);
- instr = AddUncasted<HInvokeFunction>(function, 2);
+ instr = AddUncasted<HCallRuntime>(Runtime::FunctionForId(function_id), 2);
} else {
if (is_strong(strength) && Token::IsBitOp(op)) {
// TODO(conradw): This is not efficient, but is necessary to prevent
@@ -11019,7 +11134,6 @@ HValue* HGraphBuilder::BuildBinaryOperation(
if_builder.OrIf<HHasInstanceTypeAndBranch>(right, ODDBALL_TYPE);
if_builder.Then();
Add<HCallRuntime>(
- isolate()->factory()->empty_string(),
Runtime::FunctionForId(Runtime::kThrowStrongModeImplicitConversion),
0);
if (!graph()->info()->IsStub()) {
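[For stub compilations, binary ops now call the runtime directly instead of loading and invoking a JS builtin; the switch above maps each token to its sloppy- or strong-mode runtime id. The dispatch shape, reduced — the ids here are illustrative strings, not real Runtime entries:

#include <cassert>
#include <string>

enum class Token { ADD, SUB, MUL };

// Mirrors the diff's switch: one runtime entry per token, with the
// strong-mode variant chosen by a flag.
std::string RuntimeIdFor(Token op, bool strong) {
  switch (op) {
    case Token::ADD: return strong ? "kAdd_Strong" : "kAdd";
    case Token::SUB: return strong ? "kSubtract_Strong" : "kSubtract";
    case Token::MUL: return strong ? "kMultiply_Strong" : "kMultiply";
  }
  return "unreachable";
}

int main() {
  assert(RuntimeIdFor(Token::ADD, false) == "kAdd");
  assert(RuntimeIdFor(Token::SUB, true) == "kSubtract_Strong");
}
]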
@@ -11059,7 +11173,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
instr = AddUncasted<HBitwise>(op, left, right, strength);
break;
case Token::BIT_OR: {
- HValue* operand, *shift_amount;
+ HValue *operand, *shift_amount;
if (left_type->Is(Type::Signed32()) &&
right_type->Is(Type::Signed32()) &&
MatchRotateRight(left, right, &operand, &shift_amount)) {
@@ -11117,7 +11231,8 @@ static bool IsClassOfTest(CompareOperation* expr) {
Literal* literal = expr->right()->AsLiteral();
if (literal == NULL) return false;
if (!literal->value()->IsString()) return false;
- if (!call->name()->IsOneByteEqualTo(STATIC_CHAR_VECTOR("_ClassOf"))) {
+ if (!call->is_jsruntime() &&
+ call->function()->function_id != Runtime::kInlineClassOf) {
return false;
}
DCHECK(call->arguments()->length() == 1);
@@ -11349,22 +11464,28 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
// Check to see if the rhs of the instanceof is a known function.
if (right->IsConstant() &&
HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
- Handle<Object> function = HConstant::cast(right)->handle(isolate());
- Handle<JSFunction> target = Handle<JSFunction>::cast(function);
- HInstanceOfKnownGlobal* result =
- New<HInstanceOfKnownGlobal>(left, target);
- return ast_context()->ReturnInstruction(result, expr->id());
+ Handle<JSFunction> constructor =
+ Handle<JSFunction>::cast(HConstant::cast(right)->handle(isolate()));
+ if (!constructor->map()->has_non_instance_prototype()) {
+ JSFunction::EnsureHasInitialMap(constructor);
+ DCHECK(constructor->has_initial_map());
+ Handle<Map> initial_map(constructor->initial_map(), isolate());
+ top_info()->dependencies()->AssumeInitialMapCantChange(initial_map);
+ HInstruction* prototype =
+ Add<HConstant>(handle(initial_map->prototype(), isolate()));
+ HHasInPrototypeChainAndBranch* result =
+ New<HHasInPrototypeChainAndBranch>(left, prototype);
+ return ast_context()->ReturnControl(result, expr->id());
+ }
}
HInstanceOf* result = New<HInstanceOf>(left, right);
return ast_context()->ReturnInstruction(result, expr->id());
} else if (op == Token::IN) {
- HValue* function = AddLoadJSBuiltin(Builtins::IN);
Add<HPushArguments>(left, right);
- // TODO(olivf) InvokeFunction produces a check for the parameter count,
- // even though we are certain to pass the correct number of arguments here.
- HInstruction* result = New<HInvokeFunction>(function, 2);
+ HInstruction* result =
+ New<HCallRuntime>(Runtime::FunctionForId(Runtime::kHasProperty), 2);
return ast_context()->ReturnInstruction(result, expr->id());
}
@@ -11434,6 +11555,51 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
return result;
}
} else {
+ if (combined_type->IsClass()) {
+ // TODO(bmeurer): This is an optimized version of an x < y, x > y,
+ // x <= y or x >= y, where both x and y are spec objects with the
+ // same map. The CompareIC collects this map for us. So if we know
+ // that there's no @@toPrimitive on the map (including the prototype
+ // chain), and both valueOf and toString are the default initial
+ // implementations (on the %ObjectPrototype%), then we can reduce
+ // the comparison to map checks on x and y, because the comparison
+ // will turn into a comparison of "[object CLASS]" to itself (the
+ // default outcome of toString, since valueOf returns a spec object).
+ // This is pretty much adhoc, so in TurboFan we could do a lot better
+ // and inline the interesting parts of ToPrimitive (actually we could
+ // even do that in Crankshaft but we don't want to waste too much
+ // time on this now).
+ DCHECK(Token::IsOrderedRelationalCompareOp(op));
+ Handle<Map> map = combined_type->AsClass()->Map();
+ PropertyAccessInfo value_of(this, LOAD, map,
+ isolate()->factory()->valueOf_string());
+ PropertyAccessInfo to_primitive(
+ this, LOAD, map, isolate()->factory()->to_primitive_symbol());
+ PropertyAccessInfo to_string(this, LOAD, map,
+ isolate()->factory()->toString_string());
+ PropertyAccessInfo to_string_tag(
+ this, LOAD, map, isolate()->factory()->to_string_tag_symbol());
+ if (to_primitive.CanAccessMonomorphic() && !to_primitive.IsFound() &&
+ to_string_tag.CanAccessMonomorphic() &&
+ (!to_string_tag.IsFound() || to_string_tag.IsData() ||
+ to_string_tag.IsDataConstant()) &&
+ value_of.CanAccessMonomorphic() && value_of.IsDataConstant() &&
+ value_of.constant().is_identical_to(isolate()->object_value_of()) &&
+ to_string.CanAccessMonomorphic() && to_string.IsDataConstant() &&
+ to_string.constant().is_identical_to(
+ isolate()->object_to_string())) {
+ // We depend on the prototype chain to stay the same, because we
+ // also need to deoptimize when someone installs @@toPrimitive
+ // or @@toStringTag somewhere in the prototype chain.
+ BuildCheckPrototypeMaps(handle(JSObject::cast(map->prototype())),
+ Handle<JSObject>::null());
+ AddCheckMap(left, map);
+ AddCheckMap(right, map);
+ // The caller expects a branch instruction, so make it happy.
+ return New<HBranch>(
+ graph()->GetConstantBool(op == Token::LTE || op == Token::GTE));
+ }
+ }
Bailout(kUnsupportedNonPrimitiveCompare);
return NULL;
}
@@ -11465,6 +11631,23 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HStringCompareAndBranch* result =
New<HStringCompareAndBranch>(left, right, op);
return result;
+ } else if (combined_type->Is(Type::Boolean())) {
+ AddCheckMap(left, isolate()->factory()->boolean_map());
+ AddCheckMap(right, isolate()->factory()->boolean_map());
+ if (Token::IsEqualityOp(op)) {
+ HCompareObjectEqAndBranch* result =
+ New<HCompareObjectEqAndBranch>(left, right);
+ return result;
+ }
+ left = Add<HLoadNamedField>(
+ left, nullptr,
+ HObjectAccess::ForOddballToNumber(Representation::Smi()));
+ right = Add<HLoadNamedField>(
+ right, nullptr,
+ HObjectAccess::ForOddballToNumber(Representation::Smi()));
+ HCompareNumericAndBranch* result =
+ New<HCompareNumericAndBranch>(left, right, op);
+ return result;
} else {
if (combined_rep.IsTagged() || combined_rep.IsNone()) {
HCompareGeneric* result = Add<HCompareGeneric>(
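[When both operands are known booleans, equality reduces to reference equality (true and false are singleton oddballs) and ordered comparison loads each oddball's cached to-number value — the slot the new ForOddballToNumber accessor names. A sketch of both halves:

#include <cassert>

// Oddball-style booleans carrying a cached numeric conversion,
// like the kToNumberOffset slot the new HObjectAccess reads.
struct Oddball { int to_number; };
const Oddball kTrue{1}, kFalse{0};

bool LessThan(const Oddball* a, const Oddball* b) {
  return a->to_number < b->to_number;  // ordered compare on 0/1
}

int main() {
  assert(LessThan(&kFalse, &kTrue));
  assert(!LessThan(&kTrue, &kTrue));
  // Equality needs no loads at all: booleans are singletons, so
  // pointer identity suffices (HCompareObjectEqAndBranch).
  const Oddball* x = &kTrue;
  assert(x == &kTrue);
}
]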
@@ -11527,6 +11710,11 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
void HOptimizedGraphBuilder::VisitSpread(Spread* expr) { UNREACHABLE(); }
+void HOptimizedGraphBuilder::VisitEmptyParentheses(EmptyParentheses* expr) {
+ UNREACHABLE();
+}
+
+
HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
// If we share optimized code between different closures, the
// this-function is not a constant, except inside an inlined body.
@@ -12030,12 +12218,30 @@ void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
+void HOptimizedGraphBuilder::GenerateToInteger(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* value = Pop();
- HIsObjectAndBranch* result = New<HIsObjectAndBranch>(value);
- return ast_context()->ReturnControl(result, call->id());
+ HValue* input = Pop();
+ if (input->type().IsSmi()) {
+ return ast_context()->ReturnValue(input);
+ } else {
+ IfBuilder if_inputissmi(this);
+ if_inputissmi.If<HIsSmiAndBranch>(input);
+ if_inputissmi.Then();
+ {
+ // Return the input value.
+ Push(input);
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+ if_inputissmi.Else();
+ {
+ Add<HPushArguments>(input);
+ Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kToInteger), 1));
+ Add<HSimulate>(call->id(), FIXED_SIMULATE);
+ }
+ if_inputissmi.End();
+ return ast_context()->ReturnValue(Pop());
+ }
}
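[%_ToInteger gets a Smi fast path: a value statically known to be a Smi flows through unchanged, and otherwise a runtime branch either returns the input (Smi at runtime) or falls back to Runtime::kToInteger. The fast/slow split in miniature — the slow path here is a stand-in, not the real ToInteger, which handles far more cases:

#include <cassert>
#include <cmath>

// Stand-in for Runtime::kToInteger on a double input.
double ToIntegerSlow(double v) { return std::trunc(v); }

double ToInteger(double v, bool is_smi) {
  if (is_smi) return v;     // fast path: already an integer
  return ToIntegerSlow(v);  // slow path: runtime call
}

int main() {
  assert(ToInteger(3, true) == 3.0);
  assert(ToInteger(3.7, false) == 3.0);
}
]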
@@ -12048,6 +12254,24 @@ void HOptimizedGraphBuilder::GenerateToObject(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateToString(CallRuntime* call) {
+ DCHECK_EQ(1, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ Callable callable = CodeFactory::ToString(isolate());
+ HValue* input = Pop();
+ if (input->type().IsString()) {
+ return ast_context()->ReturnValue(input);
+ } else {
+ HValue* stub = Add<HConstant>(callable.code());
+ HValue* values[] = {context(), input};
+ HInstruction* result =
+ New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+ Vector<HValue*>(values, arraysize(values)));
+ return ast_context()->ReturnInstruction(result, call->id());
+ }
+}
+
+
void HOptimizedGraphBuilder::GenerateIsJSProxy(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12351,8 +12575,7 @@ void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
HValue* right = Pop();
HValue* left = Pop();
- HInstruction* result =
- NewUncasted<HStringAdd>(left, right, strength(function_language_mode()));
+ HInstruction* result = NewUncasted<HStringAdd>(left, right);
return ast_context()->ReturnInstruction(result, call->id());
}
@@ -12367,16 +12590,6 @@ void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
}
-// Fast support for StringCompare.
-void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
- DCHECK_EQ(2, call->arguments()->length());
- CHECK_ALIVE(VisitExpressions(call->arguments()));
- PushArgumentsFromEnvironment(call->arguments()->length());
- HCallStub* result = New<HCallStub>(CodeStub::StringCompare, 2);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
void HOptimizedGraphBuilder::GenerateStringGetLength(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12449,6 +12662,23 @@ void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
}
+// Fast support for calls.
+void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
+ DCHECK_LE(2, call->arguments()->length());
+ CHECK_ALIVE(VisitExpressions(call->arguments()));
+ CallTrampolineDescriptor descriptor(isolate());
+ PushArgumentsFromEnvironment(call->arguments()->length() - 1);
+ HValue* trampoline = Add<HConstant>(isolate()->builtins()->Call());
+ HValue* target = Pop();
+ HValue* values[] = {context(), target,
+ Add<HConstant>(call->arguments()->length() - 2)};
+ HInstruction* result = New<HCallWithDescriptor>(
+ trampoline, call->arguments()->length() - 1, descriptor,
+ Vector<HValue*>(values, arraysize(values)));
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
// Fast call for custom callbacks.
void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
// 1 ~ The function to call is not itself an argument to the call.
@@ -12554,6 +12784,18 @@ void HOptimizedGraphBuilder::GenerateUnlikely(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateHasInPrototypeChain(CallRuntime* call) {
+ DCHECK_EQ(2, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ HValue* prototype = Pop();
+ HValue* object = Pop();
+ HHasInPrototypeChainAndBranch* result =
+ New<HHasInPrototypeChainAndBranch>(object, prototype);
+ return ast_context()->ReturnControl(result, call->id());
+}
+
+
void HOptimizedGraphBuilder::GenerateFixedArrayGet(CallRuntime* call) {
DCHECK(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
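[The %_HasInPrototypeChain intrinsic branches on whether prototype appears anywhere on object's chain; it is the same instruction that replaced HInstanceOfKnownGlobal for constant constructors earlier in this diff. A minimal chain walk — the real instruction additionally guards against access checks and exotic receivers:

#include <cassert>

struct Obj { const Obj* proto; };

// Walks object's prototype chain looking for `prototype`.
bool HasInPrototypeChain(const Obj* object, const Obj* prototype) {
  for (const Obj* p = object->proto; p != nullptr; p = p->proto)
    if (p == prototype) return true;
  return false;
}

int main() {
  Obj root{nullptr}, mid{&root}, leaf{&mid};
  assert(HasInPrototypeChain(&leaf, &root));
  assert(!HasInPrototypeChain(&root, &leaf));
}
]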
@@ -12586,6 +12828,17 @@ void HOptimizedGraphBuilder::GenerateTheHole(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateCreateIterResultObject(CallRuntime* call) {
+ DCHECK_EQ(2, call->arguments()->length());
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+ HValue* done = Pop();
+ HValue* value = Pop();
+ HValue* result = BuildCreateIterResultObject(value, done);
+ return ast_context()->ReturnValue(result);
+}
+
+
void HOptimizedGraphBuilder::GenerateJSCollectionGetTable(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12760,53 +13013,6 @@ void HOptimizedGraphBuilder::GenerateDebugIsActive(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateGetPrototype(CallRuntime* call) {
- DCHECK(call->arguments()->length() == 1);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- HValue* object = Pop();
-
- NoObservableSideEffectsScope no_effects(this);
-
- HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
- HValue* bit_field =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
- HValue* is_access_check_needed_mask =
- Add<HConstant>(1 << Map::kIsAccessCheckNeeded);
- HValue* is_access_check_needed_test = AddUncasted<HBitwise>(
- Token::BIT_AND, bit_field, is_access_check_needed_mask);
-
- HValue* proto =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForPrototype());
- HValue* proto_map =
- Add<HLoadNamedField>(proto, nullptr, HObjectAccess::ForMap());
- HValue* proto_bit_field =
- Add<HLoadNamedField>(proto_map, nullptr, HObjectAccess::ForMapBitField());
- HValue* is_hidden_prototype_mask =
- Add<HConstant>(1 << Map::kIsHiddenPrototype);
- HValue* is_hidden_prototype_test = AddUncasted<HBitwise>(
- Token::BIT_AND, proto_bit_field, is_hidden_prototype_mask);
-
- {
- IfBuilder needs_runtime(this);
- needs_runtime.If<HCompareNumericAndBranch>(
- is_access_check_needed_test, graph()->GetConstant0(), Token::NE);
- needs_runtime.OrIf<HCompareNumericAndBranch>(
- is_hidden_prototype_test, graph()->GetConstant0(), Token::NE);
-
- needs_runtime.Then();
- {
- Add<HPushArguments>(object);
- Push(Add<HCallRuntime>(
- call->name(), Runtime::FunctionForId(Runtime::kGetPrototype), 1));
- }
-
- needs_runtime.Else();
- Push(proto);
- }
- return ast_context()->ReturnValue(Pop());
-}
-
-
#undef CHECK_BAILOUT
#undef CHECK_ALIVE
@@ -12995,6 +13201,12 @@ void HEnvironment::Drop(int count) {
}
+void HEnvironment::Print() const {
+ OFStream os(stdout);
+ os << *this << "\n";
+}
+
+
HEnvironment* HEnvironment::Copy() const {
return new(zone()) HEnvironment(this, zone());
}
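[HEnvironment::Print() is a debugger convenience: it routes the existing operator<< for HEnvironment to stdout so an environment can be dumped from gdb. The idiom, reduced to a standalone example:

#include <iostream>

struct Env {
  int length = 3;
  void Print() const;  // callable as `p env.Print()` from a debugger
};

std::ostream& operator<<(std::ostream& os, const Env& e) {
  return os << "Env[length=" << e.length << "]";
}

void Env::Print() const { std::cout << *this << "\n"; }

int main() {
  Env env;
  env.Print();
}
]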
@@ -13110,16 +13322,13 @@ std::ostream& operator<<(std::ostream& os, const HEnvironment& env) {
void HTracer::TraceCompilation(CompilationInfo* info) {
Tag tag(this, "compilation");
+ base::SmartArrayPointer<char> name = info->GetDebugName();
if (info->IsOptimizing()) {
- Handle<String> name = info->literal()->debug_name();
- PrintStringProperty("name", name->ToCString().get());
+ PrintStringProperty("name", name.get());
PrintIndent();
- trace_.Add("method \"%s:%d\"\n",
- name->ToCString().get(),
- info->optimization_id());
+ trace_.Add("method \"%s:%d\"\n", name.get(), info->optimization_id());
} else {
- CodeStub::Major major_key = info->code_stub()->MajorKey();
- PrintStringProperty("name", CodeStub::MajorName(major_key, false));
+ PrintStringProperty("name", name.get());
PrintStringProperty("method", "stub");
}
PrintLongProperty("date",
@@ -13356,7 +13565,7 @@ void HTracer::FlushToFile() {
void HStatistics::Initialize(CompilationInfo* info) {
- if (info->shared_info().is_null()) return;
+ if (!info->has_shared_info()) return;
source_size_ += info->shared_info()->SourceSize();
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index c6953cdad5..c1215a33ba 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -613,6 +613,8 @@ class HEnvironment final : public ZoneObject {
void SetExpressionStackAt(int index_from_top, HValue* value);
HValue* RemoveExpressionStackAt(int index_from_top);
+ void Print() const;
+
HEnvironment* Copy() const;
HEnvironment* CopyWithoutHistory() const;
HEnvironment* CopyAsLoopHeader(HBasicBlock* block) const;
@@ -1356,6 +1358,9 @@ class HGraphBuilder {
HValue* hash,
LanguageMode language_mode);
+ // ES6 section 7.4.7 CreateIterResultObject ( value, done )
+ HValue* BuildCreateIterResultObject(HValue* value, HValue* done);
+
HValue* BuildRegExpConstructResult(HValue* length,
HValue* index,
HValue* input);
@@ -1443,7 +1448,7 @@ class HGraphBuilder {
ElementsKind kind,
HValue *dependency = NULL);
- HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin);
+ HValue* AddLoadJSBuiltin(int context_index);
HValue* EnforceNumberType(HValue* number, Type* expected);
HValue* TruncateToNumber(HValue* value, Type** expected);
@@ -1889,6 +1894,9 @@ class HGraphBuilder {
// the SourcePosition assuming that this position corresponds to the
// same function as current position_.
SourcePosition ScriptPositionToSourcePosition(int position) {
+ if (position == RelocInfo::kNoPosition) {
+ return SourcePosition::Unknown();
+ }
SourcePosition pos = position_;
pos.set_position(position - start_position_);
return pos;
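
The added early-return guards ScriptPositionToSourcePosition against the RelocInfo::kNoPosition sentinel; previously the sentinel would have flowed into the offset arithmetic below it. A hedged standalone model of the guard (names are placeholders):

    #include <cassert>

    constexpr int kNoPosition = -1;  // mirrors RelocInfo::kNoPosition

    struct SourcePosition {
      int value;
      static SourcePosition Unknown() { return {kNoPosition}; }
      bool IsUnknown() const { return value == kNoPosition; }
    };

    // Without the guard, kNoPosition - start would produce a bogus offset.
    SourcePosition ToSourcePosition(int position, int start) {
      if (position == kNoPosition) return SourcePosition::Unknown();
      return {position - start};
    }

    int main() {
      assert(ToSourcePosition(kNoPosition, 10).IsUnknown());
      assert(ToSourcePosition(42, 10).value == 32);
    }
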
@@ -2000,10 +2008,9 @@ inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HConstant* value) {
template<>
inline HCallRuntime* HGraphBuilder::Add<HCallRuntime>(
- Handle<String> name,
const Runtime::Function* c_function,
int argument_count) {
- HCallRuntime* instr = New<HCallRuntime>(name, c_function, argument_count);
+ HCallRuntime* instr = New<HCallRuntime>(c_function, argument_count);
if (graph()->info()->IsStub()) {
// When compiling code stubs, we don't want to save all double registers
// upon entry to the stub, but instead have the call runtime instruction
@@ -2020,21 +2027,42 @@ inline HInstruction* HGraphBuilder::AddUncasted<HCallRuntime>(
Handle<String> name,
const Runtime::Function* c_function,
int argument_count) {
- return Add<HCallRuntime>(name, c_function, argument_count);
+ return Add<HCallRuntime>(c_function, argument_count);
}
-template<>
-inline HContext* HGraphBuilder::New<HContext>() {
- return HContext::New(zone());
+template <>
+inline HParameter* HGraphBuilder::New<HParameter>(unsigned index) {
+ return HParameter::New(isolate(), zone(), nullptr, index);
}
-template<>
-inline HInstruction* HGraphBuilder::NewUncasted<HContext>() {
- return New<HContext>();
+template <>
+inline HParameter* HGraphBuilder::New<HParameter>(
+ unsigned index, HParameter::ParameterKind kind) {
+ return HParameter::New(isolate(), zone(), nullptr, index, kind);
}
+
+template <>
+inline HParameter* HGraphBuilder::New<HParameter>(
+ unsigned index, HParameter::ParameterKind kind, Representation r) {
+ return HParameter::New(isolate(), zone(), nullptr, index, kind, r);
+}
+
+
+template <>
+inline HPrologue* HGraphBuilder::New<HPrologue>() {
+ return HPrologue::New(zone());
+}
+
+
+template <>
+inline HContext* HGraphBuilder::New<HContext>() {
+ return HContext::New(zone());
+}
+
+
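
The hunk above replaces the two HContext helpers with a family of New<T> specializations (HParameter in three arities, HPrologue, HContext) that forward the builder's isolate and zone into each instruction's static factory. The underlying idiom, reduced to self-contained C++ (the real factories take more arguments):

    #include <iostream>

    struct Zone {};  // stand-in for V8's zone allocator

    struct Parameter {
      unsigned index;
      static Parameter* New(Zone* zone, unsigned index) {
        (void)zone;  // a real zone would own the allocation
        return new Parameter{index};
      }
    };

    struct Builder {
      Zone zone_;
      // Primary template declared; only explicit specializations are defined,
      // mirroring HGraphBuilder::New<I>.
      template <typename T>
      T* New(unsigned index);
    };

    template <>
    Parameter* Builder::New<Parameter>(unsigned index) {
      return Parameter::New(&zone_, index);  // forwards the hidden zone state
    }

    int main() {
      Builder b;
      std::cout << b.New<Parameter>(2u)->index << "\n";  // prints 2
    }
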
class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
public:
// A class encapsulating (lazily-allocated) break and continue blocks for
@@ -2181,6 +2209,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(IsRegExp) \
F(IsJSProxy) \
F(IsConstructCall) \
+ F(Call) \
F(CallFunction) \
F(ArgumentsLength) \
F(Arguments) \
@@ -2194,8 +2223,9 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(OneByteSeqStringSetChar) \
F(TwoByteSeqStringSetChar) \
F(ObjectEquals) \
- F(IsObject) \
+ F(ToInteger) \
F(ToObject) \
+ F(ToString) \
F(IsFunction) \
F(IsSpecObject) \
F(MathPow) \
@@ -2207,13 +2237,13 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(StringCharCodeAt) \
F(StringAdd) \
F(SubString) \
- F(StringCompare) \
F(RegExpExec) \
F(RegExpConstructResult) \
F(NumberToString) \
F(DebugIsActive) \
F(Likely) \
F(Unlikely) \
+ F(HasInPrototypeChain) \
/* Typed Arrays */ \
F(TypedArrayInitialize) \
F(DataViewInitialize) \
@@ -2242,9 +2272,10 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
F(JSCollectionGetTable) \
F(StringGetRawHashField) \
F(TheHole) \
+ /* ES6 Iterators */ \
+ F(CreateIterResultObject) \
/* Arrays */ \
F(HasFastPackedElements) \
- F(GetPrototype) \
/* Strings */ \
F(StringGetLength) \
/* JSValue */ \
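
These F(...) edits modify an X-macro table: each intrinsic is named once, and the list is re-expanded wherever declarations, switch cases, or name strings are needed, so adding F(Call) or dropping F(GetPrototype) updates every expansion site at once. A self-contained sketch of the idiom (entries chosen to echo the table above):

    #include <cstdio>

    // One authoritative list, in the style of the V8 table above.
    #define FOR_EACH_INTRINSIC(F) \
      F(Call)                     \
      F(ToString)                 \
      F(CreateIterResultObject)

    // Expansion 1: an enum of intrinsic ids.
    enum Intrinsic {
    #define DECLARE_ID(Name) k##Name,
      FOR_EACH_INTRINSIC(DECLARE_ID)
    #undef DECLARE_ID
      kCount
    };

    // Expansion 2: matching name strings, kept in sync automatically.
    static const char* kNames[] = {
    #define DECLARE_NAME(Name) #Name,
      FOR_EACH_INTRINSIC(DECLARE_NAME)
    #undef DECLARE_NAME
    };

    int main() {
      for (int i = 0; i < kCount; ++i) std::printf("%d: %s\n", i, kNames[i]);
    }
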
@@ -2514,7 +2545,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
public:
PropertyAccessInfo(HOptimizedGraphBuilder* builder,
PropertyAccessType access_type, Handle<Map> map,
- Handle<String> name)
+ Handle<Name> name)
: builder_(builder),
access_type_(access_type),
map_(map),
@@ -2540,7 +2571,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
bool NeedsWrappingFor(Handle<JSFunction> target) const;
Handle<Map> map();
- Handle<String> name() const { return name_; }
+ Handle<Name> name() const { return name_; }
bool IsJSObjectFieldAccessor() {
int offset; // unused
@@ -2551,10 +2582,10 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
int offset;
if (Accessors::IsJSObjectFieldAccessor(map_, name_, &offset)) {
if (IsStringType()) {
- DCHECK(String::Equals(isolate()->factory()->length_string(), name_));
+ DCHECK(Name::Equals(isolate()->factory()->length_string(), name_));
*access = HObjectAccess::ForStringLength();
} else if (IsArrayType()) {
- DCHECK(String::Equals(isolate()->factory()->length_string(), name_));
+ DCHECK(Name::Equals(isolate()->factory()->length_string(), name_));
*access = HObjectAccess::ForArrayLength(map_->elements_kind());
} else {
*access = HObjectAccess::ForMapAndOffset(map_, offset);
@@ -2687,7 +2718,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
HOptimizedGraphBuilder* builder_;
PropertyAccessType access_type_;
Handle<Map> map_;
- Handle<String> name_;
+ Handle<Name> name_;
Handle<JSObject> holder_;
Handle<JSFunction> accessor_;
Handle<JSObject> api_holder_;
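
Widening PropertyAccessInfo from Handle<String> to Handle<Name> lets the same property-access machinery handle symbol-named keys, since strings and symbols are both names in V8's heap hierarchy. A toy model of the widening (not V8's actual types):

    #include <iostream>
    #include <string>

    struct Name { virtual ~Name() = default; };         // common key base
    struct String : Name { std::string chars; };        // string-named keys
    struct Symbol : Name { std::string description; };  // symbol-named keys

    // Before: Describe(const String&) could not accept symbol keys at all.
    void Describe(const Name& key) {
      if (auto* s = dynamic_cast<const String*>(&key))
        std::cout << "string key: " << s->chars << "\n";
      else
        std::cout << "symbol key\n";
    }

    int main() {
      String length; length.chars = "length";
      Symbol iterator;
      Describe(length);    // string key: length
      Describe(iterator);  // symbol key
    }
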
@@ -2835,9 +2866,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
- HInstruction* NewPlainFunctionCall(HValue* fun,
- int argument_count,
- bool pass_argument_count);
+ HInstruction* NewPlainFunctionCall(HValue* fun, int argument_count);
HInstruction* NewArgumentAdaptorCall(HValue* fun, HValue* context,
int argument_count,
diff --git a/deps/v8/src/i18n.js b/deps/v8/src/i18n.js
index b825ece6bc..b9d659c442 100644
--- a/deps/v8/src/i18n.js
+++ b/deps/v8/src/i18n.js
@@ -19,6 +19,7 @@
var ArrayIndexOf;
var ArrayJoin;
+var ArrayPush;
var IsFinite;
var IsNaN;
var GlobalBoolean = global.Boolean;
@@ -27,6 +28,8 @@ var GlobalNumber = global.Number;
var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
var MathFloor;
+var ObjectDefineProperties = utils.ImportNow("ObjectDefineProperties");
+var ObjectDefineProperty = utils.ImportNow("ObjectDefineProperty");
var RegExpTest;
var StringIndexOf;
var StringLastIndexOf;
@@ -39,6 +42,7 @@ var StringSubstring;
utils.Import(function(from) {
ArrayIndexOf = from.ArrayIndexOf;
ArrayJoin = from.ArrayJoin;
+ ArrayPush = from.ArrayPush;
IsFinite = from.IsFinite;
IsNaN = from.IsNaN;
MathFloor = from.MathFloor;
@@ -53,11 +57,6 @@ utils.Import(function(from) {
ToNumber = from.ToNumber;
});
-utils.ImportNow(function(from) {
- ObjectDefineProperties = from.ObjectDefineProperties;
- ObjectDefineProperty = from.ObjectDefineProperty;
-});
-
// -------------------------------------------------------------------
var Intl = {};
@@ -298,7 +297,7 @@ function lookupSupportedLocalesOf(requestedLocales, availableLocales) {
do {
if (!IS_UNDEFINED(availableLocales[locale])) {
// Push the requested locale, not the resolved one.
- %_CallFunction(matchedLocales, requestedLocales[i], $arrayPush);
+ %_CallFunction(matchedLocales, requestedLocales[i], ArrayPush);
break;
}
// Truncate locale if possible, if not break.
@@ -715,7 +714,7 @@ function initializeLocaleList(locales) {
} else {
// We allow a single string localeID.
if (typeof locales === 'string') {
- %_CallFunction(seen, canonicalizeLanguageTag(locales), $arrayPush);
+ %_CallFunction(seen, canonicalizeLanguageTag(locales), ArrayPush);
return freezeArray(seen);
}
@@ -729,7 +728,7 @@ function initializeLocaleList(locales) {
var tag = canonicalizeLanguageTag(value);
if (%_CallFunction(seen, tag, ArrayIndexOf) === -1) {
- %_CallFunction(seen, tag, $arrayPush);
+ %_CallFunction(seen, tag, ArrayPush);
}
}
}
@@ -775,7 +774,7 @@ function isValidLanguageTag(locale) {
if (%_CallFunction(GetLanguageVariantRE(), value, RegExpTest) &&
extensions.length === 0) {
if (%_CallFunction(variants, value, ArrayIndexOf) === -1) {
- %_CallFunction(variants, value, $arrayPush);
+ %_CallFunction(variants, value, ArrayPush);
} else {
return false;
}
@@ -783,7 +782,7 @@ function isValidLanguageTag(locale) {
if (%_CallFunction(GetLanguageSingletonRE(), value, RegExpTest)) {
if (%_CallFunction(extensions, value, ArrayIndexOf) === -1) {
- %_CallFunction(extensions, value, $arrayPush);
+ %_CallFunction(extensions, value, ArrayPush);
} else {
return false;
}
@@ -2006,10 +2005,10 @@ OverrideFunction(GlobalString.prototype, 'normalize', function() {
}
CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
- var s = TO_STRING_INLINE(this);
+ var s = TO_STRING(this);
var formArg = %_Arguments(0);
- var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING_INLINE(formArg);
+ var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING(formArg);
var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 70b7a6727b..17ae01ad53 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -133,7 +133,7 @@ void RelocInfo::set_target_object(Object* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -199,7 +199,7 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -280,7 +280,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
@@ -305,7 +305,7 @@ void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
@@ -459,7 +459,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(p, sizeof(int32_t));
+ Assembler::FlushICacheWithoutIsolate(p, sizeof(int32_t));
}
}
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 2e17fcb007..9f64a6005f 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -1587,12 +1587,12 @@ void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
}
-void Assembler::j(Condition cc, Handle<Code> code) {
+void Assembler::j(Condition cc, Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
// 0000 1111 1000 tttn #32-bit disp
EMIT(0x0F);
EMIT(0x80 | cc);
- emit(code, RelocInfo::CODE_TARGET);
+ emit(code, rmode);
}
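
The extra rmode parameter on Assembler::j is defaulted to RelocInfo::CODE_TARGET, so existing call sites compile unchanged while new callers can request a different relocation mode. The API-evolution pattern in isolation (hypothetical names):

    #include <iostream>

    enum class RelocMode { kCodeTarget, kNone };

    // Old signature: void Jump(int target); the default keeps those call
    // sites source-compatible while allowing an explicit mode.
    void Jump(int target, RelocMode mode = RelocMode::kCodeTarget) {
      std::cout << "jump to " << target
                << (mode == RelocMode::kCodeTarget ? " (code target)\n"
                                                   : " (no reloc)\n");
    }

    int main() {
      Jump(0x40);                    // existing callers compile unchanged
      Jump(0x40, RelocMode::kNone);  // new callers opt in explicitly
    }
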
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 3daa294aae..57987bc751 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -856,7 +856,8 @@ class Assembler : public AssemblerBase {
Label* L,
Label::Distance distance = Label::kFar);
void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
- void j(Condition cc, Handle<Code> code);
+ void j(Condition cc, Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
// Floating-point operations
void fld(int i);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 7a055bd876..ccdd01c7a3 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -24,13 +24,20 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// -- eax : number of arguments excluding receiver
// -- edi : called function (only guaranteed when
// extra_args requires it)
- // -- esi : context
// -- esp[0] : return address
// -- esp[4] : last argument
// -- ...
// -- esp[4 * argc] : first argument (argc == eax)
// -- esp[4 * (argc +1)] : receiver
// -----------------------------------
+ __ AssertFunction(edi);
+
+  // Make sure we operate in the context of the called function (for example,
+  // ConstructStubs implemented in C++ will run in the context of the caller
+  // instead of the callee, due to the way that [[Construct]] is defined for
+  // ordinary functions).
+ // TODO(bmeurer): Can we make this more robust?
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
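
The adaptor now reloads esi from the callee in edi rather than trusting the incoming context, because (as the comment notes) [[Construct]] can enter the stub in the caller's context. A rough standalone model of why the reload matters (illustrative structures):

    #include <cassert>

    struct Context { int id; };
    struct JSFunction { Context* context; };

    Context* current_context = nullptr;  // models the esi register

    int RunBuiltin(JSFunction* callee) {
      // mov esi, FieldOperand(edi, JSFunction::kContextOffset)
      current_context = callee->context;  // don't trust the incoming esi
      return current_context->id;
    }

    int main() {
      Context caller_ctx{1}, callee_ctx{2};
      JSFunction f{&callee_ctx};
      current_context = &caller_ctx;  // entered from the caller's context
      assert(RunBuiltin(&f) == 2);    // the builtin sees the callee's context
    }
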
@@ -100,8 +107,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_memento) {
+ bool is_api_function) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
@@ -109,9 +115,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- edx: original constructor
// -----------------------------------
- // Should never create mementos for api functions.
- DCHECK(!is_api_function || !create_memento);
-
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
@@ -192,9 +195,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// eax: initial map
__ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
__ shl(edi, kPointerSizeLog2);
- if (create_memento) {
- __ add(edi, Immediate(AllocationMemento::kSize));
- }
__ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
@@ -203,7 +203,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
- // edi: start of next object (including memento if create_memento)
+ // edi: start of next object
__ mov(Operand(ebx, JSObject::kMapOffset), eax);
__ mov(ecx, factory->empty_fixed_array());
__ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
@@ -211,7 +211,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Set extra fields in the newly allocated object.
// eax: initial map
// ebx: JSObject
- // edi: start of next object (including memento if create_memento)
+ // edi: start of next object
// esi: slack tracking counter (non-API function case)
__ mov(edx, factory->undefined_value());
__ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
@@ -244,22 +244,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&no_inobject_slack_tracking);
}
- if (create_memento) {
- __ lea(esi, Operand(edi, -AllocationMemento::kSize));
- __ InitializeFieldsWithFiller(ecx, esi, edx);
-
- // Fill in memento fields if necessary.
- // esi: points to the allocated but uninitialized memento.
- __ mov(Operand(esi, AllocationMemento::kMapOffset),
- factory->allocation_memento_map());
- // Get the cell or undefined.
- __ mov(edx, Operand(esp, 3 * kPointerSize));
- __ AssertUndefinedOrAllocationSite(edx);
- __ mov(Operand(esi, AllocationMemento::kAllocationSiteOffset),
- edx);
- } else {
- __ InitializeFieldsWithFiller(ecx, edi, edx);
- }
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on.
@@ -275,12 +260,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// edx: original constructor
__ bind(&rt_call);
int offset = kPointerSize;
- if (create_memento) {
- // Get the cell or allocation site.
- __ mov(edi, Operand(esp, kPointerSize * 3));
- __ push(edi); // argument 1: allocation site
- offset += kPointerSize;
- }
// Must restore esi (context) and edi (constructor) before calling
// runtime.
@@ -288,35 +267,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ mov(edi, Operand(esp, offset));
__ push(edi); // argument 2/1: constructor function
__ push(edx); // argument 3/2: original constructor
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
+ __ CallRuntime(Runtime::kNewObject, 2);
__ mov(ebx, eax); // store result in ebx
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- Label count_incremented;
- if (create_memento) {
- __ jmp(&count_incremented);
- }
-
// New object allocated.
// ebx: newly allocated object
__ bind(&allocated);
- if (create_memento) {
- __ mov(ecx, Operand(esp, 3 * kPointerSize));
- __ cmp(ecx, masm->isolate()->factory()->undefined_value());
- __ j(equal, &count_incremented);
- // ecx is an AllocationSite. We are creating a memento from it, so we
- // need to increment the memento create count.
- __ add(FieldOperand(ecx, AllocationSite::kPretenureCreateCountOffset),
- Immediate(Smi::FromInt(1)));
- __ bind(&count_incremented);
- }
-
// Restore the parameters.
__ pop(edx); // new.target
__ pop(edi); // Constructor function.
@@ -405,12 +362,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true);
}
@@ -492,7 +449,6 @@ enum IsTagged { kEaxIsSmiTagged, kEaxIsUntaggedInt };
// Clobbers ecx, edx, edi; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm,
- const int calleeOffset,
IsTagged eax_is_tagged) {
// eax : the number of items to be pushed to the stack
//
@@ -517,12 +473,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ push(Operand(ebp, calleeOffset)); // push this
- if (eax_is_tagged == kEaxIsUntaggedInt) {
- __ SmiTag(eax);
- }
- __ push(eax);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&okay);
}
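
With the old STACK_OVERFLOW builtin gone, the failure path no longer needs the callee re-pushed, which is why the calleeOffset parameter disappears; the stub now just calls Runtime::kThrowStackOverflow. The check itself asks whether the arguments still fit between esp and the real stack limit; roughly, in standalone C++:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    constexpr std::size_t kPointerSize = sizeof(void*);

    // True when pushing `argc` slots still leaves headroom above the real
    // stack limit. The difference is compared signed, like j(greater, &okay).
    bool HasEnoughStack(std::uintptr_t sp, std::uintptr_t real_limit,
                        std::size_t argc) {
      return static_cast<std::intptr_t>(sp - real_limit) >
             static_cast<std::intptr_t>(argc * kPointerSize);
    }

    int main() {
      std::uintptr_t limit = 0x1000, sp = 0x1040;  // 64 bytes of headroom
      std::printf("%d\n", HasEnoughStack(sp, limit, 4));    // 1: fits
      std::printf("%d\n", HasEnoughStack(sp, limit, 100));  // 0: would throw
    }
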
@@ -538,15 +489,16 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
+    // Set up the context (we need to use the caller context from the isolate).
+ ExternalReference context_address(Isolate::kContextAddress,
+ masm->isolate());
+ __ mov(esi, Operand::StaticVariable(context_address));
+
// Load the previous frame pointer (ebx) to access C arguments
__ mov(ebx, Operand(ebp, 0));
- // Get the function from the frame and setup the context.
- __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
- __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
-
// Push the function and the receiver onto the stack.
- __ push(ecx);
+ __ push(Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
__ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
// Load the number of arguments and setup pointer to the arguments.
@@ -554,17 +506,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
// Check if we have enough stack space to push all arguments.
- // The function is the first thing that was pushed above after entering
- // the internal frame.
- const int kFunctionOffset =
- InternalFrameConstants::kCodeOffset - kPointerSize;
// Expects argument count in eax. Clobbers ecx, edx, edi.
- Generate_CheckStackOverflow(masm, kFunctionOffset, kEaxIsUntaggedInt);
+ Generate_CheckStackOverflow(masm, kEaxIsUntaggedInt);
// Copy arguments to the stack in a loop.
Label loop, entry;
__ Move(ecx, Immediate(0));
- __ jmp(&entry);
+ __ jmp(&entry, Label::kNear);
__ bind(&loop);
__ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
__ push(Operand(edx, 0)); // dereference handle
@@ -573,21 +521,18 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ cmp(ecx, eax);
__ j(not_equal, &loop);
- // Get the function from the stack and call it.
- // kPointerSize for the receiver.
- __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
+ // Load the previous frame pointer (ebx) to access C arguments
+ __ mov(ebx, Operand(ebp, 0));
+
+ // Get the new.target and function from the frame.
+ __ mov(edx, Operand(ebx, EntryFrameConstants::kNewTargetArgOffset));
+ __ mov(edi, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
// Invoke the code.
- if (is_construct) {
- // No type feedback cell is available
- __ mov(ebx, masm->isolate()->factory()->undefined_value());
- CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper());
- }
+ Handle<Code> builtin = is_construct
+ ? masm->isolate()->builtins()->Construct()
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
    // Exit the internal frame. Notice that this also removes the empty
    // context and the function left on the stack by the code
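
The entry trampoline no longer builds a CallConstructStub or calls InvokeFunction directly; it loads new.target and the function from the entry frame and tail-dispatches to one of two universal builtins. The selection step, restated as plain C++ dispatch (hypothetical function types):

    #include <cstdio>

    using Builtin = void (*)(int argc);

    void CallBuiltin(int argc) { std::printf("Call with %d args\n", argc); }
    void ConstructBuiltin(int argc) {
      std::printf("Construct with %d args\n", argc);
    }

    // Mirrors: builtin = is_construct ? builtins()->Construct()
    //                                 : builtins()->Call();
    void EntryTrampoline(bool is_construct, int argc) {
      Builtin builtin = is_construct ? ConstructBuiltin : CallBuiltin;
      builtin(argc);
    }

    int main() {
      EntryTrampoline(false, 2);  // Call with 2 args
      EntryTrampoline(true, 2);   // Construct with 2 args
    }
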
@@ -660,7 +605,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::address_of_real_stack_limit(masm->isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -725,8 +670,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ add(kInterpreterDispatchTableRegister,
Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
- // TODO(rmcilroy) Push our context as a stack located parameter of the
- // bytecode handler.
+ // Push context as a stack located parameter to the bytecode handler.
+ DCHECK_EQ(-1, kInterpreterContextSpillSlot);
+ __ push(esi);
// Dispatch to the first bytecode handler for the function.
__ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
@@ -751,9 +697,14 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// Leave the frame (also dropping the register file).
__ leave();
- // Return droping receiver + arguments.
- // TODO(rmcilroy): Get number of arguments from BytecodeArray.
- __ Ret(1 * kPointerSize, ecx);
+
+ // Drop receiver + arguments and return.
+ __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kParameterSizeOffset));
+ __ pop(ecx);
+ __ add(esp, ebx);
+ __ push(ecx);
+ __ ret(0);
}
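
Instead of a hard-coded one-slot return, the exit trampoline now reads the caller's argument footprint from the BytecodeArray (kParameterSizeOffset) and drops receiver plus arguments while preserving the return address. The pop/add/push/ret sequence, modeled with an explicit stack:

    #include <cassert>
    #include <vector>

    // back() models esp[0]. Mirrors: pop ecx; add esp, ebx; push ecx; ret 0.
    int ReturnDroppingArgs(std::vector<int>& stack, int parameter_bytes) {
      int ret_addr = stack.back();
      stack.pop_back();                              // pop ecx
      for (int i = 0; i < parameter_bytes / 4; ++i)  // ia32: 4-byte slots
        stack.pop_back();                            // add esp, ebx
      stack.push_back(ret_addr);                     // push ecx
      return stack.back();                           // ret 0 lands here
    }

    int main() {
      // receiver + 2 arguments below the return address 0xBEEF
      std::vector<int> stack = {7, 8, 9, 0xBEEF};
      assert(ReturnDroppingArgs(stack, /*parameter_bytes=*/12) == 0xBEEF);
      assert(stack.size() == 1);  // only the return address remains
    }
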
@@ -950,161 +901,50 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
+// static
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- Factory* factory = masm->isolate()->factory();
-
+ // Stack Layout:
+ // esp[0] : Return address
+  //  esp[4]           : Argument n
+  //  esp[8]           : Argument n-1
+  //  ...
+  //  esp[4 * n]       : Argument 1
+  //  esp[4 * (n + 1)] : Receiver (callable to call)
+ //
+ // eax contains the number of arguments, n, not counting the receiver.
+ //
// 1. Make sure we have at least one argument.
- { Label done;
+ {
+ Label done;
__ test(eax, eax);
- __ j(not_zero, &done);
- __ pop(ebx);
- __ push(Immediate(factory->undefined_value()));
- __ push(ebx);
+ __ j(not_zero, &done, Label::kNear);
+ __ PopReturnAddressTo(ebx);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushReturnAddressFrom(ebx);
__ inc(eax);
__ bind(&done);
}
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- Label slow, non_function;
- // 1 ~ return address.
- __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ JumpIfSmi(edi, &non_function);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
-
-
- // 3a. Patch the first argument if necessary when calling a function.
- Label shift_arguments;
- __ Move(edx, Immediate(0)); // indicate regular JS_FUNCTION
- { Label convert_to_object, use_global_proxy, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &shift_arguments);
-
- // Do not transform the receiver for natives (shared already in ebx).
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &shift_arguments);
-
- // Compute the receiver in sloppy mode.
- __ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
-
- // Call ToObject on the receiver if it is not an object, or use the
- // global object if it is null or undefined.
- __ JumpIfSmi(ebx, &convert_to_object);
- __ cmp(ebx, factory->null_value());
- __ j(equal, &use_global_proxy);
- __ cmp(ebx, factory->undefined_value());
- __ j(equal, &use_global_proxy);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &shift_arguments);
-
- __ bind(&convert_to_object);
-
- { // In order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(eax);
- __ push(eax);
-
- __ mov(eax, ebx);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(ebx, eax);
- __ Move(edx, Immediate(0)); // restore
-
- __ pop(eax);
- __ SmiUntag(eax);
- }
-
- // Restore the function to edi.
- __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ jmp(&patch_receiver);
-
- __ bind(&use_global_proxy);
- __ mov(ebx,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset));
-
- __ bind(&patch_receiver);
- __ mov(Operand(esp, eax, times_4, 0), ebx);
-
- __ jmp(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ Move(edx, Immediate(1)); // indicate function proxy
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(equal, &shift_arguments);
- __ bind(&non_function);
- __ Move(edx, Immediate(2)); // indicate non-function
+ // 2. Get the callable to call (passed as receiver) from the stack.
+ __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- __ mov(Operand(esp, eax, times_4, 0), edi);
-
- // 4. Shift arguments and return address one slot down on the stack
+ // 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
- __ bind(&shift_arguments);
- { Label loop;
+ {
+ Label loop;
__ mov(ecx, eax);
__ bind(&loop);
- __ mov(ebx, Operand(esp, ecx, times_4, 0));
- __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
+ __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
+ __ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), ebx);
__ dec(ecx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(ebx); // Discard copy of return address.
+ __ pop(ebx); // Discard copy of return address.
__ dec(eax); // One fewer argument (first argument is new receiver).
}
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- { Label function, non_proxy;
- __ test(edx, edx);
- __ j(zero, &function);
- __ Move(ebx, Immediate(0));
- __ cmp(edx, Immediate(1));
- __ j(not_equal, &non_proxy);
-
- __ pop(edx); // return address
- __ push(edi); // re-add proxy object as additional argument
- __ push(edx);
- __ inc(eax);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
- __ SmiUntag(ebx);
- __ cmp(eax, ebx);
- __ j(not_equal,
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
-
- ParameterCount expected(0);
- __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
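
After this rewrite Generate_FunctionCall only normalizes the stack: ensure at least one argument, load the callable from the receiver slot, slide every slot (including the return address) down by one so the first argument becomes the receiver, then jump to the generic Call builtin. The copy loop, simulated with an index-addressed array:

    #include <cassert>
    #include <vector>

    // stack[0] models esp[0] (the return address); higher indices are deeper
    // slots: [ret, arg_n, ..., arg_1, receiver], with argc = n.
    int ShiftArguments(std::vector<int>& stack, int argc) {
      for (int i = argc; i >= 0; --i)  // mov ebx, [esp+ecx*4]; mov [..+4], ebx
        stack[i + 1] = stack[i];
      stack.erase(stack.begin());      // pop ebx: discard duplicated ret addr
      return argc - 1;                 // dec eax: one fewer argument
    }

    int main() {
      std::vector<int> stack = {0xBEEF, 30, 20, 10, /*receiver*/ 99};
      int argc = ShiftArguments(stack, 3);
      assert(argc == 2);
      // Old arg_1 (10) is now the receiver; the old receiver (99) is gone.
      assert(stack == (std::vector<int>{0xBEEF, 30, 20, 10}));
    }
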
@@ -1183,100 +1023,32 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
__ push(edi);
- __ push(Operand(ebp, kFunctionOffset)); // push this
+ __ push(Operand(ebp, kFunctionOffset)); // push this
__ push(Operand(ebp, kArgumentsOffset)); // push arguments
if (targetIsArgument) {
- __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
}
- Generate_CheckStackOverflow(masm, kFunctionOffset, kEaxIsSmiTagged);
+ Generate_CheckStackOverflow(masm, kEaxIsSmiTagged);
// Push current index and limit.
const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(eax); // limit
- __ push(Immediate(0)); // index
-
- // Get the receiver.
- __ mov(ebx, Operand(ebp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver, use_global_proxy;
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &push_receiver);
-
- Factory* factory = masm->isolate()->factory();
-
- // Do not transform the receiver for natives (shared already in ecx).
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &push_receiver);
-
- // Compute the receiver in sloppy mode.
- // Call ToObject on the receiver if it is not an object, or use the
- // global object if it is null or undefined.
- __ JumpIfSmi(ebx, &call_to_object);
- __ cmp(ebx, factory->null_value());
- __ j(equal, &use_global_proxy);
- __ cmp(ebx, factory->undefined_value());
- __ j(equal, &use_global_proxy);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &push_receiver);
-
- __ bind(&call_to_object);
- __ mov(eax, ebx);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(ebx, eax);
- __ jmp(&push_receiver);
-
- __ bind(&use_global_proxy);
- __ mov(ebx,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(ebx);
+ __ Push(eax); // limit
+ __ Push(Immediate(0)); // index
+ __ Push(Operand(ebp, kReceiverOffset)); // receiver
// Loop over the arguments array, pushing each value to the stack
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
- // Call the function.
- Label call_proxy;
- ParameterCount actual(eax);
+ // Call the callable.
+ // TODO(bmeurer): This should be a tail call according to ES6.
__ mov(edi, Operand(ebp, kFunctionOffset));
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &call_proxy);
- __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
-
- frame_scope.GenerateLeaveFrame();
- __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
-
- // Call the function proxy.
- __ bind(&call_proxy);
- __ push(edi); // add function proxy as last argument
- __ inc(eax);
- __ Move(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
// Leave internal frame.
}
@@ -1326,9 +1098,10 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
__ push(Operand(ebp, kFunctionOffset));
__ push(Operand(ebp, kArgumentsOffset));
__ push(Operand(ebp, kNewTargetOffset));
- __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
- Generate_CheckStackOverflow(masm, kFunctionOffset, kEaxIsSmiTagged);
+ Generate_CheckStackOverflow(masm, kEaxIsSmiTagged);
// Push current index and limit.
const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
@@ -1432,7 +1205,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
@@ -1440,120 +1214,137 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1);
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ Assert(equal, kUnexpectedStringFunction);
+ // 1. Load the first argument into eax and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ test(eax, eax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ mov(eax, ebx);
}
- // Load the first argument into eax and get rid of the rest
- // (including the receiver).
- Label no_arguments;
- __ test(eax, eax);
- __ j(zero, &no_arguments);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- __ pop(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ push(ecx);
- __ mov(eax, ebx);
-
- // Lookup the argument in the number to string cache.
- Label not_cached, argument_is_string;
- __ LookupNumberStringCache(eax, // Input.
- ebx, // Result.
- ecx, // Scratch 1.
- edx, // Scratch 2.
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1);
- __ bind(&argument_is_string);
- // ----------- S t a t e -------------
- // -- ebx : argument converted to string
- // -- edi : constructor function
- // -- esp[0] : return address
- // -----------------------------------
+  // 2a. At least one argument: return eax if it's a string, otherwise
+ // dispatch to appropriate conversion.
+ Label to_string, symbol_descriptive_string;
+ {
+ __ JumpIfSmi(eax, &to_string, Label::kNear);
+ STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+ __ j(above, &to_string, Label::kNear);
+ __ j(equal, &symbol_descriptive_string, Label::kNear);
+ __ Ret();
+ }
- // Allocate a JSValue and put the tagged pointer into eax.
- Label gc_required;
- __ Allocate(JSValue::kSize,
- eax, // Result.
- ecx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
-
- // Set the map.
- __ LoadGlobalFunctionInitialMap(edi, ecx);
- if (FLAG_debug_code) {
- __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
- JSValue::kSize >> kPointerSizeLog2);
- __ Assert(equal, kUnexpectedStringWrapperInstanceSize);
- __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
- __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
+ // 2b. No arguments, return the empty string (and pop the receiver).
+ __ bind(&no_arguments);
+ {
+ __ LoadRoot(eax, Heap::kempty_stringRootIndex);
+ __ ret(1 * kPointerSize);
}
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
- // Set properties and elements.
- Factory* factory = masm->isolate()->factory();
- __ Move(ecx, Immediate(factory->empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
+ // 3a. Convert eax to a string.
+ __ bind(&to_string);
+ {
+ ToStringStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ }
- // Set the value.
- __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+ // 3b. Convert symbol in eax to a string.
+ __ bind(&symbol_descriptive_string);
+ {
+ __ PopReturnAddressTo(ecx);
+ __ Push(eax);
+ __ PushReturnAddressFrom(ecx);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ }
+}
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- // We're done. Return.
- __ ret(0);
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &convert_argument);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
- __ j(NegateCondition(is_string), &convert_argument);
- __ mov(ebx, eax);
- __ IncrementCounter(counters->string_ctor_string_value(), 1);
- __ jmp(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into ebx.
- __ bind(&convert_argument);
- __ IncrementCounter(counters->string_ctor_conversions(), 1);
+ // 1. Load the first argument into ebx and get rid of the rest (including the
+ // receiver).
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edi); // Preserve the function.
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(edi);
+ Label no_arguments, done;
+ __ test(eax, eax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&no_arguments);
+ __ LoadRoot(ebx, Heap::kempty_stringRootIndex);
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
}
- __ mov(ebx, eax);
- __ jmp(&argument_is_string);
- // Load the empty string into ebx, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ Move(ebx, Immediate(factory->empty_string()));
- __ pop(ecx);
- __ lea(esp, Operand(esp, kPointerSize));
- __ push(ecx);
- __ jmp(&argument_is_string);
+ // 2. Make sure ebx is a string.
+ {
+ Label convert, done_convert;
+ __ JumpIfSmi(ebx, &convert, Label::kNear);
+ __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, edx);
+ __ j(below, &done_convert);
+ __ bind(&convert);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ToStringStub stub(masm->isolate());
+ __ Push(edi);
+ __ Move(eax, ebx);
+ __ CallStub(&stub);
+ __ Move(ebx, eax);
+ __ Pop(edi);
+ }
+ __ bind(&done_convert);
+ }
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1);
+ // 3. Allocate a JSValue wrapper for the string.
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(ebx);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ // ----------- S t a t e -------------
+ // -- ebx : the first argument
+ // -- edi : constructor function
+ // -----------------------------------
+
+ Label allocate, done_allocate;
+ __ Allocate(JSValue::kSize, eax, ecx, no_reg, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Initialize the JSValue in eax.
+ __ LoadGlobalFunctionInitialMap(edi, ecx);
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+ masm->isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+ masm->isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fallback to the runtime to allocate in new space.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx);
+ __ Push(edi);
+ __ Push(Smi::FromInt(JSValue::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ Pop(edi);
+ __ Pop(ebx);
+ }
+ __ jmp(&done_allocate);
}
- __ ret(0);
}
@@ -1618,6 +1409,258 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edi : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+
+ Label convert, convert_global_proxy, convert_to_object, done_convert;
+ __ AssertFunction(edi);
+ // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
+ // slot is "classConstructor".
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+ SharedFunctionInfo::kStrictModeByteOffset);
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ __ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
+ (1 << SharedFunctionInfo::kNativeBitWithinByte) |
+ (1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_zero, &done_convert);
+ {
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
+
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- ecx : the receiver
+ // -- edx : the shared function info.
+ // -- edi : the function to call (checked to be a JSFunction)
+ // -- esi : the function context.
+ // -----------------------------------
+
+ Label convert_receiver;
+ __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
+ __ j(above_equal, &done_convert);
+ __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex, &convert_global_proxy,
+ Label::kNear);
+ __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
+ Label::kNear);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(ecx);
+ }
+ __ jmp(&convert_receiver);
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(eax);
+ __ Push(eax);
+ __ Push(edi);
+ __ mov(eax, ecx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(ecx, eax);
+ __ Pop(edi);
+ __ Pop(eax);
+ __ SmiUntag(eax);
+ }
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the shared function info.
+ // -- edi : the function to call (checked to be a JSFunction)
+ // -- esi : the function context.
+ // -----------------------------------
+
+ __ mov(ebx,
+ FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(ebx);
+ ParameterCount actual(eax);
+ ParameterCount expected(ebx);
+ __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), expected,
+ actual, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
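
The conversion block in Generate_CallFunction encodes the sloppy-mode receiver rules of ES6 9.2.1 [[Call]]: strict and native functions see the receiver untouched; otherwise undefined and null become the global proxy and other primitives go through ToObject. As a standalone decision function (types are illustrative):

    #include <cassert>

    enum class Kind { kUndefined, kNull, kObject, kPrimitive };
    struct Receiver { Kind kind; bool wrapped; };

    const Receiver kGlobalProxy{Kind::kObject, false};

    Receiver CoerceReceiver(Receiver r, bool sloppy_non_native) {
      if (!sloppy_non_native) return r;  // strict or native: leave as-is
      if (r.kind == Kind::kUndefined || r.kind == Kind::kNull)
        return kGlobalProxy;                   // patch to the global proxy
      if (r.kind == Kind::kPrimitive)
        return Receiver{Kind::kObject, true};  // ToObject wraps it
      return r;                                // already a JS receiver
    }

    int main() {
      assert(CoerceReceiver({Kind::kNull, false}, true).kind == Kind::kObject);
      assert(CoerceReceiver({Kind::kPrimitive, false}, true).wrapped);
      assert(CoerceReceiver({Kind::kNull, false}, false).kind == Kind::kNull);
    }
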
+// static
+void Builtins::Generate_Call(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edi : the target to call (can be any Object).
+ // -----------------------------------
+
+ Label non_callable, non_function, non_smi;
+ __ JumpIfSmi(edi, &non_callable);
+ __ bind(&non_smi);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(equal, masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, &non_function);
+
+ // 1. Call to function proxy.
+ // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
+ __ mov(edi, FieldOperand(edi, JSFunctionProxy::kCallTrapOffset));
+ __ AssertNotSmi(edi);
+ __ jmp(&non_smi);
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not we raise an exception).
+ __ bind(&non_function);
+ // Check if target has a [[Call]] internal method.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ j(zero, &non_callable, Label::kNear);
+ // Overwrite the original receiver with the (original) target.
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(edi);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
+
+
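
Generate_Call is the single entry point for invoking an arbitrary object: a JSFunction goes straight to CallFunction, a function proxy re-dispatches on its call trap, anything whose map has the kIsCallable bit set falls back to the call-as-function delegate, and everything else throws. The dispatch ladder, restated in standalone C++ (hypothetical types):

    #include <cstdio>
    #include <stdexcept>

    struct Target {
      bool is_function = false;
      bool is_proxy = false;
      bool is_callable = false;
      Target* call_trap = nullptr;  // the proxy's [[Call]] trap
    };

    void CallFunction(Target&) { std::puts("CallFunction builtin"); }

    void Call(Target& t) {
      if (t.is_function) return CallFunction(t);
      if (t.is_proxy) return Call(*t.call_trap);  // re-enter on the trap
      if (t.is_callable) return CallFunction(t);  // via the delegate in V8
      throw std::runtime_error("non-callable");   // kThrowCalledNonCallable
    }

    int main() {
      Target fn; fn.is_function = true;
      Target proxy; proxy.is_proxy = true; proxy.call_trap = &fn;
      Call(proxy);  // prints: CallFunction builtin
    }
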
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the original constructor (checked to be a JSFunction)
+ // -- edi : the constructor to call (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(edx);
+ __ AssertFunction(edi);
+
+  // The calling convention for function-specific ConstructStubs requires
+ // ebx to contain either an AllocationSite or undefined.
+ __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
+}
+
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- edi : the constructor to call (checked to be a JSFunctionProxy)
+ // -----------------------------------
+
+ // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
+ __ mov(edi, FieldOperand(edi, JSFunctionProxy::kConstructTrapOffset));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- edi : the constructor to call (can be any Object)
+ // -----------------------------------
+
+ // Check if target has a [[Construct]] internal method.
+ Label non_constructor;
+ __ JumpIfSmi(edi, &non_constructor, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &non_constructor, Label::kNear);
+
+ // Dispatch based on instance type.
+ __ CmpInstanceType(ecx, JS_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET);
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, masm->isolate()->builtins()->ConstructProxy(),
+ RelocInfo::CODE_TARGET);
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, edi);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(edi);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
+
+
+// static
+void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- ebx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- edi : the target to call (can be any Object).
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ Pop(edx);
+
+ // Find the address of the last argument.
+ __ mov(ecx, eax);
+ __ add(ecx, Immediate(1)); // Add one for receiver.
+ __ shl(ecx, kPointerSizeLog2);
+ __ neg(ecx);
+ __ add(ecx, ebx);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ jmp(&loop_check);
+ __ bind(&loop_header);
+ __ Push(Operand(ebx, 0));
+ __ sub(ebx, Immediate(kPointerSize));
+ __ bind(&loop_check);
+ __ cmp(ebx, ecx);
+ __ j(greater, &loop_header, Label::kNear);
+
+ // Call the target.
+ __ Push(edx); // Re-push return address.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
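
Generate_PushArgsAndCall receives the address of the first argument in ebx and copies argc + 1 slots (arguments plus receiver) onto the stack, walking the source pointer toward lower addresses until it reaches the end address precomputed in ecx. The pointer arithmetic, checked in index form:

    #include <cassert>
    #include <vector>

    // mem[first] holds the first argument to push; later arguments and the
    // receiver sit at decreasing indices, mirroring how the stub walks ebx
    // downward until it hits the end pointer precomputed in ecx.
    std::vector<int> PushArgs(const std::vector<int>& mem, int first, int argc) {
      std::vector<int> stack;
      for (int i = first; i > first - (argc + 1); --i)  // argc args + receiver
        stack.push_back(mem[i]);
      return stack;
    }

    int main() {
      //                     index 0          index 1       index 2
      std::vector<int> mem{/*receiver*/ 99, /*arg2*/ 20, /*arg1*/ 10};
      std::vector<int> s = PushArgs(mem, /*first=*/2, /*argc=*/2);
      assert(s == (std::vector<int>{10, 20, 99}));  // args first, receiver last
    }
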
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
@@ -1644,16 +1687,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(eax, Operand(ebp, eax, times_4, offset));
- __ mov(edi, -1); // account for receiver
+ __ lea(edi, Operand(ebp, eax, times_4, offset));
+ __ mov(eax, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ inc(edi);
- __ push(Operand(eax, 0));
- __ sub(eax, Immediate(kPointerSize));
- __ cmp(edi, ebx);
+ __ inc(eax);
+ __ push(Operand(edi, 0));
+ __ sub(edi, Immediate(kPointerSize));
+ __ cmp(eax, ebx);
__ j(less, &copy);
+ // eax now contains the expected number of arguments.
__ jmp(&invoke);
}
@@ -1682,6 +1726,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ // Remember expected arguments in ecx.
+ __ mov(ecx, ebx);
+
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(edi, Operand(ebp, eax, times_4, offset));
@@ -1706,12 +1753,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ push(Immediate(masm->isolate()->factory()->undefined_value()));
__ cmp(eax, ebx);
__ j(less, &fill);
+
+ // Restore expected arguments.
+ __ mov(eax, ecx);
}
// Call the entry point.
__ bind(&invoke);
// Restore function pointer.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ // eax : expected number of arguments
+ // edi : function (passed through to callee)
__ call(edx);
// Store offset of return address for deoptimizer.
@@ -1731,7 +1783,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ int3();
}
}
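
The adaptor changes shuffle registers so that eax ends up holding the expected argument count when the callee is entered (an ABI detail the callee now relies on). Functionally the adaptor still copies min(actual, expected) arguments and pads any shortfall with undefined; as a checkable model:

    #include <cassert>
    #include <vector>

    const int kUndefined = -1;  // stand-in for the undefined filler value

    // The callee observes exactly `expected` arguments: actual ones first,
    // then undefined padding; surplus actual arguments are not passed on.
    std::vector<int> Adapt(const std::vector<int>& actual_args, int expected) {
      std::vector<int> adapted;
      for (int i = 0; i < expected; ++i)
        adapted.push_back(i < static_cast<int>(actual_args.size())
                              ? actual_args[i] : kUndefined);
      return adapted;  // adapted.size() is what eax now carries
    }

    int main() {
      assert(Adapt({1, 2}, 4) ==
             (std::vector<int>{1, 2, kUndefined, kUndefined}));
      assert(Adapt({1, 2, 3}, 2) == (std::vector<int>{1, 2}));
    }
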
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 53e9e96cdb..37e1876f3d 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -8,6 +8,7 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/ia32/code-stubs-ia32.h"
#include "src/ia32/frames-ia32.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -791,72 +792,78 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
+ // ecx : number of parameters (tagged)
+ // edx : parameters pointer
+ // edi : function
// esp[0] : return address
- // esp[4] : number of parameters
- // esp[8] : receiver displacement
- // esp[12] : function
+
+ DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &runtime, Label::kNear);
// Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
+ __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx,
+ Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
__ bind(&runtime);
+ __ pop(eax); // Pop return address.
+ __ push(edi); // Push function.
+ __ push(edx); // Push parameters pointer.
+ __ push(ecx); // Push parameter count.
+ __ push(eax); // Push return address.
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
+ // ecx : number of parameters (tagged)
+ // edx : parameters pointer
+ // edi : function
// esp[0] : return address
- // esp[4] : number of parameters (tagged)
- // esp[8] : receiver displacement
- // esp[12] : function
- // ebx = parameter count (tagged)
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
- // TODO(rossberg): Factor out some of the bits that are shared with the other
- // Generate* functions.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Label adaptor_frame, try_allocate, runtime;
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
// No adaptor, parameter count = argument count.
- __ mov(ecx, ebx);
+ __ mov(ebx, ecx);
+ __ push(ecx);
__ jmp(&try_allocate, Label::kNear);
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
+ __ mov(ebx, ecx);
+ __ push(ecx);
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ lea(edx, Operand(edx, ecx, times_2,
StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
// ebx = parameter count (tagged)
// ecx = argument count (smi-tagged)
- // esp[4] = parameter count (tagged)
- // esp[8] = address of receiver argument
// Compute the mapped parameter count = min(ebx, ecx) in ebx.
__ cmp(ebx, ecx);
__ j(less_equal, &try_allocate, Label::kNear);
__ mov(ebx, ecx);
+ // Save mapped parameter count and function.
__ bind(&try_allocate);
-
- // Save mapped parameter count.
+ __ push(edi);
__ push(ebx);
// Compute the sizes of backing store, parameter map, and arguments object.
@@ -876,13 +883,13 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
+ __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
// eax = address of new object(s) (tagged)
// ecx = argument count (smi-tagged)
// esp[0] = mapped parameter count (tagged)
+ // esp[4] = function
// esp[8] = parameter count (tagged)
- // esp[12] = address of receiver argument
// Get the arguments map from the current native context into edi.
Label has_mapped_parameters, instantiate;
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -905,8 +912,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// ecx = argument count (smi-tagged)
// edi = address of arguments map (tagged)
// esp[0] = mapped parameter count (tagged)
+ // esp[4] = function
// esp[8] = parameter count (tagged)
- // esp[12] = address of receiver argument
// Copy the JS object part.
__ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -916,11 +923,11 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ AssertNotSmi(edx);
+ __ mov(edi, Operand(esp, 1 * kPointerSize));
+ __ AssertNotSmi(edi);
__ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- edx);
+ Heap::kArgumentsCalleeIndex * kPointerSize),
+ edi);
// Use the length (smi tagged) and set that as an in-object property too.
__ AssertSmi(ecx);
@@ -938,11 +945,13 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// eax = address of new object (tagged)
// ebx = mapped parameter count (tagged)
// ecx = argument count (tagged)
+ // edx = address of receiver argument
// edi = address of parameter map or backing store (tagged)
// esp[0] = mapped parameter count (tagged)
+ // esp[4] = function
// esp[8] = parameter count (tagged)
- // esp[12] = address of receiver argument
- // Free a register.
+ // Free two registers.
+ __ push(edx);
__ push(eax);
// Initialize parameter map. If there are no mapped arguments, we're done.
@@ -968,9 +977,9 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// We loop from right to left.
Label parameters_loop, parameters_test;
__ push(ecx);
- __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
__ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ add(ebx, Operand(esp, 4 * kPointerSize));
+ __ add(ebx, Operand(esp, 5 * kPointerSize));
__ sub(ebx, eax);
__ mov(ecx, isolate()->factory()->the_hole_value());
__ mov(edx, edi);
@@ -982,9 +991,10 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// edi = address of backing store (tagged)
// esp[0] = argument count (tagged)
// esp[4] = address of new object (tagged)
- // esp[8] = mapped parameter count (tagged)
- // esp[16] = parameter count (tagged)
- // esp[20] = address of receiver argument
+ // esp[8] = address of receiver argument
+ // esp[12] = mapped parameter count (tagged)
+ // esp[16] = function
+ // esp[20] = parameter count (tagged)
__ jmp(&parameters_test, Label::kNear);
__ bind(&parameters_loop);
@@ -1002,17 +1012,18 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// ecx = argument count (tagged)
// edi = address of backing store (tagged)
// esp[0] = address of new object (tagged)
- // esp[4] = mapped parameter count (tagged)
- // esp[12] = parameter count (tagged)
- // esp[16] = address of receiver argument
+ // esp[4] = address of receiver argument
+ // esp[8] = mapped parameter count (tagged)
+ // esp[12] = function
+ // esp[16] = parameter count (tagged)
// Copy arguments header and remaining slots (if there are any).
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(isolate()->factory()->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
Label arguments_loop, arguments_test;
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 4 * kPointerSize));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
__ sub(edx, ebx); // Is there a smarter way to do negative scaling?
__ sub(edx, ebx);
__ jmp(&arguments_test, Label::kNear);
@@ -1029,57 +1040,60 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Restore.
__ pop(eax); // Address of arguments object.
- __ pop(ebx); // Parameter count.
+ __ Drop(4);
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
+ // Return.
+ __ ret(0);
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ pop(eax); // Remove saved parameter count.
- __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
+ __ pop(eax); // Remove saved mapped parameter count.
+ __ pop(edi); // Pop saved function.
+ __ pop(eax); // Remove saved parameter count.
+ __ pop(eax); // Pop return address.
+ __ push(edi); // Push function.
+ __ push(edx); // Push parameters pointer.
+ __ push(ecx); // Push parameter count.
+ __ push(eax); // Push return address.
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
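// The fast path above allocates the arguments object, the parameter map,
// and the backing store in one go. A rough sketch of the size computation
// it performs; the constants are ia32-style illustrations, not the
// authoritative V8 layout:

#include <cstddef>

constexpr std::size_t kPtr = 4;                           // ia32 pointer size
constexpr std::size_t kFixedArrayHeader = 2 * kPtr;       // map + length
constexpr std::size_t kSloppyArgumentsObject = 5 * kPtr;  // object + length + callee

std::size_t SloppyArgumentsSize(std::size_t mapped, std::size_t args) {
  std::size_t size = 0;
  if (mapped > 0) {
    // Parameter map: header, context link, backing-store link, and one
    // slot per mapped parameter.
    size += kFixedArrayHeader + (2 + mapped) * kPtr;
  }
  size += kFixedArrayHeader + args * kPtr;  // backing store
  return size + kSloppyArgumentsObject;     // the arguments object itself
}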
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // ecx : number of parameters (tagged)
+ // edx : parameters pointer
+ // edi : function
// esp[0] : return address
- // esp[4] : number of parameters
- // esp[8] : receiver displacement
- // esp[12] : function
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame, Label::kNear);
+ DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
- // Get the length from the frame.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ jmp(&try_allocate, Label::kNear);
+ // Check if the calling frame is an arguments adaptor frame.
+ Label try_allocate, runtime;
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &try_allocate, Label::kNear);
// Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ mov(Operand(esp, 2 * kPointerSize), edx);
+ __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx,
+ Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ test(ecx, ecx);
+ __ mov(eax, ecx);
+ __ test(eax, eax);
__ j(zero, &add_arguments_object, Label::kNear);
- __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
+ __ lea(eax, Operand(eax, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ add(ecx, Immediate(Heap::kStrictArgumentsObjectSize));
+ __ add(eax, Immediate(Heap::kStrictArgumentsObjectSize));
// Do the allocation of both objects in one go.
- __ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
+ __ Allocate(eax, eax, ebx, no_reg, &runtime, TAG_OBJECT);
// Get the arguments map from the current native context.
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -1095,7 +1109,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
__ AssertSmi(ecx);
__ mov(FieldOperand(eax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
@@ -1106,17 +1119,14 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ test(ecx, ecx);
__ j(zero, &done, Label::kNear);
- // Get the parameters pointer from the stack.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(isolate()->factory()->fixed_array_map()));
-
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
// Untag the length for the loop below.
__ SmiUntag(ecx);
@@ -1130,42 +1140,21 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ dec(ecx);
__ j(not_zero, &loop);
- // Return and remove the on-stack parameters.
+ // Return.
__ bind(&done);
- __ ret(3 * kPointerSize);
+ __ ret(0);
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
+ __ pop(eax); // Pop return address.
+ __ push(edi); // Push function.
+ __ push(edx); // Push parameters pointer.
+ __ push(ecx); // Push parameter count.
+ __ push(eax); // Push return address.
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
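// Strict-mode arguments are much simpler: no parameter map and no callee
// property, just a length and a flat copy of the actual arguments. A
// minimal sketch with stand-in types (illustrative only):

#include <cstddef>
#include <vector>

struct TaggedValue {};  // stand-in for a tagged V8 value

struct StrictArguments {
  std::size_t length;
  std::vector<TaggedValue> elements;  // plain copy; no aliasing of locals
};

StrictArguments NewStrictArguments(const TaggedValue* params, std::size_t n) {
  return {n, std::vector<TaggedValue>(params, params + n)};
}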
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[4] : language mode
- // esp[8] : index of rest parameter
- // esp[12] : number of parameters
- // esp[16] : receiver displacement
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &runtime);
-
- // Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 3 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 4 * kPointerSize), edx);
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -1892,25 +1881,21 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ push(eax);
// Figure out which native to call and setup the arguments.
- if (cc == equal && strict()) {
+ if (cc == equal) {
__ push(ecx);
- __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
+ 1);
} else {
- Builtins::JavaScript builtin;
- if (cc == equal) {
- builtin = Builtins::EQUALS;
- } else {
- builtin =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- }
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
// Restore return address on the stack.
__ push(ecx);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+ __ TailCallRuntime(
+ is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
+ 1);
}
__ bind(&miss);
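// With this change the generic compare path picks a runtime function
// instead of a JS builtin. The selection logic it encodes, as a sketch
// (the enum is an illustrative stand-in for the runtime function ids):

enum class CompareRuntime { kEquals, kStrictEquals, kCompare, kCompare_Strong };

CompareRuntime SelectCompareRuntime(bool is_equality, bool is_strict,
                                    bool is_strong) {
  if (is_equality) {
    return is_strict ? CompareRuntime::kStrictEquals : CompareRuntime::kEquals;
  }
  return is_strong ? CompareRuntime::kCompare_Strong : CompareRuntime::kCompare;
}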
@@ -1988,27 +1973,25 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
Heap::kWeakCellMapRootIndex);
- __ j(not_equal, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
+ __ j(not_equal, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(FieldOperand(ecx, WeakCell::kValueOffset), &initialize);
__ jmp(&megamorphic);
- if (!FLAG_pretenuring_call_new) {
- __ bind(&check_allocation_site);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the slot either some other function or an
- // AllocationSite.
- __ CompareRoot(FieldOperand(ecx, 0), Heap::kAllocationSiteMapRootIndex);
- __ j(not_equal, &miss);
+ __ bind(&check_allocation_site);
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite.
+ __ CompareRoot(FieldOperand(ecx, 0), Heap::kAllocationSiteMapRootIndex);
+ __ j(not_equal, &miss);
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ j(not_equal, &megamorphic);
- __ jmp(&done, Label::kFar);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done, Label::kFar);
__ bind(&miss);
@@ -2027,24 +2010,21 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
- if (!FLAG_pretenuring_call_new) {
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the
- // slot.
- CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
- __ jmp(&done);
-
- __ bind(&not_array_function);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &not_array_function);
- CreateWeakCellStub create_stub(isolate);
+ // The target function is the Array constructor; create an AllocationSite
+ // if we don't already have it, and store it in the slot.
+ CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ __ jmp(&done);
+
+ __ bind(&not_array_function);
+ CreateWeakCellStub weak_cell_stub(isolate);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
__ bind(&done);
}
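// The recording logic above amounts to a small state machine over the
// feedback slot. A simplified sketch; the real code also handles cleared
// weak cells, which give the slot a fresh chance to go monomorphic:

enum class FeedbackState {
  kUninitialized,   // sentinel
  kMonomorphic,     // WeakCell holding the target function
  kAllocationSite,  // target was the Array() function
  kMegamorphic      // sentinel; give up on tracking
};

FeedbackState RecordCall(FeedbackState s, bool same_target, bool is_array_fn) {
  if (s == FeedbackState::kUninitialized) {
    return is_array_fn ? FeedbackState::kAllocationSite
                       : FeedbackState::kMonomorphic;
  }
  if (s == FeedbackState::kMegamorphic || !same_target) {
    return FeedbackState::kMegamorphic;
  }
  return s;  // same target again: state is unchanged
}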
@@ -2063,33 +2043,9 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
}
-static void EmitSlowCase(Isolate* isolate,
- MacroAssembler* masm,
- int argc,
- Label* non_function) {
- // Check for function proxy.
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, non_function);
- __ pop(ecx);
- __ push(edi); // put proxy as additional argument under return address
- __ push(ecx);
- __ Move(eax, Immediate(argc + 1));
- __ Move(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(non_function);
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edi);
- __ Move(eax, Immediate(argc));
- __ Move(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
+static void EmitSlowCase(Isolate* isolate, MacroAssembler* masm, int argc) {
+ __ Set(eax, argc);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -2110,11 +2066,11 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
int argc, bool needs_checks,
bool call_as_method) {
// edi : the function to call
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
if (needs_checks) {
// Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &non_function);
+ __ JumpIfSmi(edi, &slow);
// Goto slow case if we do not have a function.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
@@ -2149,8 +2105,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
if (needs_checks) {
// Slow-case: Non-function called.
__ bind(&slow);
- // (non_function is bound in EmitSlowCase)
- EmitSlowCase(masm->isolate(), masm, argc, &non_function);
+ EmitSlowCase(masm->isolate(), masm, argc);
}
if (call_as_method) {
@@ -2171,39 +2126,31 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// ecx : original constructor (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
- Label slow, non_function_call;
if (IsSuperConstructorCall()) {
__ push(ecx);
}
+ Label non_function;
// Check that function is not a smi.
- __ JumpIfSmi(edi, &non_function_call);
+ __ JumpIfSmi(edi, &non_function);
// Check that function is a JSFunction.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
+ __ j(not_equal, &non_function);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, IsSuperConstructorCall());
- if (FLAG_pretenuring_call_new) {
- // Put the AllocationSite from the feedback vector into ebx.
- // By adding kPointerSize we encode that we know the AllocationSite
- // entry is at the feedback vector slot given by edx + 1.
- __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- } else {
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into ebx, or undefined.
- __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Handle<Map> allocation_site_map =
- isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
- __ j(equal, &feedback_register_initialized);
- __ mov(ebx, isolate()->factory()->undefined_value());
- __ bind(&feedback_register_initialized);
- }
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into ebx, or undefined.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map =
+ isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ j(equal, &feedback_register_initialized);
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ __ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(ebx);
}
@@ -2215,69 +2162,33 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ mov(edx, edi);
}
- // Jump to the function-specific construct stub.
- Register jmp_reg = ecx;
- __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(jmp_reg, FieldOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
- __ jmp(jmp_reg);
-
- // edi: called object
- // eax: number of arguments
- // ecx: object map
- // esp[0]: original receiver (for IsSuperConstructorCall)
- Label do_call;
- __ bind(&slow);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function_call);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- if (IsSuperConstructorCall()) {
- __ Drop(1);
- }
- // Set expected number of arguments to zero (not changing eax).
- __ Move(ebx, Immediate(0));
- Handle<Code> arguments_adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
-}
-
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
- __ mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- __ mov(vector, FieldOperand(vector,
- SharedFunctionInfo::kFeedbackVectorOffset));
+ __ bind(&non_function);
+ if (IsSuperConstructorCall()) __ Drop(1);
+ __ mov(edx, edi);
+ __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// edi - function
// edx - slot id
// ebx - vector
- Label miss;
- int argc = arg_count();
- ParameterCount actual(argc);
-
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
- __ j(not_equal, &miss);
+ __ j(not_equal, miss);
__ mov(eax, arg_count());
+ // Reload ecx.
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize));
- // Verify that ecx contains an AllocationSite
- Factory* factory = masm->isolate()->factory();
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- factory->allocation_site_map());
- __ j(not_equal, &miss);
-
// Increment the call count for monomorphic function calls.
__ add(FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
@@ -2288,17 +2199,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The slow case, we need this no matter what to complete a call after a miss.
- CallFunctionNoFeedback(masm,
- arg_count(),
- true,
- CallAsMethod());
-
// Unreachable.
- __ int3();
}
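// The increment above maintains a per-slot call count in the vector entry
// right after the feedback value, stored as a Smi. Sketch of the
// bookkeeping (stand-in types; the real code adds Smi::FromInt(1) in place):

#include <cstddef>
#include <cstdint>
#include <vector>

using Smi = int32_t;  // stand-in for a tagged small integer

void IncrementCallCount(std::vector<Smi>& feedback_vector, std::size_t slot) {
  feedback_vector[slot + 1] += 1;  // count lives one entry after the feedback
}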
@@ -2312,7 +2213,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
Label have_js_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2365,7 +2266,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
__ bind(&slow);
- EmitSlowCase(isolate, masm, argc, &non_function);
+ EmitSlowCase(isolate, masm, argc);
if (CallAsMethod()) {
__ bind(&wrap);
@@ -2373,11 +2274,21 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
- Label uninitialized, miss;
+ Label uninitialized, miss, not_allocation_site;
__ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ j(equal, &slow_start);
+ // Check if we have an allocation site.
+ __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
+ Heap::kAllocationSiteMapRootIndex);
+ __ j(not_equal, &not_allocation_site);
+
+ // We have an allocation site.
+ HandleArrayCase(masm, &miss);
+
+ __ bind(&not_allocation_site);
+
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
@@ -2446,7 +2357,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow_start);
// Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &non_function);
+ __ JumpIfSmi(edi, &slow);
// Goto slow case if we do not have a function.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
@@ -2467,10 +2378,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ push(edx);
// Call the entry.
- Runtime::FunctionId id = GetICState() == DEFAULT
- ? Runtime::kCallIC_Miss
- : Runtime::kCallIC_Customization_Miss;
- __ CallRuntime(id, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss, 3);
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
@@ -2718,233 +2626,108 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
-// Generate stub code for instanceof.
-// This code can patch a call site inlined cache of the instance of check,
-// which looks like this.
-//
-// 81 ff XX XX XX XX cmp edi, <the hole, patched to a map>
-// 75 0a jne <some near label>
-// b8 XX XX XX XX mov eax, <the hole, patched to either true or false>
-//
-// If call site patching is requested the stack will have the delta from the
-// return address to the cmp instruction just below the return address. This
-// also means that call site patching can only take place with arguments in
-// registers. TOS looks like this when call site patching is requested
-//
-// esp[0] : return address
-// esp[4] : delta from return address to cmp instruction
-//
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub.
- Register object = eax; // Object (lhs).
- Register map = ebx; // Map of the object.
- Register function = edx; // Function (rhs).
- Register prototype = edi; // Prototype of the function.
- Register scratch = ecx;
-
- // Constants describing the call site code to patch.
- static const int kDeltaToCmpImmediate = 2;
- static const int kDeltaToMov = 8;
- static const int kDeltaToMovImmediate = 9;
- static const int8_t kCmpEdiOperandByte1 = bit_cast<int8_t, uint8_t>(0x3b);
- static const int8_t kCmpEdiOperandByte2 = bit_cast<int8_t, uint8_t>(0x3d);
- static const int8_t kMovEaxImmediateByte = bit_cast<int8_t, uint8_t>(0xb8);
-
- DCHECK_EQ(object.code(), InstanceofStub::left().code());
- DCHECK_EQ(function.code(), InstanceofStub::right().code());
-
- // Get the object and function - they are always both needed.
- Label slow, not_js_object;
- if (!HasArgsInRegisters()) {
- __ mov(object, Operand(esp, 2 * kPointerSize));
- __ mov(function, Operand(esp, 1 * kPointerSize));
- }
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+ Register const object = edx; // Object (lhs).
+ Register const function = eax; // Function (rhs).
+ Register const object_map = ecx; // Map of {object}.
+ Register const function_map = ebx; // Map of {function}.
+ Register const function_prototype = function_map; // Prototype of {function}.
+ Register const scratch = edi;
+
+ DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+ // Check if {object} is a smi.
+ Label object_is_smi;
+ __ JumpIfSmi(object, &object_is_smi, Label::kNear);
+
+ // Lookup the {function} and the {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Label fast_case, slow_case;
+ __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
+ __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &fast_case, Label::kNear);
+ __ CompareRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &fast_case, Label::kNear);
+ __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(0);
- // Check that the left hand is a JS object.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss, Label::kNear);
- __ CompareRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss, Label::kNear);
- __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
- __ bind(&miss);
- }
+ // If {object} is a smi we can safely return false if {function} is a JS
+ // function; otherwise we have to miss to the runtime and throw an exception.
+ __ bind(&object_is_smi);
+ __ JumpIfSmi(function, &slow_case);
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
+ __ j(not_equal, &slow_case);
+ __ LoadRoot(eax, Heap::kFalseValueRootIndex);
+ __ ret(0);
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+ // Fast-case: The {function} must be a valid JSFunction.
+ __ bind(&fast_case);
+ __ JumpIfSmi(function, &slow_case);
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
+ __ j(not_equal, &slow_case);
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+ // Ensure that {function} has an instance prototype.
+ __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
+ static_cast<uint8_t>(1 << Map::kHasNonInstancePrototype));
+ __ j(not_zero, &slow_case);
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
- __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- } else {
- // The constants for the code patching are based on no push instructions
- // at the call site.
- DCHECK(HasArgsInRegisters());
- // Get return address and delta to inlined map check.
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp1);
- __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp2);
- }
- __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
- __ mov(Operand(scratch, 0), map);
- __ push(map);
- // Scratch points at the cell payload. Calculate the start of the object.
- __ sub(scratch, Immediate(Cell::kValueOffset - 1));
- __ RecordWriteField(scratch, Cell::kValueOffset, map, function,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ pop(map);
- }
+ // Ensure that {function} is not bound.
+ Register const shared_info = scratch;
+ __ mov(shared_info,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ BooleanBitTest(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
+ SharedFunctionInfo::kBoundFunction);
+ __ j(not_zero, &slow_case);
- // Loop through the prototype chain of the object looking for the function
- // prototype.
- __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
- Label loop, is_instance, is_not_instance;
+ // Get the "prototype" (or initial map) of the {function}.
+ __ mov(function_prototype,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ AssertNotSmi(function_prototype);
+
+ // Resolve the prototype if the {function} has an initial map. Afterwards the
+ // {function_prototype} will be either the JSReceiver prototype object or the
+ // hole value, which means that no instances of the {function} were created so
+ // far and hence we should return false.
+ Label function_prototype_valid;
+ Register const function_prototype_map = scratch;
+ __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
+ __ j(not_equal, &function_prototype_valid, Label::kNear);
+ __ mov(function_prototype,
+ FieldOperand(function_prototype, Map::kPrototypeOffset));
+ __ bind(&function_prototype_valid);
+ __ AssertNotSmi(function_prototype);
+
+ // Update the global instanceof cache with the current {object} map and
+ // {function}. The cached answer will be set when it is known below.
+ __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
+
+ // Loop through the prototype chain looking for the {function} prototype.
+ // Assume true, and change to false if not found.
+ Register const object_prototype = object_map;
+ Label done, loop;
+ __ mov(eax, isolate()->factory()->true_value());
__ bind(&loop);
- __ cmp(scratch, prototype);
- __ j(equal, &is_instance, Label::kNear);
- Factory* factory = isolate()->factory();
- __ cmp(scratch, Immediate(factory->null_value()));
- __ j(equal, &is_not_instance, Label::kNear);
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ mov(eax, Immediate(0));
- __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ mov(eax, factory->true_value());
- }
- } else {
- // Get return address and delta to inlined map check.
- __ mov(eax, factory->true_value());
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
- }
- __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
- if (!ReturnTrueFalseObject()) {
- __ Move(eax, Immediate(0));
- }
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ mov(eax, factory->false_value());
- }
- } else {
- // Get return address and delta to inlined map check.
- __ mov(eax, factory->false_value());
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
- }
- __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
- if (!ReturnTrueFalseObject()) {
- __ Move(eax, Immediate(Smi::FromInt(1)));
- }
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow, Label::kNear);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
- __ j(not_equal, &slow, Label::kNear);
-
- // Null is not instance of anything.
- __ cmp(object, factory->null_value());
- __ j(not_equal, &object_not_null, Label::kNear);
- if (ReturnTrueFalseObject()) {
- __ mov(eax, factory->false_value());
- } else {
- __ Move(eax, Immediate(Smi::FromInt(1)));
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&object_not_null);
- // Smi values is not instance of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
- if (ReturnTrueFalseObject()) {
- __ mov(eax, factory->false_value());
- } else {
- __ Move(eax, Immediate(Smi::FromInt(1)));
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&object_not_null_or_smi);
- // String values is not instance of anything.
- Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
- __ j(NegateCondition(is_string), &slow, Label::kNear);
- if (ReturnTrueFalseObject()) {
- __ mov(eax, factory->false_value());
- } else {
- __ Move(eax, Immediate(Smi::FromInt(1)));
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+ __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object_prototype, function_prototype);
+ __ j(equal, &done, Label::kNear);
+ __ cmp(object_prototype, isolate()->factory()->null_value());
+ __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+ __ j(not_equal, &loop);
+ __ mov(eax, isolate()->factory()->false_value());
+ __ bind(&done);
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(0);
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- // Tail call the builtin which returns 0 or 1.
- if (HasArgsInRegisters()) {
- // Push arguments below return address.
- __ pop(scratch);
- __ push(object);
- __ push(function);
- __ push(scratch);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- // Call the builtin and convert 0/1 to true/false.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(object);
- __ push(function);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- Label true_value, done;
- __ test(eax, eax);
- __ j(zero, &true_value, Label::kNear);
- __ mov(eax, factory->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(eax, factory->true_value());
- __ bind(&done);
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
- }
+ // Slow-case: Call the runtime function.
+ __ bind(&slow_case);
+ __ pop(scratch); // Pop return address.
+ __ push(object); // Push {object}.
+ __ push(function); // Push {function}.
+ __ push(scratch); // Push return address.
+ __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
}
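// The rewritten stub is an explicit prototype-chain walk fronted by the
// global (map, function) instanceof cache. The walk itself, as a C++
// sketch with stand-in types (the real code operates on maps and roots):

struct Map;
struct Object { Map* map; };
struct Map { Object* prototype; };

bool OrdinaryHasInstance(Object* object, Object* function_prototype,
                         Object* null_value) {
  Map* m = object->map;
  for (;;) {
    Object* proto = m->prototype;
    if (proto == function_prototype) return true;  // found it: instance
    if (proto == null_value) return false;         // end of chain: not one
    m = proto->map;
  }
}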
@@ -3393,7 +3176,42 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+ __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+ // The ToString stub takes one argument in eax.
+ Label is_number;
+ __ JumpIfSmi(eax, &is_number, Label::kNear);
+
+ Label not_string;
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
+ // eax: receiver
+ // edi: receiver map
+ __ j(above_equal, &not_string, Label::kNear);
+ __ Ret();
+ __ bind(&not_string);
+
+ Label not_heap_number;
+ __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ CmpInstanceType(edi, ODDBALL_TYPE);
+ __ j(not_equal, &not_oddball, Label::kNear);
+ __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ pop(ecx); // Pop return address.
+ __ push(eax); // Push argument.
+ __ push(ecx); // Push return address.
+ __ TailCallRuntime(Runtime::kToString, 1, 1);
}
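// The dispatch order of the ToString stub, restated as a sketch (the
// enums are illustrative stand-ins for the instance-type checks above):

enum class Kind { kSmi, kHeapNumber, kString, kOddball, kOther };
enum class ToStringPath { kNumberToString, kIdentity, kOddballCached, kRuntime };

ToStringPath ClassifyToString(Kind k) {
  switch (k) {
    case Kind::kSmi:
    case Kind::kHeapNumber: return ToStringPath::kNumberToString;
    case Kind::kString:     return ToStringPath::kIdentity;       // already a string
    case Kind::kOddball:    return ToStringPath::kOddballCached;  // to_string slot
    default:                return ToStringPath::kRuntime;        // Runtime::kToString
  }
}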
@@ -3523,41 +3341,39 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: right string
- // esp[8]: left string
-
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
- __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
+ // ----------- S t a t e -------------
+ // -- edx : left string
+ // -- eax : right string
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertString(edx);
+ __ AssertString(eax);
Label not_same;
__ cmp(edx, eax);
__ j(not_equal, &not_same, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
__ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
- __ ret(2 * kPointerSize);
+ __ Ret();
__ bind(&not_same);
// Check that both objects are sequential one-byte strings.
+ Label runtime;
__ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx, &runtime);
// Compare flat one-byte strings.
- // Drop arguments from the stack.
- __ pop(ecx);
- __ add(esp, Immediate(2 * kPointerSize));
- __ push(ecx);
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
edi);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ Push(eax);
+ __ PushReturnAddressFrom(ecx);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
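// The fast path compares two flat one-byte strings in place. The
// comparison it performs, as a self-contained sketch:

#include <algorithm>
#include <cstddef>
#include <cstdint>

int CompareFlatOneByte(const uint8_t* a, std::size_t len_a,
                       const uint8_t* b, std::size_t len_b) {
  std::size_t n = std::min(len_a, len_b);
  for (std::size_t i = 0; i < n; ++i) {
    if (a[i] != b[i]) return a[i] < b[i] ? -1 : 1;  // LESS / GREATER
  }
  if (len_a == len_b) return 0;                     // EQUAL
  return len_a < len_b ? -1 : 1;
}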
@@ -3590,6 +3406,37 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
}
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::BOOLEAN, state());
+ Label miss;
+ Label::Distance const miss_distance =
+ masm->emit_debug_code() ? Label::kFar : Label::kNear;
+
+ __ JumpIfSmi(edx, &miss, miss_distance);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ JumpIfSmi(eax, &miss, miss_distance);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
+ __ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
+ if (op() != Token::EQ_STRICT && is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (!Token::IsEqualityOp(op())) {
+ __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
+ __ AssertSmi(eax);
+ __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
+ __ AssertSmi(edx);
+ __ xchg(eax, edx);
+ }
+ __ sub(eax, edx);
+ __ Ret();
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
DCHECK(state() == CompareICState::SMI);
Label miss;
@@ -3907,15 +3754,24 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ JumpIfSmi(ecx, &miss, Label::kNear);
__ GetWeakValue(edi, cell);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ecx, edi);
+ __ cmp(edi, FieldOperand(eax, HeapObject::kMapOffset));
__ j(not_equal, &miss, Label::kNear);
- __ cmp(ebx, edi);
+ __ cmp(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ j(not_equal, &miss, Label::kNear);
- __ sub(eax, edx);
- __ ret(0);
+ if (Token::IsEqualityOp(op())) {
+ __ sub(eax, edx);
+ __ ret(0);
+ } else if (is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ Push(eax);
+ __ Push(Immediate(Smi::FromInt(NegativeComparisonResult(GetCondition()))));
+ __ PushReturnAddressFrom(ecx);
+ __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -4447,14 +4303,14 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4678,14 +4534,14 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4701,11 +4557,180 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
+// The value is already on the stack.
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
+ Register key, Register vector,
+ Register slot, Register feedback,
+ Label* miss) {
+ // feedback initially contains the feedback array
+ Label next, next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label start_polymorphic;
+ ExternalReference virtual_register =
+ ExternalReference::vector_store_virtual_register(masm->isolate());
+
+ __ push(receiver);
+ __ push(vector);
+
+ Register receiver_map = receiver;
+ Register cached_map = vector;
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &load_smi_map);
+ __ mov(receiver_map, FieldOperand(receiver, 0));
+ __ bind(&compare_map);
+ __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+
+ // A named keyed store might have a two-element array; all other cases can
+ // count on an array with at least two {map, handler} pairs, so they can go
+ // straight into polymorphic array handling.
+ __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, &start_polymorphic);
+
+ // Found a matching map; now call the handler.
+ Register handler = feedback;
+ DCHECK(handler.is(VectorStoreICDescriptor::ValueRegister()));
+ __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+ __ pop(vector);
+ __ pop(receiver);
+ __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ mov(Operand::StaticVariable(virtual_register), handler);
+ __ pop(handler); // Pop "value".
+ __ jmp(Operand::StaticVariable(virtual_register));
+
+ // Polymorphic, we have to loop from 2 to N
+
+ // TODO(mvstanton): I think there is a bug here, we are assuming the
+ // array has more than one map/handler pair, but we call this function in the
+ // keyed store with a string key case, where it might be just an array of two
+ // elements.
+
+ __ bind(&start_polymorphic);
+ __ push(key);
+ Register counter = key;
+ __ mov(counter, Immediate(Smi::FromInt(2)));
+ __ bind(&next_loop);
+ __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, &prepare_next);
+ __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ pop(key);
+ __ pop(vector);
+ __ pop(receiver);
+ __ mov(Operand::StaticVariable(virtual_register), handler);
+ __ pop(handler); // Pop "value".
+ __ jmp(Operand::StaticVariable(virtual_register));
+
+ __ bind(&prepare_next);
+ __ add(counter, Immediate(Smi::FromInt(2)));
+ __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+ __ j(less, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ pop(key);
+ __ pop(vector);
+ __ pop(receiver);
+ __ jmp(miss);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
+
+
+static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
+ Register key, Register vector,
+ Register slot, Register weak_cell,
+ Label* miss) {
+ // The store IC value is on the stack.
+ DCHECK(weak_cell.is(VectorStoreICDescriptor::ValueRegister()));
+ ExternalReference virtual_register =
+ ExternalReference::vector_store_virtual_register(masm->isolate());
+
+ // feedback initially contains the feedback array
+ Label compare_smi_map;
+
+ // Move the weak map into the weak_cell register.
+ Register ic_map = weak_cell;
+ __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &compare_smi_map);
+ __ cmp(ic_map, FieldOperand(receiver, 0));
+ __ j(not_equal, miss);
+ __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
+ // Put the store IC value back in its register.
+ __ mov(Operand::StaticVariable(virtual_register), weak_cell);
+ __ pop(weak_cell); // Pop "value".
+ // Jump to the handler.
+ __ jmp(Operand::StaticVariable(virtual_register));
+
+ // In microbenchmarks, it made sense to unroll this code so that the call to
+ // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+ __ bind(&compare_smi_map);
+ __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, miss);
+ __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
+ __ mov(Operand::StaticVariable(virtual_register), weak_cell);
+ __ pop(weak_cell); // Pop "value".
+ // Jump to the handler.
+ __ jmp(Operand::StaticVariable(virtual_register));
+}
+
+
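// ia32 is register-starved here: the value register doubles as scratch,
// so the computed handler address is parked in a per-isolate memory slot
// (the "virtual register"), the value is popped back into its register,
// and control transfers indirectly through the slot. Sketch with
// stand-ins (not the real V8 interfaces):

using Handler = void (*)();
struct IsolateSlots { Handler vector_store_virtual_register; };

void DispatchViaVirtualRegister(IsolateSlots* slots, Handler handler) {
  slots->vector_store_virtual_register = handler;  // stash the target
  // ...the real stub restores the clobbered value register here...
  slots->vector_store_virtual_register();          // jump through the slot
}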
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // edx
+ Register key = VectorStoreICDescriptor::NameRegister(); // ecx
+ Register value = VectorStoreICDescriptor::ValueRegister(); // eax
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // ebx
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // edi
Label miss;
- // TODO(mvstanton): Implement.
+ __ push(value);
+
+ Register scratch = value;
+ __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay;
+ __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
+ __ j(not_equal, &try_array);
+ HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &not_array);
+ HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
+ __ j(not_equal, &miss);
+
+ __ pop(value);
+ __ push(slot);
+ __ push(vector);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
+ receiver, key, slot, no_reg);
+ __ pop(vector);
+ __ pop(slot);
+ Label no_pop_miss;
+ __ jmp(&no_pop_miss);
+
__ bind(&miss);
+ __ pop(value);
+ __ bind(&no_pop_miss);
StoreIC::GenerateMiss(masm);
}
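// The store IC dispatches on what the feedback slot holds; the checks
// above compare the entry's map against heap roots. Restated as a sketch
// (the enums are illustrative stand-ins):

enum class FeedbackKind { kWeakCell, kFixedArray, kMegamorphicSymbol, kOther };
enum class StorePath { kMonomorphic, kPolymorphic, kStubCacheProbe, kMiss };

StorePath ClassifyStoreFeedback(FeedbackKind k) {
  switch (k) {
    case FeedbackKind::kWeakCell:          return StorePath::kMonomorphic;
    case FeedbackKind::kFixedArray:        return StorePath::kPolymorphic;
    case FeedbackKind::kMegamorphicSymbol: return StorePath::kStubCacheProbe;
    default:                               return StorePath::kMiss;
  }
}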
@@ -4720,29 +4745,161 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
+static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
+ Register receiver, Register key,
+ Register vector, Register slot,
+ Register feedback, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next, next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label transition_call;
+ Label pop_and_miss;
+ ExternalReference virtual_register =
+ ExternalReference::vector_store_virtual_register(masm->isolate());
+
+ __ push(receiver);
+ __ push(vector);
+
+ Register receiver_map = receiver;
+ Register cached_map = vector;
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &load_smi_map);
+ __ mov(receiver_map, FieldOperand(receiver, 0));
+ __ bind(&compare_map);
+
+ // Polymorphic, we have to loop from 0 to N - 1
+ __ push(key);
+ // On the stack we have:
+ // key (esp)
+ // vector
+ // receiver
+ // value
+ Register counter = key;
+ __ mov(counter, Immediate(Smi::FromInt(0)));
+ __ bind(&next_loop);
+ __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, &prepare_next);
+ __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &transition_call);
+ __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize + 2 * kPointerSize));
+ __ pop(key);
+ __ pop(vector);
+ __ pop(receiver);
+ __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
+ __ mov(Operand::StaticVariable(virtual_register), feedback);
+ __ pop(feedback); // Pop "value".
+ __ jmp(Operand::StaticVariable(virtual_register));
+
+ __ bind(&transition_call);
+ // Oh holy hell this will be tough.
+ // The map goes in vector register.
+ __ mov(receiver, FieldOperand(cached_map, WeakCell::kValueOffset));
+ // The weak cell may have been cleared.
+ __ JumpIfSmi(receiver, &pop_and_miss);
+ // The slot value goes on the stack; slot now holds the return address.
+ __ xchg(slot, Operand(esp, 4 * kPointerSize));
+ // Get the handler in value.
+ __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize + 2 * kPointerSize));
+ __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
+ // Pop key into place.
+ __ pop(key);
+ // Put the return address back on top of the stack; the vector goes in slot.
+ __ xchg(slot, Operand(esp, 0));
+ // Put the map on the stack; receiver now holds the receiver.
+ __ xchg(receiver, Operand(esp, 1 * kPointerSize));
+ // Put the vector on the stack; slot now holds the value.
+ __ xchg(slot, Operand(esp, 2 * kPointerSize));
+ // Swap feedback and slot: feedback now holds the value, slot the handler.
+ __ xchg(feedback, slot);
+ __ jmp(slot);
+
+ __ bind(&prepare_next);
+ __ add(counter, Immediate(Smi::FromInt(3)));
+ __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+ __ j(less, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ bind(&pop_and_miss);
+ __ pop(key);
+ __ pop(vector);
+ __ pop(receiver);
+ __ jmp(miss);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
+
+
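// Keyed-store polymorphic feedback is scanned in strides of three:
// (receiver map, transition map or undefined, handler); a non-undefined
// transition map selects the transitioning-store path. Sketch of the scan
// with stand-in types (weak-cell clearing omitted):

#include <cstddef>

struct Map {};
struct Code {};
struct Entry { const Map* map; const Map* transition; const Code* handler; };

const Entry* FindKeyedStoreHandler(const Entry* entries, std::size_t count,
                                   const Map* receiver_map) {
  for (std::size_t i = 0; i < count; ++i) {
    if (entries[i].map == receiver_map) return &entries[i];
  }
  return nullptr;  // exhausted the entries: fall through to miss
}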
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // edx
+ Register key = VectorStoreICDescriptor::NameRegister(); // ecx
+ Register value = VectorStoreICDescriptor::ValueRegister(); // eax
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // ebx
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // edi
Label miss;
- // TODO(mvstanton): Implement.
+ __ push(value);
+
+ Register scratch = value;
+ __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay;
+ __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
+ __ j(not_equal, &try_array);
+ HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &not_array);
+ HandlePolymorphicKeyedStoreCase(masm, receiver, key, vector, slot, scratch,
+ &miss);
+
+ __ bind(&not_array);
+ Label try_poly_name;
+ __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
+ __ j(not_equal, &try_poly_name);
+
+ __ pop(value);
+
+ Handle<Code> megamorphic_stub =
+ KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ cmp(key, scratch);
+ __ j(not_equal, &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+
__ bind(&miss);
+ __ pop(value);
KeyedStoreIC::GenerateMiss(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, ebx);
+ __ EmitLoadTypeFeedbackVector(ebx);
CallICStub stub(isolate(), state());
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, ebx);
- CallIC_ArrayStub stub(isolate(), state());
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 3aa18f250a..c09b27b773 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -188,7 +188,7 @@ class RecordWriteStub: public PlatformCodeStub {
break;
}
DCHECK(GetMode(stub) == mode);
- CpuFeatures::FlushICache(stub->instruction_start(), 7);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), 7);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index ae8cbeb90f..93f4cee636 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ia32/codegen-ia32.h"
+
#if V8_TARGET_ARCH_IA32
#include "src/codegen.h"
@@ -63,7 +65,7 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
@@ -92,7 +94,7 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
@@ -503,7 +505,7 @@ MemMoveFunction CreateMemMoveFunction() {
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
@@ -1030,7 +1032,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CpuFeatures::FlushICache(sequence, young_length);
+ Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc
index 576a1c142a..255bdbba01 100644
--- a/deps/v8/src/ia32/frames-ia32.cc
+++ b/deps/v8/src/ia32/frames-ia32.cc
@@ -8,6 +8,7 @@
#include "src/frames.h"
#include "src/ia32/assembler-ia32-inl.h"
#include "src/ia32/assembler-ia32.h"
+#include "src/ia32/frames-ia32.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index 1d200c04b0..a5ce6a5f02 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -39,6 +39,7 @@ class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -6 * kPointerSize;
+ static const int kNewTargetArgOffset = +2 * kPointerSize;
static const int kFunctionArgOffset = +3 * kPointerSize;
static const int kReceiverArgOffset = +4 * kPointerSize;
static const int kArgcOffset = +5 * kPointerSize;
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 4724d1eb44..22d85d8cc3 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -30,11 +30,22 @@ const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return edi; }
const Register VectorStoreICDescriptor::VectorRegister() { return ebx; }
-const Register StoreTransitionDescriptor::MapRegister() {
- return FLAG_vector_stores ? no_reg : ebx;
+const Register VectorStoreTransitionDescriptor::SlotRegister() {
+ return no_reg;
}
+const Register VectorStoreTransitionDescriptor::VectorRegister() {
+ return no_reg;
+}
+
+
+const Register VectorStoreTransitionDescriptor::MapRegister() { return no_reg; }
+
+
+const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
+
+
const Register LoadGlobalViaContextDescriptor::SlotRegister() { return ebx; }
@@ -42,14 +53,23 @@ const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
-const Register InstanceofDescriptor::left() { return eax; }
-const Register InstanceofDescriptor::right() { return edx; }
+const Register InstanceOfDescriptor::LeftRegister() { return edx; }
+const Register InstanceOfDescriptor::RightRegister() { return eax; }
+
+
+const Register StringCompareDescriptor::LeftRegister() { return edx; }
+const Register StringCompareDescriptor::RightRegister() { return eax; }
const Register ArgumentsAccessReadDescriptor::index() { return edx; }
const Register ArgumentsAccessReadDescriptor::parameter_count() { return eax; }
+const Register ArgumentsAccessNewDescriptor::function() { return edi; }
+const Register ArgumentsAccessNewDescriptor::parameter_count() { return ecx; }
+const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return edx; }
+
+
const Register ApiGetterDescriptor::function_address() { return edx; }
@@ -65,17 +85,11 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
-void StoreTransitionDescriptor::InitializePlatformSpecific(
+void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- MapRegister()};
-
- // When FLAG_vector_stores is true, we want to pass the map register on the
- // stack instead of in a register.
- DCHECK(FLAG_vector_stores || !MapRegister().is(no_reg));
-
- int register_count = FLAG_vector_stores ? 3 : 4;
- data->InitializePlatformSpecific(register_count, registers);
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister()};
+ // The other three parameters are passed on the stack on ia32.
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -102,6 +116,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToStringDescriptor::ReceiverRegister() { return eax; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
@@ -189,6 +207,15 @@ void CallConstructDescriptor::InitializePlatformSpecific(
}
+void CallTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // edi : the target to call
+ Register registers[] = {edi, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ecx, ebx, eax};
@@ -371,6 +398,18 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ eax, // argument count (including receiver)
+ ebx, // address of first argument
+ edi // the target callable to be called
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 71ad8387a0..850c182144 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -8,13 +8,13 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/hydrogen-osr.h"
#include "src/ia32/frames-ia32.h"
#include "src/ia32/lithium-codegen-ia32.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -76,7 +76,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
}
@@ -140,8 +140,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
- !info()->is_native() && info()->scope()->has_this_declaration()) {
+ if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
@@ -249,16 +248,27 @@ bool LCodeGen::GeneratePrologue() {
if (info()->saves_caller_doubles()) SaveCallerDoubles();
}
+ return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+ Comment(";;; Prologue begin");
// Possibly allocate a local context.
- int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info_->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in edi.
- DCHECK(!info()->scope()->is_script_scope());
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+ if (info()->scope()->is_script_scope()) {
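+ // Script contexts are created by a runtime call that can lazily deopt,
+ // so the safepoint below must be a lazy-deopt safepoint.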
+ __ push(edi);
+ __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ CallRuntime(Runtime::kNewScriptContext, 2);
+ deopt_mode = Safepoint::kLazyDeopt;
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -266,7 +276,8 @@ bool LCodeGen::GeneratePrologue() {
__ push(edi);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoLazyDeopt);
+ RecordSafepoint(deopt_mode);
+
// Context is returned in eax. It replaces the context passed to us.
// It's saved in the stack and kept live in esi.
__ mov(esi, eax);
@@ -303,13 +314,7 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; End allocate local context");
}
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- // We have not executed any compiled code yet, so esi still holds the
- // incoming context.
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
+ Comment(";;; Prologue end");
}
@@ -496,7 +501,7 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
DCHECK(is_done());
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// For lazy deoptimization we need space to patch a call after every call.
// Ensure there is always space for such patching, even if the code ends
// in a call.
@@ -825,7 +830,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- DCHECK(info()->IsOptimizing() || info()->IsStub());
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
@@ -1067,11 +1071,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCompare: {
- StringCompareStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -2055,6 +2054,17 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
}
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
+ int true_block = instr->TrueDestination(chunk_);
+ if (cc == no_condition) {
+ __ jmp(chunk_->GetAssemblyLabel(true_block));
+ } else {
+ __ j(cc, chunk_->GetAssemblyLabel(true_block));
+ }
+}
+
+
template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
int false_block = instr->FalseDestination(chunk_);
@@ -2353,40 +2363,6 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
}
-Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object) {
- __ JumpIfSmi(input, is_not_object);
-
- __ cmp(input, isolate()->factory()->null_value());
- __ j(equal, is_object);
-
- __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(not_zero, is_not_object);
-
- __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(below, is_not_object);
- __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- return below_equal;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- Condition true_cond = EmitIsObject(
- reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
-
- EmitBranch(instr, true_cond);
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -2460,16 +2436,15 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ DCHECK(ToRegister(instr->context()).is(esi));
+ DCHECK(ToRegister(instr->left()).is(edx));
+ DCHECK(ToRegister(instr->right()).is(eax));
- Condition condition = ComputeCompareCondition(op);
- __ test(eax, Operand(eax));
+ Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
+ __ test(eax, eax);
- EmitBranch(instr, condition);
+ EmitBranch(instr, ComputeCompareCondition(instr->op()));
}
@@ -2614,120 +2589,41 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- // Object and function are in fixed registers defined by the stub.
DCHECK(ToRegister(instr->context()).is(esi));
- InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+ DCHECK(ToRegister(instr->result()).is(eax));
+ InstanceOfStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-
- Label true_value, done;
- __ test(eax, Operand(eax));
- __ j(zero, &true_value, Label::kNear);
- __ mov(ToRegister(instr->result()), factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(ToRegister(instr->result()), factory()->true_value());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
- }
- LInstruction* instr() override { return instr_; }
- Label* map_check() { return &map_check_; }
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- // A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result, Label::kNear);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = ToRegister(instr->temp());
- __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
- __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
- __ j(not_equal, &cache_miss, Label::kNear);
- __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
- __ jmp(&done, Label::kNear);
-
- // The inlined call site cache did not match. Check for null and string
- // before calling the deferred code.
- __ bind(&cache_miss);
- // Null is not an instance of anything.
- __ cmp(object, factory()->null_value());
- __ j(equal, &false_result, Label::kNear);
-
- // String values are not instances of anything.
- Condition is_string = masm_->IsObjectStringType(object, temp, temp);
- __ j(is_string, &false_result, Label::kNear);
-
- // Go to the deferred code.
- __ jmp(deferred->entry());
-
- __ bind(&false_result);
- __ mov(ToRegister(instr->result()), factory()->false_value());
-
- // Here result has either true or false. Deferred code also produces true or
- // false object.
- __ bind(deferred->exit());
- __ bind(&done);
}
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- PushSafepointRegistersScope scope(this);
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+ LHasInPrototypeChainAndBranch* instr) {
+ Register const object = ToRegister(instr->object());
+ Register const object_map = ToRegister(instr->scratch());
+ Register const object_prototype = object_map;
+ Register const prototype = ToRegister(instr->prototype());
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(isolate(), flags);
-
- // Get the temp register reserved by the instruction. This needs to be a
- // register which is pushed last by PushSafepointRegisters as top of the
- // stack is used to pass the offset to the location of the map check to
- // the stub.
- Register temp = ToRegister(instr->temp());
- DCHECK(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 13;
- int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
- __ mov(temp, Immediate(delta));
- __ StoreToSafepointRegisterSlot(temp, temp);
- CallCodeGeneric(stub.GetCode(),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- // Get the deoptimization index of the LLazyBailout-environment that
- // corresponds to this instruction.
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+ // The {object} must be a spec object. It's sufficient to know that {object}
+ // is not a smi, since all other non-spec objects have {null} prototypes and
+ // will be ruled out below.
+ if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+ __ test(object, Immediate(kSmiTagMask));
+ EmitFalseBranch(instr, zero);
+ }
- // Put the result value into the eax slot and restore all registers.
- __ StoreToSafepointRegisterSlot(eax, eax);
+ // Loop through the {object}'s prototype chain looking for the {prototype}.
+ __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
+ Label loop;
+ __ bind(&loop);
+ __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object_prototype, prototype);
+ EmitTrueBranch(instr, equal);
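+ // A null prototype terminates the chain without finding {prototype}.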
+ __ cmp(object_prototype, factory()->null_value());
+ EmitFalseBranch(instr, equal);
+ __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+ __ jmp(&loop);
}
@@ -3438,11 +3334,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
- // Set eax to arguments count if adaption is not needed. Assumes that eax
- // is available to write to at this point.
- if (dont_adapt_arguments) {
- __ mov(eax, arity);
- }
+ // Always initialize eax to the number of actual arguments.
+ __ mov(eax, arity);
// Invoke function directly.
if (function.is_identical_to(info()->closure())) {
@@ -3504,9 +3397,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(edi));
DCHECK(ToRegister(instr->result()).is(eax));
- if (instr->hydrogen()->pass_argument_count()) {
- __ mov(eax, instr->arity());
- }
+ __ mov(eax, instr->arity());
// Change context.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -5390,7 +5281,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// eax = regexp literal clone.
// esi = context.
int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
__ LoadHeapObject(ecx, instr->hydrogen()->literals());
__ mov(ebx, FieldOperand(ecx, literal_offset));
__ cmp(ebx, factory()->undefined_value());
@@ -5433,26 +5324,6 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
}
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->kind());
- __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else {
- __ push(esi);
- __ push(Immediate(instr->hydrogen()->shared_info()));
- __ push(Immediate(pretenure ? factory()->true_value()
- : factory()->false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->value()).is(ebx));
@@ -5523,24 +5394,24 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
final_branch_condition = not_zero;
} else if (String::Equals(type_name, factory()->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label, false_distance);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label, true_distance);
- __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+ // Check for callable and not undetectable objects => true.
+ __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
+ __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+ __ cmp(input, 1 << Map::kIsCallable);
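+ // Equal iff the callable bit is set and the undetectable bit is clear.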
final_branch_condition = equal;
} else if (String::Equals(type_name, factory()->object_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ cmp(input, factory()->null_value());
__ j(equal, true_label, true_distance);
- __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, input);
__ j(below, false_label, false_distance);
- __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, false_label, false_distance);
- // Check for undetectable objects => false.
+ // Check for callable or undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
final_branch_condition = zero;
// clang-format off
@@ -5588,7 +5459,7 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 285c817343..a26903a9ac 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -104,8 +104,6 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register object,
@@ -260,7 +258,9 @@ class LCodeGen: public LCodeGenBase {
// EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
- template<class InstrType>
+ template <class InstrType>
+ void EmitTrueBranch(InstrType instr, Condition cc);
+ template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition cc);
void EmitNumberUntagD(LNumberUntagD* instr, Register input, Register temp,
XMMRegister result, NumberUntagDMode mode);
@@ -270,14 +270,6 @@ class LCodeGen: public LCodeGenBase {
// true and false label should be made, to optimize fallthrough.
Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object);
-
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index b7a85cb228..884067b776 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ia32/lithium-ia32.h"
+
#include <sstream>
#if V8_TARGET_ARCH_IA32
@@ -187,13 +189,6 @@ void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
value()->PrintTo(stream);
@@ -962,28 +957,25 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall()) {
+ if (instr->IsCall() || instr->IsPrologue()) {
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- LInstruction* instruction_needing_environment = NULL;
if (hydrogen_val->HasObservableSideEffects()) {
HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- instruction_needing_environment = instr;
sim->ReplayEnvironment(current_block_->last_environment());
hydrogen_value_for_lazy_bailout = sim;
}
LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
}
}
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+ return new (zone()) LPrologue();
+}
+
+
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -1034,22 +1026,22 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
- LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
+ LOperand* left =
+ UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+ LOperand* right =
+ UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
LOperand* context = UseFixed(instr->context(), esi);
- LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
+ LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(
- UseFixed(instr->context(), esi),
- UseFixed(instr->left(), InstanceofStub::left()),
- FixedTemp(edi));
- return MarkAsCall(DefineFixed(result, eax), instr);
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+ HHasInPrototypeChainAndBranch* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* prototype = UseRegister(instr->prototype());
+ LOperand* temp = TempRegister();
+ return new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
}
@@ -1744,13 +1736,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
}
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- DCHECK(instr->value()->representation().IsSmiOrTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -2553,13 +2538,6 @@ LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 9224dcba98..9e4b885c48 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -83,19 +83,17 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
+ V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -134,6 +132,7 @@ class LCodeGen;
V(OsrEntry) \
V(Parameter) \
V(Power) \
+ V(Prologue) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -236,8 +235,6 @@ class LInstruction : public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
@@ -397,6 +394,12 @@ class LGoto final : public LTemplateInstruction<0, 0, 0> {
};
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
@@ -994,22 +997,6 @@ class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
};
-class LIsObjectAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1068,7 +1055,7 @@ class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
inputs_[2] = right;
}
- LOperand* context() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
@@ -1187,39 +1174,30 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
- LOperand* context() { return inputs_[0]; }
+ LOperand* context() const { return inputs_[0]; }
+ LOperand* left() const { return inputs_[1]; }
+ LOperand* right() const { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal final : public LTemplateInstruction<1, 2, 1> {
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
+ LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
+ LOperand* scratch) {
+ inputs_[0] = object;
+ inputs_[1] = prototype;
+ temps_[0] = scratch;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) override {
- lazy_deopt_env_ = env;
- }
+ LOperand* object() const { return inputs_[0]; }
+ LOperand* prototype() const { return inputs_[1]; }
+ LOperand* scratch() const { return temps_[0]; }
- private:
- LEnvironment* lazy_deopt_env_;
+ DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+ "has-in-prototype-chain-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
};
@@ -2591,19 +2569,6 @@ class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFunctionLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 2ad52208e4..0ad5d778ec 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -8,9 +8,9 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
#include "src/ia32/frames-ia32.h"
+#include "src/ia32/macro-assembler-ia32.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -66,8 +66,7 @@ void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- mov(destination, value);
+ mov(destination, isolate()->heap()->root_handle(index));
return;
}
ExternalReference roots_array_start =
@@ -105,16 +104,20 @@ void MacroAssembler::CompareRoot(Register with,
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
+ cmp(with, isolate()->heap()->root_handle(index));
}
void MacroAssembler::CompareRoot(const Operand& with,
Heap::RootListIndex index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
+ cmp(with, isolate()->heap()->root_handle(index));
+}
+
+
+void MacroAssembler::PushRoot(Heap::RootListIndex index) {
+ DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+ Push(isolate()->heap()->root_handle(index));
}
@@ -754,26 +757,6 @@ Condition MacroAssembler::IsObjectNameType(Register heap_object,
}
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- cmp(scratch,
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- j(above, fail);
-}
-
-
void MacroAssembler::FCmp() {
fucomip();
fstp(0);
@@ -826,6 +809,18 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertFunction(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAFunction);
+ Push(object);
+ CmpObjectType(object, JS_FUNCTION_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotAFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
Label done_checking;
@@ -873,6 +868,13 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
+void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
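+ // Frame -> closure -> SharedFunctionInfo -> feedback vector.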
+ mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ mov(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// Out-of-line constant pool not implemented on ia32.
@@ -1786,42 +1788,17 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
Label done, loop;
mov(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
bind(&loop);
- JumpIfSmi(result, &done);
+ JumpIfSmi(result, &done, Label::kNear);
CmpObjectType(result, MAP_TYPE, temp);
- j(not_equal, &done);
+ j(not_equal, &done, Label::kNear);
mov(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
jmp(&loop);
bind(&done);
}
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function) {
- Label non_instance;
- if (miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss);
-
- // If a bound function, go to miss label.
- mov(scratch,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
- SharedFunctionInfo::kBoundFunction);
- j(not_zero, miss);
-
- // Make sure that the function has an instance prototype.
- movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
- test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance);
- }
-
+void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss) {
// Get the prototype or initial map from the function.
mov(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1835,20 +1812,11 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// If the function does not have an initial map, we're done.
Label done;
CmpObjectType(result, MAP_TYPE, scratch);
- j(not_equal, &done);
+ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
mov(result, FieldOperand(result, Map::kPrototypeOffset));
- if (miss_on_bound_function) {
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- GetMapConstructor(result, result, scratch);
- }
-
// All done.
bind(&done);
}
@@ -1961,10 +1929,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Label invoke;
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
+ mov(eax, actual.immediate());
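+ // eax must hold the actual argument count even when no adaptation is needed.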
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
- mov(eax, actual.immediate());
const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
if (expected.immediate() == sentinel) {
// Don't worry about adapting arguments for builtins that
@@ -1982,10 +1950,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
+ mov(eax, actual.immediate());
cmp(expected.reg(), actual.immediate());
j(equal, &invoke);
DCHECK(expected.reg().is(ebx));
- mov(eax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
@@ -1993,6 +1961,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
j(equal, &invoke);
DCHECK(actual.reg().is(eax));
DCHECK(expected.reg().is(ebx));
+ } else {
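+ // Expected and actual share a register; copy the count into eax.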
+ Move(eax, actual.reg());
}
}
@@ -2093,8 +2063,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
@@ -2103,26 +2072,26 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
// arguments match the expected number of arguments. Fake a
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
- GetBuiltinFunction(edi, id);
+ GetBuiltinFunction(edi, native_context_index);
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
expected, expected, flag, call_wrapper);
}
void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
+ int native_context_index) {
// Load the JavaScript builtin function from the builtins object.
- mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- mov(target, FieldOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+ mov(target, GlobalObjectOperand());
+ mov(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
+ mov(target, ContextOperand(target, native_context_index));
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+void MacroAssembler::GetBuiltinEntry(Register target,
+ int native_context_index) {
DCHECK(!target.is(edi));
// Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(edi, id);
+ GetBuiltinFunction(edi, native_context_index);
// Load the code entry point from the function into the target register.
mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
@@ -2154,6 +2123,12 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+ mov(dst, GlobalObjectOperand());
+ mov(dst, FieldOperand(dst, GlobalObject::kGlobalProxyOffset));
+}
+
+
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
@@ -2625,81 +2600,6 @@ void MacroAssembler::LoadPowerOf2(XMMRegister dst,
}
-void MacroAssembler::LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- sub(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- JumpIfNotSmi(object, &not_smi, Label::kNear);
- mov(scratch, object);
- SmiUntag(scratch);
- jmp(&smi_hash_calculated, Label::kNear);
- bind(&not_smi);
- cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- and_(scratch, mask);
- Register index = scratch;
- Register probe = mask;
- mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- JumpIfSmi(probe, not_found);
- movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
- j(parity_even, not_found); // Bail out if NaN is involved.
- j(not_equal, not_found); // The cache did not contain this value.
- jmp(&load_result_from_cache, Label::kNear);
-
- bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- and_(scratch, mask);
- // Check if the entry is the smi we are looking for.
- cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- j(not_equal, not_found);
-
- // Get the result from the cache.
- bind(&load_result_from_cache);
- mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
-}
-
-
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
Register instance_type, Register scratch, Label* failure) {
if (!scratch.is(instance_type)) {
@@ -2876,7 +2776,7 @@ CodePatcher::CodePatcher(byte* address, int size)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CpuFeatures::FlushICache(address_, size_);
+ Assembler::FlushICacheWithoutIsolate(address_, size_);
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index b228ef9a28..508e2099ad 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -26,6 +26,9 @@ const Register kInterpreterDispatchTableRegister = {kRegister_ebx_Code};
const Register kRuntimeCallFunctionRegister = {kRegister_ebx_Code};
const Register kRuntimeCallArgCountRegister = {kRegister_eax_Code};
+// Spill slots used by interpreter dispatch calling convention.
+const int kInterpreterContextSpillSlot = -1;
+
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
@@ -68,6 +71,16 @@ class MacroAssembler: public Assembler {
void Load(Register dst, const Operand& src, Representation r);
void Store(Register src, const Operand& dst, Representation r);
+ // Load a register with a long value as efficiently as possible.
+ void Set(Register dst, int32_t x) {
+ if (x == 0) {
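+ // xor has a shorter encoding than mov with a zero immediate.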
+ xor_(dst, dst);
+ } else {
+ mov(dst, Immediate(x));
+ }
+ }
+ void Set(const Operand& dst, int32_t x) { mov(dst, Immediate(x)); }
+
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
@@ -76,6 +89,22 @@ class MacroAssembler: public Assembler {
// and not in new space).
void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(const Operand& with, Heap::RootListIndex index);
+ void PushRoot(Heap::RootListIndex index);
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
+ Label::Distance if_equal_distance = Label::kNear) {
+ CompareRoot(with, index);
+ j(equal, if_equal, if_equal_distance);
+ }
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, Heap::RootListIndex index,
+ Label* if_not_equal,
+ Label::Distance if_not_equal_distance = Label::kNear) {
+ CompareRoot(with, index);
+ j(not_equal, if_not_equal, if_not_equal_distance);
+ }
// ---------------------------------------------------------------------------
// GC Support
@@ -259,6 +288,9 @@ class MacroAssembler: public Assembler {
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst);
+
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
@@ -354,17 +386,15 @@ class MacroAssembler: public Assembler {
InvokeFlag flag,
const CallWrapper& call_wrapper);
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+ // Invoke specified builtin JavaScript function.
+ void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void GetBuiltinFunction(Register target, int native_context_index);
// Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ void GetBuiltinEntry(Register target, int native_context_index);
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
@@ -451,18 +481,6 @@ class MacroAssembler: public Assembler {
Register map,
Register instance_type);
- // Check if a heap object's type is in the JSObject range, not including
- // JSFunction. The object's map will be loaded in the map register.
- // Any or all of the three registers may be the same.
- // The contents of the scratch register will always be overwritten.
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- // The contents of the scratch register will be overwritten.
- void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
-
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, je, and jz).
void FCmp();
@@ -576,6 +594,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+ void AssertFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
@@ -728,11 +749,8 @@ class MacroAssembler: public Assembler {
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other registers may be
// clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function = false);
+ void TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss);
// Picks out an array index from the hash field.
// Register use:
@@ -817,8 +835,14 @@ class MacroAssembler: public Assembler {
void Drop(int element_count);
void Call(Label* target) { call(target); }
+ void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
+ void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
void Push(Register src) { push(src); }
+ void Push(const Operand& src) { push(src); }
+ void Push(Immediate value) { push(value); }
void Pop(Register dst) { pop(dst); }
+ void PushReturnAddressFrom(Register src) { push(src); }
+ void PopReturnAddressTo(Register dst) { pop(dst); }
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, int8_t imm8);
@@ -899,17 +923,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities.
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- void LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found);
-
// Check whether the instance type represents a flat one-byte string. Jump to
// the label if not. If the instance type can be scratched, specify the same
// register for both instance type and scratch.
@@ -941,6 +954,9 @@ class MacroAssembler: public Assembler {
return SafepointRegisterStackIndex(reg.code());
}
+ // Load the type feedback vector from a JavaScript frame.
+ void EmitLoadTypeFeedbackVector(Register vector);
+
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
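
The new Push(Operand)/Push(Immediate) overloads and the PushReturnAddressFrom/PopReturnAddressTo pair reflect ia32's calling convention: CALL leaves the return address on top of the stack, so a stub that wants to pass extra arguments has to lift it out of the way first and replace it last. A minimal sketch of that bookkeeping on a toy stack, not real V8 types:

// Illustrative sketch only (not V8 code): why ia32 needs
// PopReturnAddressTo/PushReturnAddressFrom. CALL leaves the return address
// on top of the stack, so pushing extra arguments "under" it means popping
// it into a register first and pushing it back last.
#include <cassert>
#include <vector>

struct Stack {
  std::vector<int> slots;  // back() models the top of stack (esp)
  void push(int v) { slots.push_back(v); }
  int pop() { int v = slots.back(); slots.pop_back(); return v; }
};

int main() {
  Stack s;
  s.push(0x1234);            // state right after CALL: return address on top
  int scratch = s.pop();     // PopReturnAddressTo(scratch)
  s.push(/*receiver*/ 1);    // now the arguments...
  s.push(/*name*/ 2);
  s.push(/*value*/ 3);
  s.push(scratch);           // PushReturnAddressFrom(scratch)
  assert(s.slots.back() == 0x1234);  // return address is back on top
  return 0;
}
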
diff --git a/deps/v8/src/ia32/simulator-ia32.cc b/deps/v8/src/ia32/simulator-ia32.cc
index 20edae83a2..d696e4b45e 100644
--- a/deps/v8/src/ia32/simulator-ia32.cc
+++ b/deps/v8/src/ia32/simulator-ia32.cc
@@ -2,5 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ia32/simulator-ia32.h"
// Since there is no simulator for the ia32 architecture this file is empty.
diff --git a/deps/v8/src/ic/access-compiler.cc b/deps/v8/src/ic/access-compiler.cc
index 0dc9ab6e8d..951966e7de 100644
--- a/deps/v8/src/ic/access-compiler.cc
+++ b/deps/v8/src/ic/access-compiler.cc
@@ -49,5 +49,25 @@ Register* PropertyAccessCompiler::GetCallingConvention(Code::Kind kind) {
DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
return store_calling_convention();
}
+
+
+Register PropertyAccessCompiler::slot() const {
+ if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
+ return LoadDescriptor::SlotRegister();
+ }
+ DCHECK(FLAG_vector_stores &&
+ (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC));
+ return VectorStoreICDescriptor::SlotRegister();
+}
+
+
+Register PropertyAccessCompiler::vector() const {
+ if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
+ return LoadWithVectorDescriptor::VectorRegister();
+ }
+ DCHECK(FLAG_vector_stores &&
+ (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC));
+ return VectorStoreICDescriptor::VectorRegister();
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/access-compiler.h b/deps/v8/src/ic/access-compiler.h
index 61567a2224..a5beb714f8 100644
--- a/deps/v8/src/ic/access-compiler.h
+++ b/deps/v8/src/ic/access-compiler.h
@@ -54,15 +54,12 @@ class PropertyAccessCompiler BASE_EMBEDDED {
Register receiver() const { return registers_[0]; }
Register name() const { return registers_[1]; }
- Register slot() const { return LoadDescriptor::SlotRegister(); }
- Register vector() const { return LoadWithVectorDescriptor::VectorRegister(); }
+ Register slot() const;
+ Register vector() const;
Register scratch1() const { return registers_[2]; }
Register scratch2() const { return registers_[3]; }
Register scratch3() const { return registers_[4]; }
- // Calling convention between indexed store IC and handler.
- Register transition_map() const { return scratch1(); }
-
static Register* GetCallingConvention(Code::Kind);
static Register* load_calling_convention();
static Register* store_calling_convention();
@@ -81,6 +78,8 @@ class PropertyAccessCompiler BASE_EMBEDDED {
Isolate* isolate_;
MacroAssembler masm_;
+ // Ensure that MacroAssembler has a reasonable size.
+ STATIC_ASSERT(sizeof(MacroAssembler) < 128 * kPointerSize);
};
}
} // namespace v8::internal
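
The STATIC_ASSERT added above is a compile-time size budget: PropertyAccessCompiler embeds a MacroAssembler by value, and the assert breaks the build if the type ever grows past 128 pointer-sized words. A self-contained sketch of the same guard, with an invented MacroAssemblerLike type standing in:

// Illustrative sketch (MacroAssemblerLike is invented): a compile-time size
// budget like the STATIC_ASSERT above. If the embedded type outgrows the
// budget, the build breaks at the assert instead of bloating every embedder.
#include <cstddef>
#include <cstdint>

constexpr std::size_t kPointerSize = sizeof(void*);

struct MacroAssemblerLike {
  std::uint8_t buffer[64 * kPointerSize];  // stand-in payload
};

static_assert(sizeof(MacroAssemblerLike) < 128 * kPointerSize,
              "type must stay small enough to embed by value");

int main() { return 0; }
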
diff --git a/deps/v8/src/ic/arm/access-compiler-arm.cc b/deps/v8/src/ic/arm/access-compiler-arm.cc
index 3b0c0c26c7..62f554792f 100644
--- a/deps/v8/src/ic/arm/access-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/access-compiler-arm.cc
@@ -31,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(r3.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores || r3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, r3, r4, r5};
return registers;
}
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index 6f4ddcf98a..e2585fe222 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -305,25 +306,35 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -566,6 +577,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ b(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
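
Both slow paths above now funnel through StoreIC_PushArgs, and the count handed to TailCallRuntime must track what was pushed: three arguments (receiver, name, value) normally, five when FLAG_vector_stores adds the slot and vector. A toy sketch of that invariant, not V8 code:

// Illustrative sketch (not V8 code): the invariant behind the
// "FLAG_vector_stores ? 5 : 3" counts — the tail call must declare exactly
// as many arguments as StoreIC_PushArgs pushed.
#include <cassert>
#include <vector>

static bool FLAG_vector_stores = true;

void PushStoreArgs(std::vector<int>* stack) {
  stack->insert(stack->end(), {/*receiver*/ 1, /*name*/ 2, /*value*/ 3});
  if (FLAG_vector_stores) {
    stack->insert(stack->end(), {/*slot*/ 4, /*vector*/ 5});
  }
}

void TailCallRuntime(const std::vector<int>& stack, int argc) {
  assert(static_cast<int>(stack.size()) == argc);  // counts must agree
}

int main() {
  std::vector<int> stack;
  PushStoreArgs(&stack);
  TailCallRuntime(stack, FLAG_vector_stores ? 5 : 3);
  return 0;
}
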
diff --git a/deps/v8/src/ic/arm/ic-arm.cc b/deps/v8/src/ic/arm/ic-arm.cc
index a805f4ccee..de219ae72f 100644
--- a/deps/v8/src/ic/arm/ic-arm.cc
+++ b/deps/v8/src/ic/arm/ic-arm.cc
@@ -692,12 +692,20 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(r4, &slow);
+ // We use register r8 when FLAG_vector_stores is enabled, because otherwise
+ // probing the megamorphic stub cache would require pushing temporaries on
+ // the stack.
+ // TODO(mvstanton): quit using register r8 when
+ // FLAG_enable_embedded_constant_pool is turned on.
+ DCHECK(!FLAG_vector_stores || !FLAG_enable_embedded_constant_pool);
+ Register temporary2 = FLAG_vector_stores ? r8 : r4;
if (FLAG_vector_stores) {
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r3, r4, r5, r6));
+
+ DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
@@ -708,8 +716,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
- masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, r3, r4, r5, r6);
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, flags, receiver, key, r5, temporary2, r6, r9);
// Cache miss.
__ b(&miss);
@@ -792,20 +800,22 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = r3;
+ Register dictionary = r5;
DCHECK(receiver.is(r1));
DCHECK(name.is(r2));
DCHECK(value.is(r0));
+ DCHECK(VectorStoreICDescriptor::VectorRegister().is(r3));
+ DCHECK(VectorStoreICDescriptor::SlotRegister().is(r4));
__ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5);
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, r6, r9);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5);
+ __ IncrementCounter(counters->store_normal_hit(), 1, r6, r9);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
+ __ IncrementCounter(counters->store_normal_miss(), 1, r6, r9);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/arm/ic-compiler-arm.cc b/deps/v8/src/ic/arm/ic-compiler-arm.cc
index ff2bcf05b1..9b8abd3298 100644
--- a/deps/v8/src/ic/arm/ic-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/ic-compiler-arm.cc
@@ -111,7 +111,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ b(ne, &next_map);
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/arm/stub-cache-arm.cc b/deps/v8/src/ic/arm/stub-cache-arm.cc
index cdd04faf38..86710eb29a 100644
--- a/deps/v8/src/ic/arm/stub-cache-arm.cc
+++ b/deps/v8/src/ic/arm/stub-cache-arm.cc
@@ -120,8 +120,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
+ Register vector, slot;
+ if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+ vector = VectorStoreICDescriptor::VectorRegister();
+ slot = VectorStoreICDescriptor::SlotRegister();
+ } else {
+ vector = LoadWithVectorDescriptor::VectorRegister();
+ slot = LoadWithVectorDescriptor::SlotRegister();
+ }
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
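
The probe now has to keep a different register pair alive depending on the IC kind: store ICs carry their vector and slot in the VectorStoreICDescriptor registers, loads in the LoadWithVectorDescriptor ones. A minimal sketch of the dispatch, with invented enum names:

// Illustrative sketch (enum names invented): the register-pair selection the
// probe now performs — store ICs keep their vector and slot in the
// VectorStoreICDescriptor registers, load ICs in the LoadWithVector ones.
#include <utility>

enum class Kind { kLoadIC, kKeyedLoadIC, kStoreIC, kKeyedStoreIC };
enum class Reg { kLoadVec, kLoadSlot, kStoreVec, kStoreSlot };

std::pair<Reg, Reg> VectorAndSlotFor(Kind kind) {
  if (kind == Kind::kStoreIC || kind == Kind::kKeyedStoreIC) {
    return {Reg::kStoreVec, Reg::kStoreSlot};
  }
  return {Reg::kLoadVec, Reg::kLoadSlot};
}

int main() {
  return VectorAndSlotFor(Kind::kStoreIC).first == Reg::kStoreVec ? 0 : 1;
}
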
diff --git a/deps/v8/src/ic/arm64/access-compiler-arm64.cc b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
index 14b0fa7f16..13b0887a82 100644
--- a/deps/v8/src/ic/arm64/access-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/access-compiler-arm64.cc
@@ -38,7 +38,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, value, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(x3.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores || x3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, x3, x4, x5};
return registers;
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 71c70da7a4..10ea1d72ff 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -298,27 +299,36 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, name and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
ASM_LOCATION("ElementHandlerCompiler::GenerateStoreSlow");
-
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -617,6 +627,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
__ B(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ Bind(&success);
diff --git a/deps/v8/src/ic/arm64/ic-arm64.cc b/deps/v8/src/ic/arm64/ic-arm64.cc
index 27c4f71431..c4c856aab7 100644
--- a/deps/v8/src/ic/arm64/ic-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-arm64.cc
@@ -696,7 +696,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, x3, x4, x5, x6));
+ DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
@@ -708,7 +708,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, x3, x4, x5, x6);
+ receiver, key, x5, x6, x7, x8);
// Cache miss.
__ B(&miss);
@@ -789,19 +789,21 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register value = StoreDescriptor::ValueRegister();
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- Register dictionary = x3;
- DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));
+ Register dictionary = x5;
+ DCHECK(!AreAliased(value, receiver, name,
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister(), x5, x6, x7));
__ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, x6, x7);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
+ __ IncrementCounter(counters->store_normal_hit(), 1, x6, x7);
__ Ret();
// Cache miss: Jump to runtime.
__ Bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
+ __ IncrementCounter(counters->store_normal_miss(), 1, x6, x7);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
index a86b5e53b5..b4a4163fed 100644
--- a/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/ic-compiler-arm64.cc
@@ -116,7 +116,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
// This argument is used by the handler stub. For example, see
// ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
}
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ Bind(&skip);
diff --git a/deps/v8/src/ic/arm64/stub-cache-arm64.cc b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
index ecd7fe1534..eb82f2af86 100644
--- a/deps/v8/src/ic/arm64/stub-cache-arm64.cc
+++ b/deps/v8/src/ic/arm64/stub-cache-arm64.cc
@@ -111,8 +111,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
+ Register vector, slot;
+ if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+ vector = VectorStoreICDescriptor::VectorRegister();
+ slot = VectorStoreICDescriptor::SlotRegister();
+ } else {
+ vector = LoadWithVectorDescriptor::VectorRegister();
+ slot = LoadWithVectorDescriptor::SlotRegister();
+ }
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index 98b30aa54d..77e0fb5e43 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -4,10 +4,11 @@
#include "src/ic/handler-compiler.h"
-#include "src/cpu-profiler.h"
#include "src/ic/call-optimization.h"
#include "src/ic/ic.h"
#include "src/ic/ic-inl.h"
+#include "src/isolate-inl.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -329,7 +330,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
PrototypeIterator iter(isolate(), last);
while (!iter.IsAtEnd()) {
lost_holder_register = true;
- last = JSObject::cast(iter.GetCurrent());
+ last = iter.GetCurrent<JSObject>();
iter.Advance();
}
auto last_handle = handle(last);
@@ -423,6 +424,8 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
Handle<Map> transition, Handle<Name> name) {
Label miss;
+ if (FLAG_vector_stores) PushVectorAndSlot();
+
// Check that we are allowed to write this.
bool is_nonexistent = holder()->map() == transition->GetBackPointer();
if (is_nonexistent) {
@@ -433,7 +436,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
: PrototypeIterator::END_AT_NULL;
PrototypeIterator iter(isolate(), holder());
while (!iter.IsAtEnd(end)) {
- last = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ last = PrototypeIterator::GetCurrent<JSObject>(iter);
iter.Advance();
}
if (!last.is_null()) set_holder(last);
@@ -453,16 +456,19 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
DCHECK(!transition->is_access_check_needed());
// Call to respective StoreTransitionStub.
- Register transition_map_reg = StoreTransitionDescriptor::MapRegister();
- bool push_map_on_stack = transition_map_reg.is(no_reg);
- Register map_reg = push_map_on_stack ? scratch1() : transition_map_reg;
+ Register transition_map_reg = StoreTransitionHelper::MapRegister();
+ bool stack_args = StoreTransitionHelper::UsesStackArgs();
+ Register map_reg = stack_args ? scratch1() : transition_map_reg;
if (details.type() == DATA_CONSTANT) {
DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
GenerateRestoreMap(transition, map_reg, scratch2(), &miss);
GenerateConstantCheck(map_reg, descriptor, value(), scratch2(), &miss);
- if (push_map_on_stack) {
+ if (stack_args) {
+ // Also pushes vector and slot.
GeneratePushMap(map_reg, scratch2());
+ } else if (FLAG_vector_stores) {
+ PopVectorAndSlot();
}
GenerateRestoreName(name);
StoreTransitionStub stub(isolate());
@@ -479,8 +485,11 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
: StoreTransitionStub::StoreMapAndValue;
GenerateRestoreMap(transition, map_reg, scratch2(), &miss);
- if (push_map_on_stack) {
+ if (stack_args) {
+ // Also pushes vector and slot.
GeneratePushMap(map_reg, scratch2());
+ } else if (FLAG_vector_stores) {
+ PopVectorAndSlot();
}
GenerateRestoreName(name);
StoreTransitionStub stub(isolate(),
@@ -490,21 +499,37 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
}
GenerateRestoreName(&miss, name);
+ if (FLAG_vector_stores) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
return GetCode(kind(), Code::FAST, name);
}
+bool NamedStoreHandlerCompiler::RequiresFieldTypeChecks(
+ HeapType* field_type) const {
+ return !field_type->Classes().Done();
+}
+
+
Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
Label miss;
DCHECK(it->representation().IsHeapObject());
- GenerateFieldTypeChecks(*it->GetFieldType(), value(), &miss);
+ HeapType* field_type = *it->GetFieldType();
+ bool need_save_restore = false;
+ if (RequiresFieldTypeChecks(field_type)) {
+ need_save_restore = IC::ICUseVector(kind());
+ if (need_save_restore) PushVectorAndSlot();
+ GenerateFieldTypeChecks(field_type, value(), &miss);
+ if (need_save_restore) PopVectorAndSlot();
+ }
+
StoreFieldStub stub(isolate(), it->GetFieldIndex(), it->representation());
GenerateTailCall(masm(), stub.GetCode());
__ bind(&miss);
+ if (need_save_restore) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
return GetCode(kind(), Code::FAST, it->name());
}
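
CompileStoreField above saves the vector and slot only when RequiresFieldTypeChecks says a map check will actually be emitted, and it must restore them exactly once on each exit: after the checks on the fast path, and at the miss label on the slow path (the miss branch is taken from inside GenerateFieldTypeChecks, before the fast-path pop runs). A toy model of that balancing, not V8 code:

// Toy model, not V8 code: the save/restore balancing in CompileStoreField.
// The vector and slot are pushed only when type checks are emitted; the miss
// branch is taken from inside the checks, before the fast-path pop, so each
// exit pops exactly once.
#include <cassert>
#include <stack>

struct Compiler {
  std::stack<int> frame;
  bool need_save_restore = false;

  void PushVectorAndSlot() { frame.push(0); frame.push(0); }
  void PopVectorAndSlot() { frame.pop(); frame.pop(); }

  void CompileStoreField(bool has_type_checks, bool miss) {
    if (has_type_checks) {
      need_save_restore = true;
      PushVectorAndSlot();
      // ... GenerateFieldTypeChecks may branch to the miss label here ...
      if (!miss) PopVectorAndSlot();  // fast path: restore after the checks
    }
    if (miss && need_save_restore) PopVectorAndSlot();  // miss label restore
    assert(frame.empty());  // balanced on every exit
  }
};

int main() {
  Compiler().CompileStoreField(true, false);
  Compiler().CompileStoreField(true, true);
  Compiler().CompileStoreField(false, false);
  return 0;
}
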
diff --git a/deps/v8/src/ic/handler-compiler.h b/deps/v8/src/ic/handler-compiler.h
index 05c973a625..f5dafe9038 100644
--- a/deps/v8/src/ic/handler-compiler.h
+++ b/deps/v8/src/ic/handler-compiler.h
@@ -262,6 +262,7 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
Register value_reg, Register scratch,
Label* miss_label);
+ bool RequiresFieldTypeChecks(HeapType* field_type) const;
void GenerateFieldTypeChecks(HeapType* field_type, Register value_reg,
Label* miss_label);
diff --git a/deps/v8/src/ic/ia32/access-compiler-ia32.cc b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
index 81579e5dc3..acb3526d9d 100644
--- a/deps/v8/src/ic/ia32/access-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/access-compiler-ia32.cc
@@ -30,7 +30,8 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(ebx.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores ||
+ ebx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, ebx, edi, no_reg};
return registers;
}
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index c45821fe8b..1d019092c7 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -303,13 +304,24 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+ if (FLAG_vector_stores) {
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // receiver now holds the return address (from the xchg above).
+ } else {
+ DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(value);
+ __ push(ebx);
+ }
}
@@ -318,7 +330,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
@@ -327,7 +339,8 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -351,10 +364,16 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
Register scratch) {
- // Get the return address, push the argument and then continue.
- __ pop(scratch);
+ //   current              after GeneratePushMap
+ //   -------------------------------------------------
+ //   ret addr             slot
+ //   vector               vector
+ //   sp -> slot           map
+ //                        sp -> ret addr
+ //
+ __ xchg(map_reg, Operand(esp, 0));
+ __ xchg(map_reg, Operand(esp, 2 * kPointerSize));
__ push(map_reg);
- __ push(scratch);
}
@@ -574,6 +593,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
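
The rewritten GeneratePushMap rotates the map underneath the return address without a free scratch register: two xchg instructions move the map into the old slot position and the return address into map_reg, and a final push puts the return address back on top. The following sketch replays the sequence on a toy stack (index 0 standing in for esp) and checks the layout claimed in the comment:

// Illustrative sketch (not V8 code): replaying GeneratePushMap's two-xchg
// rotation on a toy stack; index 0 stands in for esp. Before the sequence
// the stack is [slot, vector, ret] and map_reg holds the map.
#include <cassert>
#include <deque>
#include <string>
#include <utility>

int main() {
  std::deque<std::string> stack = {"slot", "vector", "ret"};
  std::string map_reg = "map";

  std::swap(map_reg, stack[0]);  // xchg map_reg, [esp]    -> map_reg = slot
  std::swap(map_reg, stack[2]);  // xchg map_reg, [esp+8]  -> map_reg = ret
  stack.push_front(map_reg);     // push map_reg (the return address)

  // Matches the comment: slot deepest, then vector, map, ret addr on top.
  assert((stack == std::deque<std::string>{"ret", "map", "vector", "slot"}));
  return 0;
}
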
diff --git a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
index a1e2cbcefe..d0a2e0bd54 100644
--- a/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-compiler-ia32.cc
@@ -112,7 +112,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/ia32/ic-ia32.cc b/deps/v8/src/ic/ia32/ic-ia32.cc
index d683264e13..7a6a41541c 100644
--- a/deps/v8/src/ic/ia32/ic-ia32.cc
+++ b/deps/v8/src/ic/ia32/ic-ia32.cc
@@ -577,7 +577,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, ebx, no_reg);
+ receiver, key, edi, no_reg);
if (FLAG_vector_stores) {
__ pop(VectorStoreICDescriptor::VectorRegister());
@@ -734,6 +734,12 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ // This shouldn't be called.
+ __ int3();
+ return;
+ }
+
// Return address is on the stack.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -787,22 +793,32 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = ebx;
-
- __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
// A lot of registers are needed for storing to slow case
// objects. Push and restore receiver but rely on
// GenerateDictionaryStore preserving the value and name.
__ push(receiver);
+ if (FLAG_vector_stores) {
+ __ push(vector);
+ __ push(slot);
+ }
+
+ Register dictionary = ebx;
+ __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
receiver, edi);
- __ Drop(1);
+ __ Drop(FLAG_vector_stores ? 3 : 1);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->store_normal_hit(), 1);
__ ret(0);
__ bind(&restore_miss);
+ if (FLAG_vector_stores) {
+ __ pop(slot);
+ __ pop(vector);
+ }
__ pop(receiver);
__ IncrementCounter(counters->store_normal_miss(), 1);
GenerateMiss(masm);
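
StoreIC::GenerateNormal now spills three words (receiver, then vector and slot) before the dictionary store, so the hit path has to Drop three instead of one, and the miss path pops them back in reverse order before falling into GenerateMiss. A toy sketch of that bookkeeping:

// Toy sketch, not V8 code: the bookkeeping in StoreIC::GenerateNormal. Three
// words are spilled; the hit path drops all three at once, the miss path
// pops them back in reverse order so the registers are restored.
#include <cassert>
#include <vector>

int main() {
  std::vector<int> stack;
  int receiver = 1, vector = 2, slot = 3;

  stack.push_back(receiver);  // __ push(receiver)
  stack.push_back(vector);    // __ push(vector)
  stack.push_back(slot);      // __ push(slot)

  bool miss = true;  // flip to false to exercise the hit path
  if (!miss) {
    stack.resize(stack.size() - 3);  // __ Drop(3)
  } else {
    slot = stack.back(); stack.pop_back();      // __ pop(slot)
    vector = stack.back(); stack.pop_back();    // __ pop(vector)
    receiver = stack.back(); stack.pop_back();  // __ pop(receiver)
  }
  assert(stack.empty() && receiver == 1 && vector == 2 && slot == 3);
  return 0;
}
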
diff --git a/deps/v8/src/ic/ia32/stub-cache-ia32.cc b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
index 68b30e7bdb..7366ebe15f 100644
--- a/deps/v8/src/ic/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ic/ia32/stub-cache-ia32.cc
@@ -23,8 +23,13 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+ ExternalReference virtual_register =
+ ExternalReference::vector_store_virtual_register(masm->isolate());
Label miss;
+ bool is_vector_store =
+ IC::ICUseVector(ic_kind) &&
+ (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ lea(offset, Operand(offset, offset, times_2, 0));
@@ -56,19 +61,29 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif
- if (IC::ICUseVector(ic_kind)) {
- // The vector and slot were pushed onto the stack before starting the
- // probe, and need to be dropped before calling the handler.
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
+ if (is_vector_store) {
+ // The overlap here is rather embarrassing. One does what one must.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ DCHECK(extra.is(VectorStoreICDescriptor::SlotRegister()));
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ pop(vector);
+ __ mov(Operand::StaticVariable(virtual_register), extra);
+ __ pop(extra); // Pop "slot".
+ // Jump to the first instruction in the code stub.
+ __ jmp(Operand::StaticVariable(virtual_register));
+ } else {
__ pop(LoadWithVectorDescriptor::VectorRegister());
__ pop(LoadDescriptor::SlotRegister());
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(extra);
}
- // Jump to the first instruction in the code stub.
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
-
__ bind(&miss);
} else {
+ DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
+
// Save the offset on the stack.
__ push(offset);
@@ -105,21 +120,22 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
- if (IC::ICUseVector(ic_kind)) {
+ // Jump to the first instruction in the code stub.
+ if (is_vector_store) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadDescriptor::SlotRegister();
- DCHECK(!offset.is(vector) && !offset.is(slot));
-
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ DCHECK(offset.is(VectorStoreICDescriptor::SlotRegister()));
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(Operand::StaticVariable(virtual_register), offset);
__ pop(vector);
- __ pop(slot);
+ __ pop(offset); // Pop "slot".
+ __ jmp(Operand::StaticVariable(virtual_register));
+ } else {
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
}
- // Jump to the first instruction in the code stub.
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
-
// Pop at miss.
__ bind(&miss);
__ pop(offset);
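
The ia32 probe has no register left over for a vector store: the register holding the handler entry point doubles as the slot register and must be reloaded from the stack before the jump. The fix parks the entry point in a per-isolate memory cell, vector_store_virtual_register, and jumps indirectly through it once vector and slot are popped. A sketch of the idea with an ordinary global standing in for the cell:

// Illustrative sketch (not V8 code): the "virtual register" trick. With no
// architectural register free, the handler's entry point is parked in a
// known memory cell and control transfers with an indirect jump through it.
#include <cassert>

using Handler = int (*)();

static Handler virtual_register = nullptr;  // per-isolate cell in real V8

int StoreHandler() { return 42; }

int Probe() {
  virtual_register = &StoreHandler;  // mov [virtual_register], entry point
  // ... pop vector, pop slot: this clobbers the registers that held the
  // target, which is why it had to be spilled to memory first ...
  return virtual_register();         // jmp [virtual_register]
}

int main() {
  assert(Probe() == 42);
  return 0;
}
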
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index d7b95dada2..20e4fedc23 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -4,9 +4,9 @@
#include "src/ic/ic-compiler.h"
-#include "src/cpu-profiler.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic-inl.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
@@ -119,6 +119,25 @@ Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(
}
+Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ Handle<Map> receiver_map, LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode) {
+ Isolate* isolate = receiver_map->GetIsolate();
+ ExtraICState extra_state =
+ KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
+
+ DCHECK(store_mode == STANDARD_STORE ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+
+ PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+ Handle<Code> code =
+ compiler.CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
+ return code;
+}
+
+
Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphic(
Handle<Map> receiver_map, LanguageMode language_mode,
KeyedAccessStoreMode store_mode) {
@@ -165,7 +184,7 @@ Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
static void FillCache(Isolate* isolate, Handle<Code> code) {
Handle<UnseededNumberDictionary> dictionary = UnseededNumberDictionary::Set(
isolate->factory()->non_monomorphic_cache(), code->flags(), code);
- isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
+ isolate->heap()->SetRootNonMonomorphicCache(*dictionary);
}
@@ -220,31 +239,6 @@ Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
}
-Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
- MapHandleList* receiver_maps, LanguageMode language_mode) {
- Isolate* isolate = receiver_maps->at(0)->GetIsolate();
- DCHECK(KeyedLoadIC::GetKeyType(kNoExtraICState) == ELEMENT);
- Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
- Handle<PolymorphicCodeCache> cache =
- isolate->factory()->polymorphic_code_cache();
- Handle<Object> probe = cache->Lookup(receiver_maps, flags);
- if (probe->IsCode()) return Handle<Code>::cast(probe);
-
- CodeHandleList handlers(receiver_maps->length());
- ElementHandlerCompiler compiler(isolate);
- compiler.CompileElementHandlers(receiver_maps, &handlers, language_mode);
- PropertyICCompiler ic_compiler(isolate, Code::KEYED_LOAD_IC);
- Handle<Code> code = ic_compiler.CompilePolymorphic(
- receiver_maps, &handlers, isolate->factory()->empty_string(),
- Code::NORMAL, ELEMENT);
-
- isolate->counters()->keyed_load_polymorphic_stubs()->Increment();
-
- PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
- return code;
-}
-
-
Handle<Code> PropertyICCompiler::ComputePolymorphic(
Code::Kind kind, MapHandleList* maps, CodeHandleList* handlers,
int valid_maps, Handle<Name> name, ExtraICState extra_ic_state) {
@@ -256,6 +250,23 @@ Handle<Code> PropertyICCompiler::ComputePolymorphic(
}
+void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
+ MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
+ CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
+ LanguageMode language_mode) {
+ Isolate* isolate = receiver_maps->at(0)->GetIsolate();
+ DCHECK(store_mode == STANDARD_STORE ||
+ store_mode == STORE_AND_GROW_NO_TRANSITION ||
+ store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+ store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+ ExtraICState extra_state =
+ KeyedStoreIC::ComputeExtraICState(language_mode, store_mode);
+ PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+ compiler.CompileKeyedStorePolymorphicHandlers(
+ receiver_maps, transitioned_maps, handlers, store_mode);
+}
+
+
Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic(
MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
LanguageMode language_mode) {
@@ -338,11 +349,9 @@ Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
}
-Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
- MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) {
- // Collect MONOMORPHIC stubs for all |receiver_maps|.
- CodeHandleList handlers(receiver_maps->length());
- MapHandleList transitioned_maps(receiver_maps->length());
+void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
+ MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
+ CodeHandleList* handlers, KeyedAccessStoreMode store_mode) {
for (int i = 0; i < receiver_maps->length(); ++i) {
Handle<Map> receiver_map(receiver_maps->at(i));
Handle<Code> cached_stub;
@@ -362,22 +371,36 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
transitioned_map->elements_kind(),
is_js_array, store_mode).GetCode();
} else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+ // TODO(mvstanton): Consider embedding store_mode in the state of the slow
+ // keyed store ic for uniformity.
cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
} else {
if (IsSloppyArgumentsElements(elements_kind)) {
- cached_stub = KeyedStoreSloppyArgumentsStub(isolate()).GetCode();
+ cached_stub =
+ KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
} else if (receiver_map->has_fast_elements() ||
receiver_map->has_fixed_typed_array_elements()) {
cached_stub = StoreFastElementStub(isolate(), is_js_array,
elements_kind, store_mode).GetCode();
} else {
- cached_stub = StoreElementStub(isolate(), elements_kind).GetCode();
+ cached_stub =
+ StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
}
}
DCHECK(!cached_stub.is_null());
- handlers.Add(cached_stub);
- transitioned_maps.Add(transitioned_map);
+ handlers->Add(cached_stub);
+ transitioned_maps->Add(transitioned_map);
}
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+ MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) {
+ // Collect MONOMORPHIC stubs for all |receiver_maps|.
+ CodeHandleList handlers(receiver_maps->length());
+ MapHandleList transitioned_maps(receiver_maps->length());
+ CompileKeyedStorePolymorphicHandlers(receiver_maps, &transitioned_maps,
+ &handlers, store_mode);
Handle<Code> code = CompileKeyedStorePolymorphic(receiver_maps, &handlers,
&transitioned_maps);
@@ -390,20 +413,28 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
#define __ ACCESS_MASM(masm())
-Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
+Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphicHandler(
Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
ElementsKind elements_kind = receiver_map->elements_kind();
bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
Handle<Code> stub;
if (receiver_map->has_sloppy_arguments_elements()) {
- stub = KeyedStoreSloppyArgumentsStub(isolate()).GetCode();
+ stub = KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
} else if (receiver_map->has_fast_elements() ||
receiver_map->has_fixed_typed_array_elements()) {
stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
store_mode).GetCode();
} else {
- stub = StoreElementStub(isolate(), elements_kind).GetCode();
+ stub = StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
}
+ return stub;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
+ Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
+ Handle<Code> stub =
+ CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
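
The refactoring above splits handler computation out of CompileKeyedStoreMonomorphic/Polymorphic so the vector-store path can consume the raw handler lists directly, while the old path still wraps them into a dispatcher code object. A minimal sketch of the shape of that split, with toy types:

// Toy types, not the V8 API: the shape of the split above. Handler
// computation stands alone so the vector path can store the lists directly,
// while the old path still wraps them into a dispatcher.
#include <vector>

using Map = int;
using Handler = int;

void ComputeHandlers(const std::vector<Map>& maps,
                     std::vector<Handler>* handlers) {
  for (Map m : maps) handlers->push_back(m * 10);  // one handler per map
}

Handler CompileDispatcher(const std::vector<Handler>& handlers) {
  return static_cast<Handler>(handlers.size());  // stand-in for a code stub
}

int main() {
  std::vector<Map> maps = {1, 2, 3};
  std::vector<Handler> handlers;
  ComputeHandlers(maps, &handlers);  // new: handlers usable on their own
  Handler stub = CompileDispatcher(handlers);  // old path: wrap them up
  return stub == 3 ? 0 : 1;
}
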
diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h
index b5226e9a6e..ee6597d59d 100644
--- a/deps/v8/src/ic/ic-compiler.h
+++ b/deps/v8/src/ic/ic-compiler.h
@@ -34,11 +34,16 @@ class PropertyICCompiler : public PropertyAccessCompiler {
static Handle<Code> ComputeKeyedLoadMonomorphicHandler(
Handle<Map> receiver_map, ExtraICState extra_ic_state);
+ static Handle<Code> ComputeKeyedStoreMonomorphicHandler(
+ Handle<Map> receiver_map, LanguageMode language_mode,
+ KeyedAccessStoreMode store_mode);
static Handle<Code> ComputeKeyedStoreMonomorphic(
Handle<Map> receiver_map, LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
- static Handle<Code> ComputeKeyedLoadPolymorphic(MapHandleList* receiver_maps,
- LanguageMode language_mode);
+ static void ComputeKeyedStorePolymorphicHandlers(
+ MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
+ CodeHandleList* handlers, KeyedAccessStoreMode store_mode,
+ LanguageMode language_mode);
static Handle<Code> ComputeKeyedStorePolymorphic(
MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
LanguageMode language_mode);
@@ -78,10 +83,16 @@ class PropertyICCompiler : public PropertyAccessCompiler {
Handle<Name> name, Code::StubType type,
IcCheckType check);
+ Handle<Code> CompileKeyedStoreMonomorphicHandler(
+ Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode);
Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
KeyedAccessStoreMode store_mode);
+ void CompileKeyedStorePolymorphicHandlers(MapHandleList* receiver_maps,
+ MapHandleList* transitioned_maps,
+ CodeHandleList* handlers,
+ KeyedAccessStoreMode store_mode);
Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
CodeHandleList* handler_stubs,
MapHandleList* transitioned_maps);
diff --git a/deps/v8/src/ic/ic-inl.h b/deps/v8/src/ic/ic-inl.h
index bce3c1206d..646b73d641 100644
--- a/deps/v8/src/ic/ic-inl.h
+++ b/deps/v8/src/ic/ic-inl.h
@@ -60,6 +60,7 @@ void IC::SetTargetAtAddress(Address address, Code* target,
DCHECK(!target->is_inline_cache_stub() ||
(target->kind() != Code::LOAD_IC &&
target->kind() != Code::KEYED_LOAD_IC &&
+ target->kind() != Code::CALL_IC &&
(!FLAG_vector_stores || (target->kind() != Code::STORE_IC &&
target->kind() != Code::KEYED_STORE_IC))));
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index fc33c80487..bc03d7d487 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -358,6 +358,8 @@ const char* CompareICState::GetStateName(State state) {
switch (state) {
case UNINITIALIZED:
return "UNINITIALIZED";
+ case BOOLEAN:
+ return "BOOLEAN";
case SMI:
return "SMI";
case NUMBER:
@@ -384,6 +386,8 @@ Type* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
switch (state) {
case UNINITIALIZED:
return Type::None(zone);
+ case BOOLEAN:
+ return Type::Boolean(zone);
case SMI:
return Type::SignedSmall(zone);
case NUMBER:
@@ -410,6 +414,7 @@ CompareICState::State CompareICState::NewInputState(State old_state,
Handle<Object> value) {
switch (old_state) {
case UNINITIALIZED:
+ if (value->IsBoolean()) return BOOLEAN;
if (value->IsSmi()) return SMI;
if (value->IsHeapNumber()) return NUMBER;
if (value->IsInternalizedString()) return INTERNALIZED_STRING;
@@ -417,6 +422,9 @@ CompareICState::State CompareICState::NewInputState(State old_state,
if (value->IsSymbol()) return UNIQUE_NAME;
if (value->IsJSObject()) return OBJECT;
break;
+ case BOOLEAN:
+ if (value->IsBoolean()) return BOOLEAN;
+ break;
case SMI:
if (value->IsSmi()) return SMI;
if (value->IsHeapNumber()) return NUMBER;
@@ -454,6 +462,7 @@ CompareICState::State CompareICState::TargetState(
bool has_inlined_smi_code, Handle<Object> x, Handle<Object> y) {
switch (old_state) {
case UNINITIALIZED:
+ if (x->IsBoolean() && y->IsBoolean()) return BOOLEAN;
if (x->IsSmi() && y->IsSmi()) return SMI;
if (x->IsNumber() && y->IsNumber()) return NUMBER;
if (Token::IsOrderedRelationalCompareOp(op)) {
@@ -470,16 +479,16 @@ CompareICState::State CompareICState::TargetState(
return Token::IsEqualityOp(op) ? INTERNALIZED_STRING : STRING;
}
if (x->IsString() && y->IsString()) return STRING;
- if (!Token::IsEqualityOp(op)) return GENERIC;
- if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
if (x->IsJSObject() && y->IsJSObject()) {
if (Handle<JSObject>::cast(x)->map() ==
Handle<JSObject>::cast(y)->map()) {
return KNOWN_OBJECT;
} else {
- return OBJECT;
+ return Token::IsEqualityOp(op) ? OBJECT : GENERIC;
}
}
+ if (!Token::IsEqualityOp(op)) return GENERIC;
+ if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
return GENERIC;
case SMI:
return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC;
@@ -496,11 +505,11 @@ CompareICState::State CompareICState::TargetState(
if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
return GENERIC;
case KNOWN_OBJECT:
- DCHECK(Token::IsEqualityOp(op));
if (x->IsJSObject() && y->IsJSObject()) {
- return OBJECT;
+ return Token::IsEqualityOp(op) ? OBJECT : GENERIC;
}
return GENERIC;
+ case BOOLEAN:
case STRING:
case UNIQUE_NAME:
case OBJECT:
@@ -510,5 +519,6 @@ CompareICState::State CompareICState::TargetState(
UNREACHABLE();
return GENERIC; // Make the compiler happy.
}
+
} // namespace internal
} // namespace v8
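
ic-state.cc grows a BOOLEAN entry in the CompareIC state lattice: an uninitialized site seeing two booleans specializes to BOOLEAN, and the state falls to GENERIC as soon as a non-boolean shows up. A condensed, illustrative transition function (the real one also covers SMI, NUMBER, strings, and objects):

// Condensed, illustrative transition function (the real lattice also covers
// SMI, NUMBER, strings, and objects): BOOLEAN is entered when both operands
// are booleans and abandoned for GENERIC as soon as one is not.
#include <cassert>

enum State { UNINITIALIZED, BOOLEAN, GENERIC };

State Target(State old_state, bool x_is_bool, bool y_is_bool) {
  switch (old_state) {
    case UNINITIALIZED:
    case BOOLEAN:
      return (x_is_bool && y_is_bool) ? BOOLEAN : GENERIC;
    default:
      return GENERIC;  // states only move up the lattice
  }
}

int main() {
  assert(Target(UNINITIALIZED, true, true) == BOOLEAN);
  assert(Target(BOOLEAN, true, false) == GENERIC);
  return 0;
}
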
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index 0b4b9cdc99..b529b8c54d 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -174,9 +174,11 @@ class CompareICState {
// ... < GENERIC
// SMI < NUMBER
// INTERNALIZED_STRING < STRING
+ // INTERNALIZED_STRING < UNIQUE_NAME
// KNOWN_OBJECT < OBJECT
enum State {
UNINITIALIZED,
+ BOOLEAN,
SMI,
NUMBER,
STRING,
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 4ed85d5842..f0d571bed6 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -17,6 +17,7 @@
#include "src/ic/ic-inl.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"
+#include "src/isolate-inl.h"
#include "src/macro-assembler.h"
#include "src/prototype.h"
#include "src/runtime/runtime.h"
@@ -122,8 +123,11 @@ void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
ExtraICState extra_state = new_target->extra_ic_state();
const char* modifier = "";
if (new_target->kind() == Code::KEYED_STORE_IC) {
- modifier = GetTransitionMarkModifier(
- KeyedStoreIC::GetKeyedAccessStoreMode(extra_state));
+ KeyedAccessStoreMode mode =
+ FLAG_vector_stores
+ ? casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode()
+ : KeyedStoreIC::GetKeyedAccessStoreMode(extra_state);
+ modifier = GetTransitionMarkModifier(mode);
}
PrintF(" (%c->%c%s) ", TransitionMarkFromState(old_state),
TransitionMarkFromState(new_state), modifier);
@@ -271,11 +275,8 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
if (first_map == NULL) return false;
Handle<Map> old_map(first_map);
if (old_map->is_deprecated()) return true;
- if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
- receiver_map()->elements_kind())) {
- return true;
- }
- return false;
+ return IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
+ receiver_map()->elements_kind());
}
CacheHolderFlag flag;
@@ -665,6 +666,20 @@ void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
}
+void IC::ConfigureVectorState(MapHandleList* maps,
+ MapHandleList* transitioned_maps,
+ CodeHandleList* handlers) {
+ DCHECK(UseVector());
+ DCHECK(kind() == Code::KEYED_STORE_IC);
+ KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
+ nexus->ConfigurePolymorphic(maps, transitioned_maps, handlers);
+
+ vector_set_ = true;
+ OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
+ POLYMORPHIC);
+}
+
+
MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
// If the object is undefined or null it's illegal to try to get any
// of its properties; throw a TypeError in that case.
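
The new ConfigureVectorState overload above records polymorphic keyed-store feedback as three parallel lists — receiver maps, their transition targets, and the matching handlers — in the KeyedStoreICNexus rather than in generated code. A toy sketch of that invariant, with assumed names:

// Toy sketch with assumed names: the parallel-list invariant behind the new
// ConfigureVectorState overload — one transition target and one handler per
// receiver map, recorded in the feedback slot instead of generated code.
#include <cassert>
#include <utility>
#include <vector>

struct Nexus {
  std::vector<int> maps, transitions, handlers;
  void ConfigurePolymorphic(std::vector<int> m, std::vector<int> t,
                            std::vector<int> h) {
    assert(m.size() == t.size() && t.size() == h.size());  // parallel lists
    maps = std::move(m);
    transitions = std::move(t);
    handlers = std::move(h);
  }
};

int main() {
  Nexus nexus;
  nexus.ConfigurePolymorphic({1, 2}, {1, 3}, {10, 20});
  return nexus.maps.size() == 2 ? 0 : 1;
}
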
@@ -966,7 +981,7 @@ static Handle<Code> KeyedStoreICInitializeStubHelper(
Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate,
LanguageMode language_mode,
State initialization_state) {
- if (FLAG_vector_stores) {
+ if (FLAG_vector_stores && initialization_state != MEGAMORPHIC) {
VectorKeyedStoreICTrampolineStub stub(isolate, StoreICState(language_mode));
return stub.GetCode();
}
@@ -988,6 +1003,13 @@ Handle<Code> KeyedStoreIC::initialize_stub_in_optimized_code(
}
+Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
+ ExtraICState extra_state) {
+ LanguageMode mode = StoreICState::GetLanguageMode(extra_state);
+ return KeyedStoreICInitializeStubHelper(isolate, mode, MEGAMORPHIC);
+}
+
+
Handle<Code> LoadIC::megamorphic_stub() {
DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
return KeyedLoadIC::ChooseMegamorphicStub(isolate(), extra_ic_state());
@@ -1137,7 +1159,7 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
// Use specialized code for getting prototype of functions.
if (receiver->IsJSFunction() &&
Name::Equals(isolate()->factory()->prototype_string(), lookup->name()) &&
- Handle<JSFunction>::cast(receiver)->should_have_prototype() &&
+ receiver->IsConstructor() &&
!Handle<JSFunction>::cast(receiver)
->map()
->has_non_instance_prototype()) {
@@ -1851,8 +1873,9 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
}
-Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
+Handle<Code> KeyedStoreIC::StoreElementStub(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode) {
+ Handle<Code> null_handle;
// Don't handle megamorphic property accesses for INTERCEPTORS or
// ACCESSOR_CONSTANT
// via megamorphic stubs, since they don't have a map in their relocation info
@@ -1862,13 +1885,19 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
return megamorphic_stub();
}
- Handle<Map> receiver_map(receiver->map(), isolate());
MapHandleList target_receiver_maps;
TargetMaps(&target_receiver_maps);
if (target_receiver_maps.length() == 0) {
Handle<Map> monomorphic_map =
ComputeTransitionedMap(receiver_map, store_mode);
store_mode = GetNonTransitioningStoreMode(store_mode);
+ if (FLAG_vector_stores) {
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ monomorphic_map, language_mode(), store_mode);
+ ConfigureVectorState(Handle<Name>::null(), monomorphic_map, handler);
+ return null_handle;
+ }
return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
monomorphic_map, language_mode(), store_mode);
}
@@ -1878,7 +1907,9 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// superset of the original IC. Handle those here if the receiver map hasn't
// changed or it has transitioned to a more general kind.
KeyedAccessStoreMode old_store_mode =
- KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
+ FLAG_vector_stores
+ ? GetKeyedAccessStoreMode()
+ : KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
if (state() == MONOMORPHIC) {
Handle<Map> transitioned_receiver_map = receiver_map;
@@ -1894,9 +1925,17 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// if they at least come from the same origin for a transitioning store,
// stay MONOMORPHIC and use the map for the most generic ElementsKind.
store_mode = GetNonTransitioningStoreMode(store_mode);
+ if (FLAG_vector_stores) {
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ transitioned_receiver_map, language_mode(), store_mode);
+ ConfigureVectorState(Handle<Name>::null(), transitioned_receiver_map,
+ handler);
+ return null_handle;
+ }
return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
transitioned_receiver_map, language_mode(), store_mode);
- } else if (*previous_receiver_map == receiver->map() &&
+ } else if (receiver_map.is_identical_to(previous_receiver_map) &&
old_store_mode == STANDARD_STORE &&
(store_mode == STORE_AND_GROW_NO_TRANSITION ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
@@ -1904,6 +1943,13 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
// A "normal" IC that handles stores can switch to a version that can
// grow at the end of the array, handle OOB accesses or copy COW arrays
// and still stay MONOMORPHIC.
+ if (FLAG_vector_stores) {
+ Handle<Code> handler =
+ PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
+ receiver_map, language_mode(), store_mode);
+ ConfigureVectorState(Handle<Name>::null(), receiver_map, handler);
+ return null_handle;
+ }
return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
receiver_map, language_mode(), store_mode);
}
@@ -1964,6 +2010,16 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
}
}
+ if (FLAG_vector_stores) {
+ MapHandleList transitioned_maps(target_receiver_maps.length());
+ CodeHandleList handlers(target_receiver_maps.length());
+ PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
+ &target_receiver_maps, &transitioned_maps, &handlers, store_mode,
+ language_mode());
+ ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers);
+ return null_handle;
+ }
+
return PropertyICCompiler::ComputeKeyedStorePolymorphic(
&target_receiver_maps, store_mode, language_mode());
}
@@ -1972,22 +2028,20 @@ Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
Handle<Map> map, KeyedAccessStoreMode store_mode) {
switch (store_mode) {
- case STORE_TRANSITION_SMI_TO_OBJECT:
- case STORE_TRANSITION_DOUBLE_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
- return Map::TransitionElementsTo(map, FAST_ELEMENTS);
- case STORE_TRANSITION_SMI_TO_DOUBLE:
- case STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
- return Map::TransitionElementsTo(map, FAST_DOUBLE_ELEMENTS);
- case STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
- case STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
- return Map::TransitionElementsTo(map, FAST_HOLEY_ELEMENTS);
- case STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
- return Map::TransitionElementsTo(map, FAST_HOLEY_DOUBLE_ELEMENTS);
+ case STORE_TRANSITION_TO_OBJECT:
+ case STORE_AND_GROW_TRANSITION_TO_OBJECT: {
+ ElementsKind kind = IsFastHoleyElementsKind(map->elements_kind())
+ ? FAST_HOLEY_ELEMENTS
+ : FAST_ELEMENTS;
+ return Map::TransitionElementsTo(map, kind);
+ }
+ case STORE_TRANSITION_TO_DOUBLE:
+ case STORE_AND_GROW_TRANSITION_TO_DOUBLE: {
+ ElementsKind kind = IsFastHoleyElementsKind(map->elements_kind())
+ ? FAST_HOLEY_DOUBLE_ELEMENTS
+ : FAST_DOUBLE_ELEMENTS;
+ return Map::TransitionElementsTo(map, kind);
+ }
case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
DCHECK(map->has_fixed_typed_array_elements());
// Fall through
@@ -2023,26 +2077,14 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
// Handle growing array in stub if necessary.
if (receiver->HasFastSmiElements()) {
if (value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE;
- } else {
- return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
- }
+ return STORE_AND_GROW_TRANSITION_TO_DOUBLE;
}
if (value->IsHeapObject()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT;
- } else {
- return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
- }
+ return STORE_AND_GROW_TRANSITION_TO_OBJECT;
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
- } else {
- return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
- }
+ return STORE_AND_GROW_TRANSITION_TO_OBJECT;
}
}
return STORE_AND_GROW_NO_TRANSITION;
@@ -2050,25 +2092,13 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
// Handle only in-bounds elements accesses.
if (receiver->HasFastSmiElements()) {
if (value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE;
- } else {
- return STORE_TRANSITION_SMI_TO_DOUBLE;
- }
+ return STORE_TRANSITION_TO_DOUBLE;
} else if (value->IsHeapObject()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT;
- } else {
- return STORE_TRANSITION_SMI_TO_OBJECT;
- }
+ return STORE_TRANSITION_TO_OBJECT;
}
} else if (receiver->HasFastDoubleElements()) {
if (!value->IsSmi() && !value->IsHeapNumber()) {
- if (receiver->HasFastHoleyElements()) {
- return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
- } else {
- return STORE_TRANSITION_DOUBLE_TO_OBJECT;
- }
+ return STORE_TRANSITION_TO_OBJECT;
}
}
if (!FLAG_trace_external_array_abuse &&
@@ -2085,6 +2115,44 @@ static KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
}
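After this patch GetStoreMode classifies purely on the representation of the incoming value; whether the backing store is holey is no longer its concern. A toy version of the in-bounds branch, with an invented ValueKind taxonomy standing in for the Smi/HeapNumber/HeapObject checks:

    #include <cstdio>

    enum StoreMode {
      STANDARD_STORE,
      STORE_TRANSITION_TO_DOUBLE,
      STORE_TRANSITION_TO_OBJECT
    };
    enum ValueKind { SMI, HEAP_NUMBER, HEAP_OBJECT };  // toy value taxonomy

    // A smi array transitions to double for heap numbers and to object for
    // any other heap object; a double array only leaves for non-numbers.
    static StoreMode ModeForSmiArray(ValueKind v) {
      if (v == HEAP_NUMBER) return STORE_TRANSITION_TO_DOUBLE;
      if (v == HEAP_OBJECT) return STORE_TRANSITION_TO_OBJECT;
      return STANDARD_STORE;
    }
    static StoreMode ModeForDoubleArray(ValueKind v) {
      return (v == HEAP_OBJECT) ? STORE_TRANSITION_TO_OBJECT : STANDARD_STORE;
    }

    int main() {
      std::printf("%d %d\n", ModeForSmiArray(HEAP_NUMBER),
                  ModeForDoubleArray(HEAP_OBJECT));  // prints 1 2
      return 0;
    }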
+void KeyedStoreIC::ValidateStoreMode(Handle<Code> stub) {
+#ifdef DEBUG
+ DCHECK(!FLAG_vector_stores);
+ if (stub.is_null() || *stub == *megamorphic_stub() || *stub == *slow_stub()) {
+ return;
+ }
+
+ // Query the keyed store mode.
+ ExtraICState state = stub->extra_ic_state();
+ KeyedAccessStoreMode stub_mode = GetKeyedAccessStoreMode(state);
+
+ MapHandleList map_list;
+ stub->FindAllMaps(&map_list);
+ CodeHandleList list;
+ stub->FindHandlers(&list, map_list.length());
+ for (int i = 0; i < list.length(); i++) {
+ Handle<Code> handler = list.at(i);
+ CHECK(handler->is_handler());
+ CodeStub::Major major_key = CodeStub::MajorKeyFromKey(handler->stub_key());
+ uint32_t minor_key = CodeStub::MinorKeyFromKey(handler->stub_key());
+ // Ensure that we only see handlers we know have the store mode embedded.
+ CHECK(major_key == CodeStub::KeyedStoreSloppyArguments ||
+ major_key == CodeStub::StoreFastElement ||
+ major_key == CodeStub::StoreElement ||
+ major_key == CodeStub::ElementsTransitionAndStore ||
+ *handler == *isolate()->builtins()->KeyedStoreIC_Slow());
+ // Ensure that the store mode matches that of the IC.
+ CHECK(major_key == CodeStub::NoCache ||
+ stub_mode == CommonStoreModeBits::decode(minor_key));
+ // The one exception is the keyed store slow builtin, which doesn't include
+ // store mode.
+ CHECK(major_key != CodeStub::NoCache ||
+ *handler == *isolate()->builtins()->KeyedStoreIC_Slow());
+ }
+#endif // DEBUG
+}
+
+
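ValidateStoreMode cross-checks, in debug builds, that the store mode the IC decodes from its extra state matches the mode baked into each handler's minor key. A freestanding sketch of that invariant, with a made-up 3-bit field in place of CommonStoreModeBits:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Toy stand-in for CommonStoreModeBits: store mode in the low 3 bits
    // of a handler's minor key (field layout invented for illustration).
    struct CommonStoreModeBits {
      static uint32_t encode(uint32_t mode) { return mode & 0x7; }
      static uint32_t decode(uint32_t key) { return key & 0x7; }
    };

    // Every handler baked for this IC must carry the same store mode that
    // the IC itself decoded from its extra state.
    static void Validate(uint32_t ic_mode, const std::vector<uint32_t>& keys) {
      for (uint32_t key : keys)
        assert(CommonStoreModeBits::decode(key) == ic_mode);
    }

    int main() {
      std::vector<uint32_t> handler_keys = {CommonStoreModeBits::encode(2),
                                            CommonStoreModeBits::encode(2)};
      Validate(2, handler_keys);  // passes: modes agree
      return 0;
    }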
MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
Handle<Object> key,
Handle<Object> value) {
@@ -2148,23 +2216,47 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
}
+ Handle<Map> old_receiver_map;
+ bool sloppy_arguments_elements = false;
+ bool key_is_valid_index = false;
+ KeyedAccessStoreMode store_mode = STANDARD_STORE;
+ if (use_ic && object->IsJSObject()) {
+ Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+ old_receiver_map = handle(receiver->map(), isolate());
+ sloppy_arguments_elements =
+ !is_sloppy(language_mode()) &&
+ receiver->elements()->map() ==
+ isolate()->heap()->sloppy_arguments_elements_map();
+ if (!sloppy_arguments_elements) {
+ key_is_valid_index = key->IsSmi() && Smi::cast(*key)->value() >= 0;
+ if (key_is_valid_index) {
+ uint32_t index = static_cast<uint32_t>(Smi::cast(*key)->value());
+ store_mode = GetStoreMode(receiver, index, value);
+ }
+ }
+ }
+
+ DCHECK(store_handle.is_null());
+ ASSIGN_RETURN_ON_EXCEPTION(isolate(), store_handle,
+ Runtime::SetObjectProperty(isolate(), object, key,
+ value, language_mode()),
+ Object);
+
if (use_ic) {
- if (object->IsJSObject()) {
- Handle<JSObject> receiver = Handle<JSObject>::cast(object);
- if (receiver->elements()->map() ==
- isolate()->heap()->sloppy_arguments_elements_map() &&
- !is_sloppy(language_mode())) {
+ if (!old_receiver_map.is_null()) {
+ if (sloppy_arguments_elements) {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
- } else if (key->IsSmi() && Smi::cast(*key)->value() >= 0) {
- uint32_t index = static_cast<uint32_t>(Smi::cast(*key)->value());
+ } else if (key_is_valid_index) {
// We should go generic if receiver isn't a dictionary, but our
// prototype chain does have dictionary elements. This ensures that
// other non-dictionary receivers in the polymorphic case benefit
// from fast path keyed stores.
- if (!receiver->map()->DictionaryElementsInPrototypeChainOnly()) {
- KeyedAccessStoreMode store_mode =
- GetStoreMode(receiver, index, value);
- stub = StoreElementStub(receiver, store_mode);
+ if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly()) {
+ stub = StoreElementStub(old_receiver_map, store_mode);
+
+ // Validate that the store_mode in the stub can also be derived
+ // from peeking in the code bits of the handlers.
+ if (!FLAG_vector_stores) ValidateStoreMode(stub);
} else {
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "dictionary prototype");
}
@@ -2176,14 +2268,6 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
}
- if (store_handle.is_null()) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), store_handle,
- Runtime::SetObjectProperty(isolate(), object, key, value,
- language_mode()),
- Object);
- }
-
if (FLAG_vector_stores) {
if (!is_vector_set() || stub.is_null()) {
Code* megamorphic = *megamorphic_stub();
@@ -2213,73 +2297,7 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
}
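The larger reordering in the KeyedStoreIC::Store hunks is that the receiver's map and the store mode are now captured before Runtime::SetObjectProperty runs, since the store itself can transition the receiver's map and the IC must be keyed on the map the call site actually dispatched on. A toy demonstration of capturing pre-store state; the Object type and its string-valued map are invented stand-ins, not V8 API:

    #include <cassert>
    #include <string>

    // Toy object whose "map" changes when a store transitions it.
    struct Object {
      std::string map = "FAST_SMI";
      void StoreHeapNumber() { map = "FAST_DOUBLE"; }  // store transitions map
    };

    int main() {
      Object receiver;
      // Capture the map *before* performing the store, as the patched
      // KeyedStoreIC::Store now does...
      std::string old_map = receiver.map;
      receiver.StoreHeapNumber();
      // ...so the stub is selected for the map the site dispatched on,
      // not the post-transition map.
      assert(old_map == "FAST_SMI" && receiver.map == "FAST_DOUBLE");
      return 0;
    }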
-bool CallIC::DoCustomHandler(Handle<Object> function,
- const CallICState& callic_state) {
- DCHECK(FLAG_use_ic && function->IsJSFunction());
-
- // Are we the array function?
- Handle<JSFunction> array_function =
- Handle<JSFunction>(isolate()->native_context()->array_function());
- if (array_function.is_identical_to(Handle<JSFunction>::cast(function))) {
- // Alter the slot.
- CallICNexus* nexus = casted_nexus<CallICNexus>();
- nexus->ConfigureMonomorphicArray();
-
- // Vector-based ICs have a different calling convention in optimized code
- // than full code so the correct stub has to be chosen.
- if (AddressIsOptimizedCode()) {
- CallIC_ArrayStub stub(isolate(), callic_state);
- set_target(*stub.GetCode());
- } else {
- CallIC_ArrayTrampolineStub stub(isolate(), callic_state);
- set_target(*stub.GetCode());
- }
-
- Handle<String> name;
- if (array_function->shared()->name()->IsString()) {
- name = Handle<String>(String::cast(array_function->shared()->name()),
- isolate());
- }
- TRACE_IC("CallIC", name);
- OnTypeFeedbackChanged(isolate(), get_host(), nexus->vector(), state(),
- MONOMORPHIC);
- return true;
- }
- return false;
-}
-
-
-void CallIC::PatchMegamorphic(Handle<Object> function) {
- CallICState callic_state(target()->extra_ic_state());
-
- // We are going generic.
- CallICNexus* nexus = casted_nexus<CallICNexus>();
- nexus->ConfigureMegamorphic();
-
- // Vector-based ICs have a different calling convention in optimized code
- // than full code so the correct stub has to be chosen.
- if (AddressIsOptimizedCode()) {
- CallICStub stub(isolate(), callic_state);
- set_target(*stub.GetCode());
- } else {
- CallICTrampolineStub stub(isolate(), callic_state);
- set_target(*stub.GetCode());
- }
-
- Handle<Object> name = isolate()->factory()->empty_string();
- if (function->IsJSFunction()) {
- Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
- name = handle(js_function->shared()->name(), isolate());
- }
-
- TRACE_IC("CallIC", name);
- OnTypeFeedbackChanged(isolate(), get_host(), nexus->vector(), state(),
- GENERIC);
-}
-
-
void CallIC::HandleMiss(Handle<Object> function) {
- CallICState callic_state(target()->extra_ic_state());
Handle<Object> name = isolate()->factory()->empty_string();
CallICNexus* nexus = casted_nexus<CallICNexus>();
Object* feedback = nexus->GetFeedback();
@@ -2287,25 +2305,22 @@ void CallIC::HandleMiss(Handle<Object> function) {
// Hand-coded MISS handling is easier if CallIC slots don't contain smis.
DCHECK(!feedback->IsSmi());
- if (feedback->IsWeakCell() || !function->IsJSFunction()) {
+ if (feedback->IsWeakCell() || !function->IsJSFunction() ||
+ feedback->IsAllocationSite()) {
// We are going generic.
nexus->ConfigureMegamorphic();
} else {
- // The feedback is either uninitialized or an allocation site.
- // It might be an allocation site because if we re-compile the full code
- // to add deoptimization support, we call with the default call-ic, and
- // merely need to patch the target to match the feedback.
- // TODO(mvstanton): the better approach is to dispense with patching
- // altogether, which is in progress.
- DCHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate()) ||
- feedback->IsAllocationSite());
-
- // Do we want to install a custom handler?
- if (FLAG_use_ic && DoCustomHandler(function, callic_state)) {
- return;
- }
+ DCHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate()));
+ Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
- nexus->ConfigureMonomorphic(Handle<JSFunction>::cast(function));
+ Handle<JSFunction> array_function =
+ Handle<JSFunction>(isolate()->native_context()->array_function());
+ if (array_function.is_identical_to(js_function)) {
+ // Alter the slot.
+ nexus->ConfigureMonomorphicArray();
+ } else {
+ nexus->ConfigureMonomorphic(js_function);
+ }
}
if (function->IsJSFunction()) {
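With DoCustomHandler and PatchMegamorphic deleted, CallIC::HandleMiss drives the feedback slot directly: weak-cell, allocation-site, or non-function feedback goes megamorphic, and an uninitialized slot goes monomorphic, with the Array function getting its dedicated feedback shape. A sketch of that state machine under those assumptions; the Miss struct fields are illustrative flags, not V8 API:

    #include <cstdio>

    enum Feedback { UNINITIALIZED, MONOMORPHIC, MONOMORPHIC_ARRAY, MEGAMORPHIC };

    // Invented inputs standing in for the checks in CallIC::HandleMiss.
    struct Miss {
      bool feedback_is_weak_cell;       // slot was already monomorphic once
      bool feedback_is_allocation_site;
      bool target_is_js_function;
      bool target_is_array_function;
    };

    static Feedback OnMiss(Miss m) {
      if (m.feedback_is_weak_cell || m.feedback_is_allocation_site ||
          !m.target_is_js_function) {
        return MEGAMORPHIC;  // any second miss goes generic
      }
      return m.target_is_array_function ? MONOMORPHIC_ARRAY : MONOMORPHIC;
    }

    int main() {
      std::printf("%d\n", OnMiss({false, false, true, true}));  // 2: array slot
      std::printf("%d\n", OnMiss({true, false, true, false}));  // 3: megamorphic
      return 0;
    }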
@@ -2342,22 +2357,6 @@ RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
}
-RUNTIME_FUNCTION(Runtime_CallIC_Customization_Miss) {
- TimerEventScope<TimerEventIcMiss> timer(isolate);
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- Handle<Object> function = args.at<Object>(0);
- Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
- Handle<Smi> slot = args.at<Smi>(2);
- FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- CallICNexus nexus(vector, vector_slot);
- // A miss on a custom call ic always results in going megamorphic.
- CallIC ic(isolate, &nexus);
- ic.PatchMegamorphic(function);
- return *function;
-}
-
-
// Used from ic-<arch>.cc.
RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
@@ -2373,13 +2372,14 @@ RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
- if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::LOAD_IC) {
LoadICNexus nexus(vector, vector_slot);
LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
} else {
- DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC,
+ vector->GetKind(vector_slot));
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
@@ -2439,18 +2439,19 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
Handle<Object> result;
if (FLAG_vector_stores) {
- DCHECK(args.length() == 5);
+ DCHECK(args.length() == 5 || args.length() == 6);
Handle<Smi> slot = args.at<Smi>(3);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- if (vector->GetKind(vector_slot) == Code::STORE_IC) {
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
StoreICNexus nexus(vector, vector_slot);
StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
ic.Store(receiver, key, value));
} else {
- DCHECK(vector->GetKind(vector_slot) == Code::KEYED_STORE_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
+ vector->GetKind(vector_slot));
KeyedStoreICNexus nexus(vector, vector_slot);
KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
@@ -2481,14 +2482,15 @@ RUNTIME_FUNCTION(Runtime_StoreIC_MissFromStubFailure) {
Handle<Smi> slot = args.at<Smi>(3);
Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(4);
FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
- if (vector->GetKind(vector_slot) == Code::STORE_IC) {
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
StoreICNexus nexus(vector, vector_slot);
StoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
ic.Store(receiver, key, value));
} else {
- DCHECK(vector->GetKind(vector_slot) == Code::KEYED_STORE_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
+ vector->GetKind(vector_slot));
KeyedStoreICNexus nexus(vector, vector_slot);
KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
@@ -2567,12 +2569,19 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_MissFromStubFailure) {
RUNTIME_FUNCTION(Runtime_StoreIC_Slow) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ DCHECK(args.length() == (FLAG_vector_stores ? 5 : 3));
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- LanguageMode language_mode = ic.language_mode();
+ LanguageMode language_mode;
+ if (FLAG_vector_stores) {
+ StoreICNexus nexus(isolate);
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ language_mode = ic.language_mode();
+ } else {
+ StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ language_mode = ic.language_mode();
+ }
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -2583,12 +2592,19 @@ RUNTIME_FUNCTION(Runtime_StoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ DCHECK(args.length() == (FLAG_vector_stores ? 5 : 3));
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- LanguageMode language_mode = ic.language_mode();
+ LanguageMode language_mode;
+ if (FLAG_vector_stores) {
+ KeyedStoreICNexus nexus(isolate);
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+ language_mode = ic.language_mode();
+ } else {
+ KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+ language_mode = ic.language_mode();
+ }
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
@@ -2600,14 +2616,20 @@ RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Slow) {
RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
TimerEventScope<TimerEventIcMiss> timer(isolate);
HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ DCHECK(args.length() == (FLAG_vector_stores ? 6 : 4));
Handle<Object> object = args.at<Object>(0);
Handle<Object> key = args.at<Object>(1);
Handle<Object> value = args.at<Object>(2);
- Handle<Map> map = args.at<Map>(3);
-
- LanguageMode language_mode = ic.language_mode();
+ Handle<Map> map = args.at<Map>(FLAG_vector_stores ? 5 : 3);
+ LanguageMode language_mode;
+ if (FLAG_vector_stores) {
+ KeyedStoreICNexus nexus(isolate);
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+ language_mode = ic.language_mode();
+ } else {
+ KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+ language_mode = ic.language_mode();
+ }
if (object->IsJSObject()) {
JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
map->elements_kind());
@@ -2626,13 +2648,67 @@ MaybeHandle<Object> BinaryOpIC::Transition(
BinaryOpICState state(isolate(), target()->extra_ic_state());
// Compute the actual result using the builtin for the binary operation.
- Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
- TokenToJSBuiltin(state.op(), state.strength()));
- Handle<JSFunction> function = handle(JSFunction::cast(builtin), isolate());
Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate(), result, Execution::Call(isolate(), function, left, 1, &right),
- Object);
+ switch (state.op()) {
+ default:
+ UNREACHABLE();
+ case Token::ADD:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::Add(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::SUB:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::Subtract(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::MUL:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::Multiply(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::DIV:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::Divide(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::MOD:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::Modulus(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::BIT_OR:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::BitwiseOr(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::BIT_AND:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::BitwiseAnd(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::BIT_XOR:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::BitwiseXor(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::SAR:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::ShiftRight(isolate(), left, right, state.strength()), Object);
+ break;
+ case Token::SHR:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::ShiftRightLogical(isolate(), left, right, state.strength()),
+ Object);
+ break;
+ case Token::SHL:
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate(), result,
+ Object::ShiftLeft(isolate(), left, right, state.strength()), Object);
+ break;
+ }
// Do not try to update the target if the code was marked for lazy
// deoptimization. (Since we do not relocate addresses in these
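The switch added above replaces the old detour through JS builtins (TokenToJSBuiltin plus Execution::Call) with direct dispatch on the token. The same shape over plain doubles, as a self-contained sketch:

    #include <cstdio>

    enum Token { ADD, SUB, MUL, DIV };  // subset of the ops in the real switch

    // Direct switch dispatch, replacing a lookup table of JS builtins.
    static double Transition(Token op, double left, double right) {
      switch (op) {
        case ADD: return left + right;
        case SUB: return left - right;
        case MUL: return left * right;
        case DIV: return left / right;
      }
      return 0;  // unreachable for valid tokens
    }

    int main() {
      std::printf("%g %g\n", Transition(ADD, 2, 3), Transition(DIV, 7, 2));
      return 0;
    }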
@@ -2866,42 +2942,6 @@ RUNTIME_FUNCTION(Runtime_Unreachable) {
}
-Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op,
- Strength strength) {
- if (is_strong(strength)) {
- switch (op) {
- default: UNREACHABLE();
- case Token::ADD: return Builtins::ADD_STRONG;
- case Token::SUB: return Builtins::SUB_STRONG;
- case Token::MUL: return Builtins::MUL_STRONG;
- case Token::DIV: return Builtins::DIV_STRONG;
- case Token::MOD: return Builtins::MOD_STRONG;
- case Token::BIT_OR: return Builtins::BIT_OR_STRONG;
- case Token::BIT_AND: return Builtins::BIT_AND_STRONG;
- case Token::BIT_XOR: return Builtins::BIT_XOR_STRONG;
- case Token::SAR: return Builtins::SAR_STRONG;
- case Token::SHR: return Builtins::SHR_STRONG;
- case Token::SHL: return Builtins::SHL_STRONG;
- }
- } else {
- switch (op) {
- default: UNREACHABLE();
- case Token::ADD: return Builtins::ADD;
- case Token::SUB: return Builtins::SUB;
- case Token::MUL: return Builtins::MUL;
- case Token::DIV: return Builtins::DIV;
- case Token::MOD: return Builtins::MOD;
- case Token::BIT_OR: return Builtins::BIT_OR;
- case Token::BIT_AND: return Builtins::BIT_AND;
- case Token::BIT_XOR: return Builtins::BIT_XOR;
- case Token::SAR: return Builtins::SAR;
- case Token::SHR: return Builtins::SHR;
- case Token::SHL: return Builtins::SHL;
- }
- }
-}
-
-
Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
ToBooleanStub stub(isolate(), target()->extra_ic_state());
bool to_boolean_value = stub.UpdateStatus(object);
@@ -3073,13 +3113,14 @@ RUNTIME_FUNCTION(Runtime_LoadIC_MissFromStubFailure) {
// A monomorphic or polymorphic KeyedLoadIC with a string key can call the
// LoadIC miss handler if the handler misses. Since the vector Nexus is
// set up outside the IC, handle that here.
- if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
+ if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::LOAD_IC) {
LoadICNexus nexus(vector, vector_slot);
LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
} else {
- DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC,
+ vector->GetKind(vector_slot));
KeyedLoadICNexus nexus(vector, vector_slot);
KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
ic.UpdateState(receiver, key);
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index ee5fd261dc..d65d7a8c1b 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -122,6 +122,11 @@ class IC {
// Configure the vector for POLYMORPHIC.
void ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
CodeHandleList* handlers);
+ // Configure the vector for POLYMORPHIC with transitions (only for element
+ // keyed stores).
+ void ConfigureVectorState(MapHandleList* maps,
+ MapHandleList* transitioned_maps,
+ CodeHandleList* handlers);
char TransitionMarkFromState(IC::State state);
void TraceIC(const char* type, Handle<Object> name);
@@ -280,14 +285,8 @@ class CallIC : public IC {
DCHECK(nexus != NULL);
}
- void PatchMegamorphic(Handle<Object> function);
-
void HandleMiss(Handle<Object> function);
- // Returns true if a custom handler was installed.
- bool DoCustomHandler(Handle<Object> function,
- const CallICState& callic_state);
-
// Code generator routines.
static Handle<Code> initialize_stub(Isolate* isolate, int argc,
CallICState::CallType call_type);
@@ -324,6 +323,7 @@ class LoadIC : public IC {
}
// Code generator routines.
+
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateRuntimeGetProperty(MacroAssembler* masm,
@@ -525,9 +525,9 @@ class KeyedStoreIC : public StoreIC {
// When more language modes are added, these BitFields need to move too.
STATIC_ASSERT(i::LANGUAGE_END == 3);
class ExtraICStateKeyedAccessStoreMode
- : public BitField<KeyedAccessStoreMode, 3, 4> {}; // NOLINT
+ : public BitField<KeyedAccessStoreMode, 3, 3> {}; // NOLINT
- class IcCheckTypeField : public BitField<IcCheckType, 7, 1> {};
+ class IcCheckTypeField : public BitField<IcCheckType, 6, 1> {};
static ExtraICState ComputeExtraICState(LanguageMode flag,
KeyedAccessStoreMode mode) {
@@ -538,10 +538,17 @@ class KeyedStoreIC : public StoreIC {
static KeyedAccessStoreMode GetKeyedAccessStoreMode(
ExtraICState extra_state) {
+ DCHECK(!FLAG_vector_stores);
return ExtraICStateKeyedAccessStoreMode::decode(extra_state);
}
+ KeyedAccessStoreMode GetKeyedAccessStoreMode() {
+ DCHECK(FLAG_vector_stores);
+ return casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
+ }
+
static IcCheckType GetKeyType(ExtraICState extra_state) {
+ DCHECK(!FLAG_vector_stores);
return IcCheckTypeField::decode(extra_state);
}
@@ -571,6 +578,8 @@ class KeyedStoreIC : public StoreIC {
static Handle<Code> initialize_stub_in_optimized_code(
Isolate* isolate, LanguageMode language_mode, State initialization_state);
+ static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
+ ExtraICState extra_state);
static void Clear(Isolate* isolate, Code* host, KeyedStoreICNexus* nexus);
@@ -587,7 +596,7 @@ class KeyedStoreIC : public StoreIC {
}
}
- Handle<Code> StoreElementStub(Handle<JSObject> receiver,
+ Handle<Code> StoreElementStub(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode);
private:
@@ -599,6 +608,8 @@ class KeyedStoreIC : public StoreIC {
Handle<Map> ComputeTransitionedMap(Handle<Map> map,
KeyedAccessStoreMode store_mode);
+ void ValidateStoreMode(Handle<Code> stub);
+
friend class IC;
};
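The header hunk shrinks ExtraICStateKeyedAccessStoreMode from 4 bits to 3, which is enough now that the holey store-mode variants are gone, and slides IcCheckTypeField down from bit 7 to bit 6. A minimal re-implementation of the BitField pattern showing the post-patch layout; the bit positions come from the hunk above, everything else is illustrative:

    #include <cassert>
    #include <cstdint>

    // Minimal re-implementation of the BitField pattern used in ic.h.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << kShift) & kMask;
      }
      static T decode(uint32_t state) {
        return static_cast<T>((state & kMask) >> kShift);
      }
    };

    // Post-patch layout: language mode below, store mode in bits 3..5,
    // key type in bit 6.
    using StoreModeField = BitField<int, 3, 3>;
    using IcCheckTypeField = BitField<int, 6, 1>;

    int main() {
      uint32_t state = StoreModeField::encode(5) | IcCheckTypeField::encode(1);
      assert(StoreModeField::decode(state) == 5);
      assert(IcCheckTypeField::decode(state) == 1);
      return 0;
    }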
@@ -608,9 +619,6 @@ class BinaryOpIC : public IC {
public:
explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
- static Builtins::JavaScript TokenToJSBuiltin(Token::Value op,
- Strength strength);
-
MaybeHandle<Object> Transition(Handle<AllocationSite> allocation_site,
Handle<Object> left,
Handle<Object> right) WARN_UNUSED_RESULT;
diff --git a/deps/v8/src/ic/mips/access-compiler-mips.cc b/deps/v8/src/ic/mips/access-compiler-mips.cc
index 9aba385497..f2f6c62c71 100644
--- a/deps/v8/src/ic/mips/access-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/access-compiler-mips.cc
@@ -31,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(a3.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores || a3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, a3, t0, t1};
return registers;
}
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index e3d4ae3adc..8c135e4088 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -295,25 +296,35 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
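Every slow path in these hunks now switches between 3 and 5 runtime arguments on FLAG_vector_stores, because the feedback slot and vector must reach the runtime as well. A toy model of the argument list StoreIC_PushArgs builds; the string names are placeholders for the real registers:

    #include <cstdio>
    #include <vector>

    static bool FLAG_vector_stores = true;  // stand-in for the V8 flag

    // Builds the runtime argument list the way StoreIC_PushArgs does:
    // receiver, name, value, and optionally slot + vector.
    static std::vector<const char*> StoreICArgs() {
      std::vector<const char*> args = {"receiver", "name", "value"};
      if (FLAG_vector_stores) {
        args.push_back("slot");
        args.push_back("vector");
      }
      return args;
    }

    int main() {
      // The callee's DCHECK mirrors this: 5 args with vector stores, else 3.
      std::printf("argc = %zu\n", StoreICArgs().size());
      return 0;
    }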
@@ -556,6 +567,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ Branch(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
diff --git a/deps/v8/src/ic/mips/ic-compiler-mips.cc b/deps/v8/src/ic/mips/ic-compiler-mips.cc
index 80f5c4783f..64f1662880 100644
--- a/deps/v8/src/ic/mips/ic-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/ic-compiler-mips.cc
@@ -100,7 +100,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ Branch(&next_map, ne, match, Operand(map_reg));
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/mips/ic-mips.cc b/deps/v8/src/ic/mips/ic-mips.cc
index a673dbf254..a1a118135b 100644
--- a/deps/v8/src/ic/mips/ic-mips.cc
+++ b/deps/v8/src/ic/mips/ic-mips.cc
@@ -681,7 +681,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, a3, t0, t1, t2));
+ DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
@@ -693,7 +693,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, a3, t0, t1, t2);
+ receiver, key, t1, t2, t4, t5);
// Cache miss.
__ Branch(&miss);
@@ -794,20 +794,22 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = a3;
+ Register dictionary = t1;
DCHECK(receiver.is(a1));
DCHECK(name.is(a2));
DCHECK(value.is(a0));
+ DCHECK(VectorStoreICDescriptor::VectorRegister().is(a3));
+ DCHECK(VectorStoreICDescriptor::SlotRegister().is(t0));
__ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, t0, t1);
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
+ __ IncrementCounter(counters->store_normal_hit(), 1, t2, t5);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
+ __ IncrementCounter(counters->store_normal_miss(), 1, t2, t5);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/mips/stub-cache-mips.cc b/deps/v8/src/ic/mips/stub-cache-mips.cc
index 12cacc8f4f..1a9897e8f3 100644
--- a/deps/v8/src/ic/mips/stub-cache-mips.cc
+++ b/deps/v8/src/ic/mips/stub-cache-mips.cc
@@ -116,8 +116,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
+ Register vector, slot;
+ if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+ vector = VectorStoreICDescriptor::VectorRegister();
+ slot = VectorStoreICDescriptor::SlotRegister();
+ } else {
+ vector = LoadWithVectorDescriptor::VectorRegister();
+ slot = LoadWithVectorDescriptor::SlotRegister();
+ }
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
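GenerateProbe must now select the vector and slot registers by IC kind, since store ICs carry them in VectorStoreICDescriptor rather than the load descriptor. The same selection appears in each architecture's stub cache below; a sketch of the logic, with register names as plain strings:

    #include <cstdio>
    #include <string>
    #include <utility>

    enum Kind { LOAD_IC, KEYED_LOAD_IC, STORE_IC, KEYED_STORE_IC };

    // Register names are placeholders; the point is the kind-based
    // selection the stub-cache hunks introduce.
    static std::pair<std::string, std::string> VectorAndSlot(Kind k) {
      if (k == STORE_IC || k == KEYED_STORE_IC) {
        return {"VectorStoreICDescriptor::VectorRegister",
                "VectorStoreICDescriptor::SlotRegister"};
      }
      return {"LoadWithVectorDescriptor::VectorRegister",
              "LoadWithVectorDescriptor::SlotRegister"};
    }

    int main() {
      std::printf("%s\n", VectorAndSlot(KEYED_STORE_IC).first.c_str());
      return 0;
    }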
diff --git a/deps/v8/src/ic/mips64/access-compiler-mips64.cc b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
index a2e7aed4dc..500a6d65c7 100644
--- a/deps/v8/src/ic/mips64/access-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/access-compiler-mips64.cc
@@ -31,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(a3.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores || a3.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, a3, a4, a5};
return registers;
}
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 49e9265aee..9c3a5b3e70 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -296,25 +297,35 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -350,7 +361,7 @@ void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
DCHECK(!map_reg.is(scratch));
__ LoadWeakValue(map_reg, cell, miss);
if (transition->CanBeDeprecated()) {
- __ ld(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+ __ lwu(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
__ And(at, scratch, Operand(Map::Deprecated::kMask));
__ Branch(miss, ne, at, Operand(zero_reg));
}
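The lwu change above narrows the load of Map::kBitField3Offset to the 32 bits the field actually occupies; the previous 64-bit ld also read the neighboring field, which would make the Deprecated mask test look at the wrong bits on a big-endian target. A toy illustration of why the load width matters when masking a 32-bit bitfield; the field values are arbitrary:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      // Two adjacent 32-bit fields, like bit_field3 followed by its neighbor.
      uint32_t fields[2] = {0x00000040, 0xDEADBEEF};

      uint32_t lwu;  // correct: 32-bit zero-extending load
      std::memcpy(&lwu, &fields[0], sizeof(lwu));

      uint64_t ld;   // wrong width: drags in the neighboring field too
      std::memcpy(&ld, &fields[0], sizeof(ld));

      std::printf("lwu=%#x ld=%#llx\n", (unsigned)lwu,
                  (unsigned long long)ld);
      return 0;
    }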
@@ -557,6 +568,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ Branch(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
diff --git a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
index a834430e1e..8cdd8f03bc 100644
--- a/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-compiler-mips64.cc
@@ -100,7 +100,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ Branch(&next_map, ne, match, Operand(map_reg));
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index 6f3916dd2e..0d7af56071 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -551,7 +551,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
// We have to see if the double version of the hole is present. If so
// go to the runtime.
__ Daddu(address, elements,
- Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32) -
+ Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
kHeapObjectTag));
__ SmiScale(at, key, kPointerSizeLog2);
__ daddu(address, address, at);
@@ -677,9 +677,10 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
if (FLAG_vector_stores) {
// The handlers in the stub cache expect a vector and slot. Since we won't
// change the IC from any downstream misses, a dummy vector can be used.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, a3, a4, a5, a6));
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+
+ DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
@@ -691,7 +692,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, a3, a4, a5, a6);
+ receiver, key, a5, a6, a7, t0);
// Cache miss.
__ Branch(&miss);
@@ -792,18 +793,20 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = a3;
- DCHECK(!AreAliased(value, receiver, name, dictionary, a4, a5));
+ Register dictionary = a5;
+ DCHECK(!AreAliased(
+ value, receiver, name, VectorStoreICDescriptor::VectorRegister(),
+ VectorStoreICDescriptor::SlotRegister(), dictionary, a6, a7));
__ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &miss, a3, name, value, a4, a5);
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, a4, a5);
+ __ IncrementCounter(counters->store_normal_hit(), 1, a6, a7);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, a4, a5);
+ __ IncrementCounter(counters->store_normal_miss(), 1, a6, a7);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/mips64/stub-cache-mips64.cc b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
index b1ec640719..4ab9f8e5b2 100644
--- a/deps/v8/src/ic/mips64/stub-cache-mips64.cc
+++ b/deps/v8/src/ic/mips64/stub-cache-mips64.cc
@@ -119,8 +119,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
+ Register vector, slot;
+ if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+ vector = VectorStoreICDescriptor::VectorRegister();
+ slot = VectorStoreICDescriptor::SlotRegister();
+ } else {
+ vector = LoadWithVectorDescriptor::VectorRegister();
+ slot = LoadWithVectorDescriptor::SlotRegister();
+ }
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
diff --git a/deps/v8/src/ic/ppc/access-compiler-ppc.cc b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
index 2021b80fd8..fcbbc66121 100644
--- a/deps/v8/src/ic/ppc/access-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
@@ -31,7 +31,7 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(r6.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores || r6.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, r6, r7, r8};
return registers;
}
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 9ec2f5ff3f..52efcf91a4 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -304,25 +305,35 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
}
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister(),
+ VectorStoreICDescriptor::SlotRegister(),
+ VectorStoreICDescriptor::VectorRegister());
+ } else {
+ __ Push(StoreDescriptor::ReceiverRegister(),
+ StoreDescriptor::NameRegister(), StoreDescriptor::ValueRegister());
+ }
+}
+
+
void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
- // Push receiver, key and value for runtime call.
- __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
- StoreDescriptor::ValueRegister());
+ StoreIC_PushArgs(masm);
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -564,6 +575,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ b(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
diff --git a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
index 59054b2058..578b73d40e 100644
--- a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
@@ -112,7 +112,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ bne(&next_map);
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
index 7cac3058bb..09117179ea 100644
--- a/deps/v8/src/ic/ppc/ic-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-ppc.cc
@@ -711,7 +711,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
// change the IC from any downstream misses, a dummy vector can be used.
Register vector = VectorStoreICDescriptor::VectorRegister();
Register slot = VectorStoreICDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, r6, r7, r8, r9));
+ DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
Handle<TypeFeedbackVector> dummy_vector =
TypeFeedbackVector::DummyVector(masm->isolate());
int slot_index = dummy_vector->GetIndex(
@@ -723,7 +723,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, r6, r7, r8, r9);
+ receiver, key, r8, r9, r10, r11);
// Cache miss.
__ b(&miss);
@@ -806,20 +806,22 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = r6;
+ Register dictionary = r8;
DCHECK(receiver.is(r4));
DCHECK(name.is(r5));
DCHECK(value.is(r3));
+ DCHECK(VectorStoreICDescriptor::VectorRegister().is(r6));
+ DCHECK(VectorStoreICDescriptor::SlotRegister().is(r7));
__ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- GenerateDictionaryStore(masm, &miss, dictionary, name, value, r7, r8);
+ GenerateDictionaryStore(masm, &miss, dictionary, name, value, r9, r10);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, r7, r8);
+ __ IncrementCounter(counters->store_normal_hit(), 1, r9, r10);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, r7, r8);
+ __ IncrementCounter(counters->store_normal_miss(), 1, r9, r10);
GenerateMiss(masm);
}
diff --git a/deps/v8/src/ic/ppc/stub-cache-ppc.cc b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
index ed703fb21e..6030b2cbc8 100644
--- a/deps/v8/src/ic/ppc/stub-cache-ppc.cc
+++ b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
@@ -137,8 +137,14 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// extra3 don't conflict with the vector and slot registers, which need
// to be preserved for a handler call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadWithVectorDescriptor::SlotRegister();
+ Register vector, slot;
+ if (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC) {
+ vector = VectorStoreICDescriptor::VectorRegister();
+ slot = VectorStoreICDescriptor::SlotRegister();
+ } else {
+ vector = LoadWithVectorDescriptor::VectorRegister();
+ slot = LoadWithVectorDescriptor::SlotRegister();
+ }
DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
}
#endif
diff --git a/deps/v8/src/ic/x64/access-compiler-x64.cc b/deps/v8/src/ic/x64/access-compiler-x64.cc
index 63e60f0b91..85b44ef475 100644
--- a/deps/v8/src/ic/x64/access-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/access-compiler-x64.cc
@@ -31,7 +31,8 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(rbx.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores ||
+ rbx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, rbx, rdi, r8};
return registers;
}
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index 920d06c541..1490c921fc 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -303,13 +304,26 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
+ if (FLAG_vector_stores) {
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+
+ __ PopReturnAddressTo(r11);
+ __ Push(receiver);
+ __ Push(name);
+ __ Push(value);
+ __ Push(slot);
+ __ Push(vector);
+ __ PushReturnAddressFrom(r11);
+ } else {
+ DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
- __ PopReturnAddressTo(rbx);
- __ Push(receiver);
- __ Push(name);
- __ Push(value);
- __ PushReturnAddressFrom(rbx);
+ __ PopReturnAddressTo(rbx);
+ __ Push(receiver);
+ __ Push(name);
+ __ Push(value);
+ __ PushReturnAddressFrom(rbx);
+ }
}
@@ -318,7 +332,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
@@ -327,7 +341,8 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -574,6 +589,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
diff --git a/deps/v8/src/ic/x64/ic-compiler-x64.cc b/deps/v8/src/ic/x64/ic-compiler-x64.cc
index d5e548412c..fd92cca570 100644
--- a/deps/v8/src/ic/x64/ic-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/ic-compiler-x64.cc
@@ -55,7 +55,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/x64/ic-x64.cc b/deps/v8/src/ic/x64/ic-x64.cc
index 8d334809cb..ff74a965e4 100644
--- a/deps/v8/src/ic/x64/ic-x64.cc
+++ b/deps/v8/src/ic/x64/ic-x64.cc
@@ -582,7 +582,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, rbx, no_reg);
+ receiver, key, r9, no_reg);
// Cache miss.
__ jmp(&miss);
@@ -735,8 +735,13 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
- // The return address is on the stack.
+ if (FLAG_vector_stores) {
+ // This shouldn't be called.
+ __ int3();
+ return;
+ }
+ // The return address is on the stack.
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -785,7 +790,10 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = rbx;
+ Register dictionary = r11;
+ DCHECK(!FLAG_vector_stores ||
+ !AreAliased(dictionary, VectorStoreICDescriptor::VectorRegister(),
+ VectorStoreICDescriptor::SlotRegister()));
Label miss;
diff --git a/deps/v8/src/ic/x64/stub-cache-x64.cc b/deps/v8/src/ic/x64/stub-cache-x64.cc
index 3908018927..9a9dfe9f4b 100644
--- a/deps/v8/src/ic/x64/stub-cache-x64.cc
+++ b/deps/v8/src/ic/x64/stub-cache-x64.cc
@@ -110,9 +110,16 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
// the vector and slot registers, which need to be preserved for a handler
// call or miss.
if (IC::ICUseVector(ic_kind)) {
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadDescriptor::SlotRegister();
- DCHECK(!AreAliased(vector, slot, scratch));
+ if (ic_kind == Code::LOAD_IC || ic_kind == Code::KEYED_LOAD_IC) {
+ Register vector = LoadWithVectorDescriptor::VectorRegister();
+ Register slot = LoadDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch));
+ } else {
+ DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ DCHECK(!AreAliased(vector, slot, scratch));
+ }
}
#endif
diff --git a/deps/v8/src/ic/x87/access-compiler-x87.cc b/deps/v8/src/ic/x87/access-compiler-x87.cc
index bdcbb166b9..a80c649e45 100644
--- a/deps/v8/src/ic/x87/access-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/access-compiler-x87.cc
@@ -30,7 +30,8 @@ Register* PropertyAccessCompiler::store_calling_convention() {
// receiver, name, scratch1, scratch2, scratch3.
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
- DCHECK(ebx.is(StoreTransitionDescriptor::MapRegister()));
+ DCHECK(FLAG_vector_stores ||
+ ebx.is(StoreTransitionDescriptor::MapRegister()));
static Register registers[] = {receiver, name, ebx, edi, no_reg};
return registers;
}
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index c0d5fd8234..d9f7e8012d 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -7,6 +7,7 @@
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
@@ -303,13 +304,24 @@ static void StoreIC_PushArgs(MacroAssembler* masm) {
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+ if (FLAG_vector_stores) {
+ Register slot = VectorStoreICDescriptor::SlotRegister();
+ Register vector = VectorStoreICDescriptor::VectorRegister();
- __ pop(ebx);
- __ push(receiver);
- __ push(name);
- __ push(value);
- __ push(ebx);
+ __ xchg(receiver, Operand(esp, 0));
+ __ push(name);
+ __ push(value);
+ __ push(slot);
+ __ push(vector);
+ __ push(receiver); // which contains the return address.
+ } else {
+ DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+ __ pop(ebx);
+ __ push(receiver);
+ __ push(name);
+ __ push(value);
+ __ push(ebx);
+ }
}
@@ -318,7 +330,7 @@ void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kStoreIC_Slow, FLAG_vector_stores ? 5 : 3, 1);
}
@@ -327,7 +339,8 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
StoreIC_PushArgs(masm);
// Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, 3, 1);
+ __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow, FLAG_vector_stores ? 5 : 3,
+ 1);
}
@@ -351,10 +364,16 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
void NamedStoreHandlerCompiler::GeneratePushMap(Register map_reg,
Register scratch) {
- // Get the return address, push the argument and then continue.
- __ pop(scratch);
+  // current       after GeneratePushMap
+  // -------------------------------------------------
+  //   ret addr    slot
+  //   vector      vector
+  // sp-> slot     map
+  //               sp-> ret addr
+  //
+ __ xchg(map_reg, Operand(esp, 0));
+ __ xchg(map_reg, Operand(esp, 2 * kPointerSize));
__ push(map_reg);
- __ push(scratch);
}
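Without vector stores the old code borrowed a scratch register to splice the map under the return address; with the slot and vector now live on the stack, the x87 version rotates the map into place with two xchg instructions instead. Simulating those stack operations on a plain array reproduces the before/after layout from the comment:

    #include <cassert>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
      // Stack modeled top-first: sp -> slot, vector, ret addr.
      std::vector<std::string> stack = {"slot", "vector", "ret"};
      std::string map_reg = "map";

      std::swap(map_reg, stack[0]);          // xchg map_reg, [esp]
      std::swap(map_reg, stack[2]);          // xchg map_reg, [esp + 2 * kPointerSize]
      stack.insert(stack.begin(), map_reg);  // push map_reg (now holds ret addr)

      // Post-state: sp -> ret addr, map, vector, slot.
      assert((stack ==
              std::vector<std::string>{"ret", "map", "vector", "slot"}));
      return 0;
    }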
@@ -574,6 +593,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
Label success;
__ jmp(&success);
GenerateRestoreName(miss, name);
+ if (IC::ICUseVector(kind())) PopVectorAndSlot();
TailCallBuiltin(masm(), MissBuiltin(kind()));
__ bind(&success);
}
diff --git a/deps/v8/src/ic/x87/ic-compiler-x87.cc b/deps/v8/src/ic/x87/ic-compiler-x87.cc
index 4d5fc6a712..d29e32108b 100644
--- a/deps/v8/src/ic/x87/ic-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/ic-compiler-x87.cc
@@ -112,7 +112,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
Label next_map;
__ j(not_equal, &next_map, Label::kNear);
Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
- __ LoadWeakValue(transition_map(), cell, &miss);
+ Register transition_map = scratch1();
+ DCHECK(!FLAG_vector_stores &&
+ transition_map.is(StoreTransitionDescriptor::MapRegister()));
+ __ LoadWeakValue(transition_map, cell, &miss);
__ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
__ bind(&next_map);
}
diff --git a/deps/v8/src/ic/x87/ic-x87.cc b/deps/v8/src/ic/x87/ic-x87.cc
index f9a94bc5b8..53e7a5ca0c 100644
--- a/deps/v8/src/ic/x87/ic-x87.cc
+++ b/deps/v8/src/ic/x87/ic-x87.cc
@@ -577,7 +577,7 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
- receiver, key, ebx, no_reg);
+ receiver, key, edi, no_reg);
if (FLAG_vector_stores) {
__ pop(VectorStoreICDescriptor::VectorRegister());
@@ -734,6 +734,12 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ if (FLAG_vector_stores) {
+ // This shouldn't be called.
+ __ int3();
+ return;
+ }
+
// Return address is on the stack.
Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
Code::ComputeHandlerFlags(Code::STORE_IC));
@@ -787,22 +793,32 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreDescriptor::NameRegister();
Register value = StoreDescriptor::ValueRegister();
- Register dictionary = ebx;
-
- __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ Register slot = VectorStoreICDescriptor::SlotRegister();
// A lot of registers are needed for storing to slow case
// objects. Push and restore receiver but rely on
// GenerateDictionaryStore preserving the value and name.
__ push(receiver);
+ if (FLAG_vector_stores) {
+ __ push(vector);
+ __ push(slot);
+ }
+
+ Register dictionary = ebx;
+ __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
receiver, edi);
- __ Drop(1);
+ __ Drop(FLAG_vector_stores ? 3 : 1);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->store_normal_hit(), 1);
__ ret(0);
__ bind(&restore_miss);
+ if (FLAG_vector_stores) {
+ __ pop(slot);
+ __ pop(vector);
+ }
__ pop(receiver);
__ IncrementCounter(counters->store_normal_miss(), 1);
GenerateMiss(masm);
diff --git a/deps/v8/src/ic/x87/stub-cache-x87.cc b/deps/v8/src/ic/x87/stub-cache-x87.cc
index d76d0a26b7..2522223ead 100644
--- a/deps/v8/src/ic/x87/stub-cache-x87.cc
+++ b/deps/v8/src/ic/x87/stub-cache-x87.cc
@@ -23,8 +23,13 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+ ExternalReference virtual_register =
+ ExternalReference::vector_store_virtual_register(masm->isolate());
Label miss;
+ bool is_vector_store =
+ IC::ICUseVector(ic_kind) &&
+ (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
// Multiply by 3 because there are 3 fields per entry (name, code, map).
__ lea(offset, Operand(offset, offset, times_2, 0));
@@ -56,19 +61,29 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
}
#endif
- if (IC::ICUseVector(ic_kind)) {
- // The vector and slot were pushed onto the stack before starting the
- // probe, and need to be dropped before calling the handler.
+ // The vector and slot were pushed onto the stack before starting the
+ // probe, and need to be dropped before calling the handler.
+ if (is_vector_store) {
+ // The overlap here is rather embarrassing. One does what one must.
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ DCHECK(extra.is(VectorStoreICDescriptor::SlotRegister()));
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ pop(vector);
+ __ mov(Operand::StaticVariable(virtual_register), extra);
+ __ pop(extra); // Pop "slot".
+ // Jump to the first instruction in the code stub.
+ __ jmp(Operand::StaticVariable(virtual_register));
+ } else {
__ pop(LoadWithVectorDescriptor::VectorRegister());
__ pop(LoadDescriptor::SlotRegister());
+ __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(extra);
}
- // Jump to the first instruction in the code stub.
- __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(extra);
-
__ bind(&miss);
} else {
+ DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
+
// Save the offset on the stack.
__ push(offset);
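With every usable register holding vector-store state, the handler's entry address is parked in an isolate-level memory cell (the "virtual register") so the vector and slot can be popped before the indirect jump. A minimal sketch of that spill-and-jump idea in plain C++ (names are hypothetical stand-ins for the V8 machinery):

#include <cstdio>

using Handler = void (*)();
static Handler virtual_register;  // stands in for the isolate's static cell

static void StubEntry() { std::printf("handler reached\n"); }

int main() {
  virtual_register = StubEntry;  // __ mov(Operand::StaticVariable(...), target)
  // ...the registers that held the target are now free to be reloaded
  // (pop vector, pop slot)...
  virtual_register();            // __ jmp(Operand::StaticVariable(...))
}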
@@ -105,21 +120,22 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
__ pop(offset);
__ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
- if (IC::ICUseVector(ic_kind)) {
+ // Jump to the first instruction in the code stub.
+ if (is_vector_store) {
// The vector and slot were pushed onto the stack before starting the
// probe, and need to be dropped before calling the handler.
- Register vector = LoadWithVectorDescriptor::VectorRegister();
- Register slot = LoadDescriptor::SlotRegister();
- DCHECK(!offset.is(vector) && !offset.is(slot));
-
+ Register vector = VectorStoreICDescriptor::VectorRegister();
+ DCHECK(offset.is(VectorStoreICDescriptor::SlotRegister()));
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ mov(Operand::StaticVariable(virtual_register), offset);
__ pop(vector);
- __ pop(slot);
+ __ pop(offset); // Pop "slot".
+ __ jmp(Operand::StaticVariable(virtual_register));
+ } else {
+ __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(offset);
}
- // Jump to the first instruction in the code stub.
- __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(offset);
-
// Pop at miss.
__ bind(&miss);
__ pop(offset);
diff --git a/deps/v8/src/heap/identity-map.cc b/deps/v8/src/identity-map.cc
index f901ac4424..1d23af95e8 100644
--- a/deps/v8/src/heap/identity-map.cc
+++ b/deps/v8/src/identity-map.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/heap/identity-map.h"
+#include "src/identity-map.h"
#include "src/heap/heap.h"
#include "src/zone-containers.h"
diff --git a/deps/v8/src/heap/identity-map.h b/deps/v8/src/identity-map.h
index 672ca5a52a..2143e24e37 100644
--- a/deps/v8/src/heap/identity-map.h
+++ b/deps/v8/src/identity-map.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_IDENTITY_MAP_H_
-#define V8_HEAP_IDENTITY_MAP_H_
+#ifndef V8_IDENTITY_MAP_H_
+#define V8_IDENTITY_MAP_H_
#include "src/handles.h"
@@ -95,4 +95,4 @@ class IdentityMap : public IdentityMapBase {
}
} // namespace v8::internal
-#endif // V8_HEAP_IDENTITY_MAP_H_
+#endif // V8_IDENTITY_MAP_H_
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index da9eb2991f..b71f973120 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/interface-descriptors.h"
namespace v8 {
@@ -11,35 +9,36 @@ namespace internal {
namespace {
// Constructors for common combined semantic and representation types.
-Type* SmiType() {
- return Type::Intersect(Type::SignedSmall(), Type::TaggedSigned());
+Type* SmiType(Zone* zone) {
+ return Type::Intersect(Type::SignedSmall(), Type::TaggedSigned(), zone);
}
-Type* UntaggedSigned32() {
- return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32());
+Type* UntaggedSigned32(Zone* zone) {
+ return Type::Intersect(Type::Signed32(), Type::UntaggedSigned32(), zone);
}
-Type* AnyTagged() {
+Type* AnyTagged(Zone* zone) {
return Type::Intersect(
- Type::Any(), Type::Union(Type::TaggedPointer(), Type::TaggedSigned()));
+ Type::Any(),
+ Type::Union(Type::TaggedPointer(), Type::TaggedSigned(), zone), zone);
}
-Type* ExternalPointer() {
- return Type::Intersect(Type::Internal(), Type::UntaggedPointer());
+Type* ExternalPointer(Zone* zone) {
+ return Type::Intersect(Type::Internal(), Type::UntaggedPointer(), zone);
}
}
Type::FunctionType* CallInterfaceDescriptor::BuildDefaultFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function =
- Type::FunctionType::New(AnyTagged(), Type::Undefined(), parameter_count,
- isolate->interface_descriptor_zone());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function = Type::FunctionType::New(
+ AnyTagged(zone), Type::Undefined(), parameter_count, zone);
while (parameter_count-- != 0) {
- function->InitParameter(parameter_count, AnyTagged());
+ function->InitParameter(parameter_count, AnyTagged(zone));
}
return function;
}
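The rest of this file repeats one mechanical change: the Zone is threaded through each type-constructor helper so allocations land in the caller's arena rather than being fetched from the isolate inside the helper. A toy sketch of the pattern (Zone and the helpers are simplified stand-ins, not the V8 classes):

#include <memory>
#include <vector>

struct Zone {  // toy arena: owns everything allocated in it
  std::vector<std::unique_ptr<int>> objects;
  int* New(int v) {
    objects.push_back(std::make_unique<int>(v));
    return objects.back().get();
  }
};

// was: int* SmiType() { ...allocate via a global/isolate lookup... }
int* SmiType(Zone* zone) { return zone->New(1); }
int* AnyTagged(Zone* zone) { return zone->New(2); }

int main() {
  Zone zone;  // the caller now decides which arena owns the types
  int* a = AnyTagged(&zone);
  int* b = SmiType(&zone);
  return (a && b) ? 0 : 1;
}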
@@ -78,11 +77,12 @@ const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) const {
Type::FunctionType* LoadDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 3, isolate->interface_descriptor_zone());
- function->InitParameter(0, AnyTagged());
- function->InitParameter(1, AnyTagged());
- function->InitParameter(2, SmiType());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ function->InitParameter(0, AnyTagged(zone));
+ function->InitParameter(1, AnyTagged(zone));
+ function->InitParameter(2, SmiType(zone));
return function;
}
@@ -100,15 +100,25 @@ void StoreDescriptor::InitializePlatformSpecific(
}
+void StoreTransitionDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ MapRegister()};
+
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
Type::FunctionType*
StoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 4, isolate->interface_descriptor_zone());
- function->InitParameter(0, AnyTagged()); // Receiver
- function->InitParameter(1, AnyTagged()); // Name
- function->InitParameter(2, AnyTagged()); // Value
- function->InitParameter(3, AnyTagged()); // Map
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ function->InitParameter(0, AnyTagged(zone)); // Receiver
+ function->InitParameter(1, AnyTagged(zone)); // Name
+ function->InitParameter(2, AnyTagged(zone)); // Value
+ function->InitParameter(3, AnyTagged(zone)); // Map
return function;
}
@@ -116,9 +126,10 @@ StoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType*
LoadGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 1, isolate->interface_descriptor_zone());
- function->InitParameter(0, UntaggedSigned32());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 1, zone);
+ function->InitParameter(0, UntaggedSigned32(zone));
return function;
}
@@ -133,10 +144,11 @@ void LoadGlobalViaContextDescriptor::InitializePlatformSpecific(
Type::FunctionType*
StoreGlobalViaContextDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 2, isolate->interface_descriptor_zone());
- function->InitParameter(0, UntaggedSigned32());
- function->InitParameter(1, AnyTagged());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+ function->InitParameter(0, UntaggedSigned32(zone));
+ function->InitParameter(1, AnyTagged(zone));
return function;
}
@@ -148,9 +160,23 @@ void StoreGlobalViaContextDescriptor::InitializePlatformSpecific(
}
-void InstanceofDescriptor::InitializePlatformSpecific(
+void InstanceOfDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {LeftRegister(), RightRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void StringCompareDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {left(), right()};
+ Register registers[] = {LeftRegister(), RightRegister()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
+void ToStringDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {ReceiverRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -179,12 +205,13 @@ void MathPowIntegerDescriptor::InitializePlatformSpecific(
Type::FunctionType*
LoadWithVectorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 4, isolate->interface_descriptor_zone());
- function->InitParameter(0, AnyTagged());
- function->InitParameter(1, AnyTagged());
- function->InitParameter(2, SmiType());
- function->InitParameter(3, AnyTagged());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ function->InitParameter(0, AnyTagged(zone));
+ function->InitParameter(1, AnyTagged(zone));
+ function->InitParameter(2, SmiType(zone));
+ function->InitParameter(3, AnyTagged(zone));
return function;
}
@@ -198,15 +225,32 @@ void LoadWithVectorDescriptor::InitializePlatformSpecific(
Type::FunctionType*
+VectorStoreTransitionDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 6, zone);
+ function->InitParameter(0, AnyTagged(zone)); // receiver
+ function->InitParameter(1, AnyTagged(zone)); // name
+ function->InitParameter(2, AnyTagged(zone)); // value
+ function->InitParameter(3, SmiType(zone)); // slot
+ function->InitParameter(4, AnyTagged(zone)); // vector
+ function->InitParameter(5, AnyTagged(zone)); // map
+ return function;
+}
+
+
+Type::FunctionType*
VectorStoreICDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 5, isolate->interface_descriptor_zone());
- function->InitParameter(0, AnyTagged());
- function->InitParameter(1, AnyTagged());
- function->InitParameter(2, AnyTagged());
- function->InitParameter(3, SmiType());
- function->InitParameter(4, AnyTagged());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 5, zone);
+ function->InitParameter(0, AnyTagged(zone));
+ function->InitParameter(1, AnyTagged(zone));
+ function->InitParameter(2, AnyTagged(zone));
+ function->InitParameter(3, SmiType(zone));
+ function->InitParameter(4, AnyTagged(zone));
return function;
}
@@ -222,12 +266,13 @@ void VectorStoreICDescriptor::InitializePlatformSpecific(
Type::FunctionType*
VectorStoreICTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 4, isolate->interface_descriptor_zone());
- function->InitParameter(0, AnyTagged());
- function->InitParameter(1, AnyTagged());
- function->InitParameter(2, AnyTagged());
- function->InitParameter(3, SmiType());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ function->InitParameter(0, AnyTagged(zone));
+ function->InitParameter(1, AnyTagged(zone));
+ function->InitParameter(2, AnyTagged(zone));
+ function->InitParameter(3, SmiType(zone));
return function;
}
@@ -243,9 +288,10 @@ void VectorStoreICTrampolineDescriptor::InitializePlatformSpecific(
Type::FunctionType*
ApiGetterDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 1, isolate->interface_descriptor_zone());
- function->InitParameter(0, ExternalPointer());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 1, zone);
+ function->InitParameter(0, ExternalPointer(zone));
return function;
}
@@ -264,6 +310,26 @@ void ArgumentsAccessReadDescriptor::InitializePlatformSpecific(
}
+Type::FunctionType*
+ArgumentsAccessNewDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ function->InitParameter(0, AnyTagged(zone));
+ function->InitParameter(1, SmiType(zone));
+ function->InitParameter(2, ExternalPointer(zone));
+ return function;
+}
+
+
+void ArgumentsAccessNewDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {function(), parameter_count(), parameter_pointer()};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void ContextOnlyDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr);
@@ -280,11 +346,12 @@ void GrowArrayElementsDescriptor::InitializePlatformSpecific(
Type::FunctionType*
FastCloneShallowArrayDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 3, isolate->interface_descriptor_zone());
- function->InitParameter(0, AnyTagged());
- function->InitParameter(1, SmiType());
- function->InitParameter(2, AnyTagged());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ function->InitParameter(0, AnyTagged(zone));
+ function->InitParameter(1, SmiType(zone));
+ function->InitParameter(2, AnyTagged(zone));
return function;
}
@@ -292,10 +359,11 @@ FastCloneShallowArrayDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType*
CreateAllocationSiteDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 2, isolate->interface_descriptor_zone());
- function->InitParameter(0, AnyTagged());
- function->InitParameter(1, SmiType());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+ function->InitParameter(0, AnyTagged(zone));
+ function->InitParameter(1, SmiType(zone));
return function;
}
@@ -303,11 +371,25 @@ CreateAllocationSiteDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType*
CreateWeakCellDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 3, isolate->interface_descriptor_zone());
- function->InitParameter(0, AnyTagged());
- function->InitParameter(1, SmiType());
- function->InitParameter(2, AnyTagged());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
+ function->InitParameter(0, AnyTagged(zone));
+ function->InitParameter(1, SmiType(zone));
+ function->InitParameter(2, AnyTagged(zone));
+ return function;
+}
+
+
+Type::FunctionType*
+CallTrampolineDescriptor::BuildCallInterfaceDescriptorFunctionType(
+ Isolate* isolate, int parameter_count) {
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
+ function->InitParameter(0, AnyTagged(zone)); // target
+ function->InitParameter(
+ 1, UntaggedSigned32(zone)); // actual number of arguments
return function;
}
@@ -315,10 +397,11 @@ CreateWeakCellDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType*
CallFunctionWithFeedbackDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 2, isolate->interface_descriptor_zone());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(1, SmiType());
+ function->InitParameter(1, SmiType(zone));
return function;
}
@@ -326,11 +409,12 @@ CallFunctionWithFeedbackDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType* CallFunctionWithFeedbackAndVectorDescriptor::
BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 3, isolate->interface_descriptor_zone());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(1, SmiType());
- function->InitParameter(2, AnyTagged());
+ function->InitParameter(1, SmiType(zone));
+ function->InitParameter(2, AnyTagged(zone));
return function;
}
@@ -338,11 +422,12 @@ Type::FunctionType* CallFunctionWithFeedbackAndVectorDescriptor::
Type::FunctionType*
ArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 3, isolate->interface_descriptor_zone());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(1, AnyTagged());
- function->InitParameter(2, UntaggedSigned32());
+ function->InitParameter(1, AnyTagged(zone));
+ function->InitParameter(2, UntaggedSigned32(zone));
return function;
}
@@ -350,10 +435,11 @@ ArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType*
InternalArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 2, isolate->interface_descriptor_zone());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 2, zone);
function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(1, UntaggedSigned32());
+ function->InitParameter(1, UntaggedSigned32(zone));
return function;
}
@@ -361,12 +447,15 @@ InternalArrayConstructorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType*
ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 3, isolate->interface_descriptor_zone());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 3, zone);
function->InitParameter(0, Type::Receiver()); // JSFunction
- function->InitParameter(1, UntaggedSigned32()); // actual number of arguments
- function->InitParameter(2,
- UntaggedSigned32()); // expected number of arguments
+ function->InitParameter(
+ 1, UntaggedSigned32(zone)); // actual number of arguments
+ function->InitParameter(
+ 2,
+ UntaggedSigned32(zone)); // expected number of arguments
return function;
}
@@ -374,13 +463,15 @@ ArgumentAdaptorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType*
ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 5, isolate->interface_descriptor_zone());
- function->InitParameter(0, AnyTagged()); // callee
- function->InitParameter(1, AnyTagged()); // call_data
- function->InitParameter(2, AnyTagged()); // holder
- function->InitParameter(3, ExternalPointer()); // api_function_address
- function->InitParameter(4, UntaggedSigned32()); // actual number of arguments
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 5, zone);
+ function->InitParameter(0, AnyTagged(zone)); // callee
+ function->InitParameter(1, AnyTagged(zone)); // call_data
+ function->InitParameter(2, AnyTagged(zone)); // holder
+ function->InitParameter(3, ExternalPointer(zone)); // api_function_address
+ function->InitParameter(
+ 4, UntaggedSigned32(zone)); // actual number of arguments
return function;
}
@@ -388,12 +479,13 @@ ApiFunctionDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType*
ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Isolate* isolate, int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 4, isolate->interface_descriptor_zone());
- function->InitParameter(0, AnyTagged()); // callee
- function->InitParameter(1, AnyTagged()); // call_data
- function->InitParameter(2, AnyTagged()); // holder
- function->InitParameter(3, ExternalPointer()); // api_function_address
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
+ function->InitParameter(0, AnyTagged(zone)); // callee
+ function->InitParameter(1, AnyTagged(zone)); // call_data
+ function->InitParameter(2, AnyTagged(zone)); // holder
+ function->InitParameter(3, ExternalPointer(zone)); // api_function_address
return function;
}
@@ -401,12 +493,13 @@ ApiAccessorDescriptor::BuildCallInterfaceDescriptorFunctionType(
Type::FunctionType* MathRoundVariantCallFromUnoptimizedCodeDescriptor::
BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 4, isolate->interface_descriptor_zone());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 4, zone);
function->InitParameter(0, Type::Receiver());
- function->InitParameter(1, SmiType());
- function->InitParameter(2, AnyTagged());
- function->InitParameter(3, AnyTagged());
+ function->InitParameter(1, SmiType(zone));
+ function->InitParameter(2, AnyTagged(zone));
+ function->InitParameter(3, AnyTagged(zone));
return function;
}
@@ -414,13 +507,14 @@ Type::FunctionType* MathRoundVariantCallFromUnoptimizedCodeDescriptor::
Type::FunctionType* MathRoundVariantCallFromOptimizedCodeDescriptor::
BuildCallInterfaceDescriptorFunctionType(Isolate* isolate,
int parameter_count) {
- Type::FunctionType* function = Type::FunctionType::New(
- AnyTagged(), Type::Undefined(), 5, isolate->interface_descriptor_zone());
+ Zone* zone = isolate->interface_descriptor_zone();
+ Type::FunctionType* function =
+ Type::FunctionType::New(AnyTagged(zone), Type::Undefined(), 5, zone);
function->InitParameter(0, Type::Receiver());
- function->InitParameter(1, SmiType());
- function->InitParameter(2, AnyTagged());
- function->InitParameter(3, AnyTagged());
- function->InitParameter(4, AnyTagged());
+ function->InitParameter(1, SmiType(zone));
+ function->InitParameter(2, AnyTagged(zone));
+ function->InitParameter(3, AnyTagged(zone));
+ function->InitParameter(4, AnyTagged(zone));
return function;
}
} // namespace internal
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index a016797623..534313f7d3 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -17,13 +17,15 @@ class PlatformInterfaceDescriptor;
V(Load) \
V(Store) \
V(StoreTransition) \
+ V(VectorStoreTransition) \
V(VectorStoreICTrampoline) \
V(VectorStoreIC) \
- V(Instanceof) \
+ V(InstanceOf) \
V(LoadWithVector) \
V(FastNewClosure) \
V(FastNewContext) \
V(ToNumber) \
+ V(ToString) \
V(ToObject) \
V(NumberToString) \
V(Typeof) \
@@ -35,6 +37,8 @@ class PlatformInterfaceDescriptor;
V(CallFunctionWithFeedback) \
V(CallFunctionWithFeedbackAndVector) \
V(CallConstruct) \
+ V(CallTrampoline) \
+ V(PushArgsAndCall) \
V(RegExpConstructResult) \
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
@@ -48,6 +52,7 @@ class PlatformInterfaceDescriptor;
V(BinaryOp) \
V(BinaryOpWithAllocationSite) \
V(StringAdd) \
+ V(StringCompare) \
V(Keyed) \
V(Named) \
V(CallHandler) \
@@ -56,6 +61,7 @@ class PlatformInterfaceDescriptor;
V(ApiAccessor) \
V(ApiGetter) \
V(ArgumentsAccessRead) \
+ V(ArgumentsAccessNew) \
V(StoreArrayLiteralElement) \
V(LoadGlobalViaContext) \
V(StoreGlobalViaContext) \
@@ -263,18 +269,40 @@ class StoreTransitionDescriptor : public StoreDescriptor {
kParameterCount
};
- // MapRegister() is no_reg on ia32, instead it's on the stack.
static const Register MapRegister();
};
-class InstanceofDescriptor : public CallInterfaceDescriptor {
+class VectorStoreTransitionDescriptor : public StoreDescriptor {
public:
- DECLARE_DESCRIPTOR(InstanceofDescriptor, CallInterfaceDescriptor)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(VectorStoreTransitionDescriptor,
+ StoreDescriptor)
+
+ // Extends StoreDescriptor with Map parameter.
+ enum ParameterIndices {
+ kReceiverIndex,
+ kNameIndex,
+ kValueIndex,
+ kSlotIndex,
+ kVectorIndex,
+ kMapIndex,
+ kParameterCount
+ };
+
+ // On ia32 these registers are no_reg; the values are passed on the stack instead.
+ static const Register SlotRegister();
+ static const Register VectorRegister();
+ static const Register MapRegister();
+};
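For orientation, a compile-checkable sketch of the six-slot convention this descriptor adds; the names mirror the ParameterIndices enum above, and everything else is illustrative C++, not V8 code:

// receiver, name, value follow StoreDescriptor; slot, vector, map are
// appended, and on ia32 those three travel on the stack (no_reg).
enum VectorStoreTransitionParams {
  kReceiver = 0,
  kName,
  kValue,
  kSlot,    // feedback slot (Smi)
  kVector,  // type feedback vector
  kMap,     // transition map to install
  kParamCount
};
static_assert(kParamCount == 6, "six parameters, matching kParameterCount");
int main() {}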
+
+
+class InstanceOfDescriptor final : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(InstanceOfDescriptor, CallInterfaceDescriptor)
enum ParameterIndices { kLeftIndex, kRightIndex, kParameterCount };
- static const Register left();
- static const Register right();
+ static const Register LeftRegister();
+ static const Register RightRegister();
};
@@ -340,6 +368,16 @@ class ToNumberDescriptor : public CallInterfaceDescriptor {
};
+class ToStringDescriptor : public CallInterfaceDescriptor {
+ public:
+ enum ParameterIndices { kReceiverIndex };
+
+ DECLARE_DESCRIPTOR(ToStringDescriptor, CallInterfaceDescriptor)
+
+ static const Register ReceiverRegister();
+};
+
+
class ToObjectDescriptor : public CallInterfaceDescriptor {
public:
enum ParameterIndices { kReceiverIndex };
@@ -396,6 +434,13 @@ class CreateWeakCellDescriptor : public CallInterfaceDescriptor {
};
+class CallTrampolineDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallTrampolineDescriptor,
+ CallInterfaceDescriptor)
+};
+
+
class CallFunctionDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
@@ -527,6 +572,16 @@ class StringAddDescriptor : public CallInterfaceDescriptor {
};
+class StringCompareDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(StringCompareDescriptor, CallInterfaceDescriptor)
+
+ enum ParameterIndices { kLeftIndex, kRightIndex, kParameterCount };
+ static const Register LeftRegister();
+ static const Register RightRegister();
+};
+
+
class KeyedDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(KeyedDescriptor, CallInterfaceDescriptor)
@@ -584,6 +639,17 @@ class ArgumentsAccessReadDescriptor : public CallInterfaceDescriptor {
};
+class ArgumentsAccessNewDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(ArgumentsAccessNewDescriptor,
+ CallInterfaceDescriptor)
+
+ static const Register function();
+ static const Register parameter_count();
+ static const Register parameter_pointer();
+};
+
+
class StoreArrayLiteralElementDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(StoreArrayLiteralElementDescriptor,
@@ -639,6 +705,12 @@ class GrowArrayElementsDescriptor : public CallInterfaceDescriptor {
static const Register KeyRegister();
};
+
+class PushArgsAndCallDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(PushArgsAndCallDescriptor, CallInterfaceDescriptor)
+};
+
#undef DECLARE_DESCRIPTOR
diff --git a/deps/v8/src/interpreter/DEPS b/deps/v8/src/interpreter/DEPS
index f6fc3f63d7..f8d6b98fd8 100644
--- a/deps/v8/src/interpreter/DEPS
+++ b/deps/v8/src/interpreter/DEPS
@@ -1,4 +1,3 @@
include_rules = [
"+src/compiler/interpreter-assembler.h",
- "-src/v8.h",
]
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 24fec96bfa..9c6b5905cc 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -8,9 +8,16 @@ namespace v8 {
namespace internal {
namespace interpreter {
-BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate)
+BytecodeArrayBuilder::BytecodeArrayBuilder(Isolate* isolate, Zone* zone)
: isolate_(isolate),
+ bytecodes_(zone),
bytecode_generated_(false),
+ last_block_end_(0),
+ last_bytecode_start_(~0),
+ return_seen_in_block_(false),
+ constants_map_(isolate->heap(), zone),
+ constants_(zone),
+ parameter_count_(-1),
local_register_count_(-1),
temporary_register_count_(0),
temporary_register_next_(0) {}
@@ -25,22 +32,100 @@ void BytecodeArrayBuilder::set_locals_count(int number_of_locals) {
int BytecodeArrayBuilder::locals_count() const { return local_register_count_; }
+void BytecodeArrayBuilder::set_parameter_count(int number_of_parameters) {
+ parameter_count_ = number_of_parameters;
+}
+
+
+int BytecodeArrayBuilder::parameter_count() const { return parameter_count_; }
+
+
+Register BytecodeArrayBuilder::Parameter(int parameter_index) {
+ DCHECK_GE(parameter_index, 0);
+ DCHECK_LT(parameter_index, parameter_count_);
+ return Register::FromParameterIndex(parameter_index, parameter_count_);
+}
+
+
Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray() {
DCHECK_EQ(bytecode_generated_, false);
+ DCHECK_GE(parameter_count_, 0);
DCHECK_GE(local_register_count_, 0);
+
+ EnsureReturn();
+
int bytecode_size = static_cast<int>(bytecodes_.size());
int register_count = local_register_count_ + temporary_register_count_;
int frame_size = register_count * kPointerSize;
- Handle<BytecodeArray> output = isolate_->factory()->NewBytecodeArray(
- bytecode_size, &bytecodes_.front(), frame_size);
+
+ Factory* factory = isolate_->factory();
+ int constants_count = static_cast<int>(constants_.size());
+ Handle<FixedArray> constant_pool =
+ factory->NewFixedArray(constants_count, TENURED);
+ for (int i = 0; i < constants_count; i++) {
+ constant_pool->set(i, *constants_[i]);
+ }
+
+ Handle<BytecodeArray> output =
+ factory->NewBytecodeArray(bytecode_size, &bytecodes_.front(), frame_size,
+ parameter_count_, constant_pool);
bytecode_generated_ = true;
return output;
}
-BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value binop,
+template <size_t N>
+void BytecodeArrayBuilder::Output(uint8_t(&bytes)[N]) {
+ DCHECK_EQ(Bytecodes::NumberOfOperands(Bytecodes::FromByte(bytes[0])),
+ static_cast<int>(N) - 1);
+ last_bytecode_start_ = bytecodes()->size();
+ for (int i = 1; i < static_cast<int>(N); i++) {
+ DCHECK(OperandIsValid(Bytecodes::FromByte(bytes[0]), i - 1, bytes[i]));
+ }
+ bytecodes()->insert(bytecodes()->end(), bytes, bytes + N);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0,
+ uint8_t operand1, uint8_t operand2) {
+ uint8_t bytes[] = {Bytecodes::ToByte(bytecode), operand0, operand1, operand2};
+ Output(bytes);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0,
+ uint8_t operand1) {
+ uint8_t bytes[] = {Bytecodes::ToByte(bytecode), operand0, operand1};
+ Output(bytes);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0) {
+ uint8_t bytes[] = {Bytecodes::ToByte(bytecode), operand0};
+ Output(bytes);
+}
+
+
+void BytecodeArrayBuilder::Output(Bytecode bytecode) {
+ uint8_t bytes[] = {Bytecodes::ToByte(bytecode)};
+ Output(bytes);
+}
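The array-reference template above deduces the instruction's width at compile time, so a single bottleneck validates operands for every arity while the fixed-arity overloads just assemble the bytes. The size-deduction trick in isolation (hypothetical Emit names):

#include <cstddef>
#include <cstdint>
#include <vector>

static std::vector<uint8_t> stream;

template <size_t N>
void Emit(uint8_t (&bytes)[N]) {  // N is deduced from the array reference
  stream.insert(stream.end(), bytes, bytes + N);
}

void Emit(uint8_t opcode, uint8_t operand0) {
  uint8_t bytes[] = {opcode, operand0};  // width fixed by the initializer
  Emit(bytes);                           // instantiates Emit<2>
}

int main() {
  Emit(0x01, 0x2A);
  return stream.size() == 2 ? 0 : 1;  // exit code 0: two bytes emitted
}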
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
Register reg) {
- Output(BytecodeForBinaryOperation(binop), reg.ToOperand());
+ Output(BytecodeForBinaryOperation(op), reg.ToOperand());
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CompareOperation(
+ Token::Value op, Register reg, LanguageMode language_mode) {
+ if (!is_sloppy(language_mode)) {
+ UNIMPLEMENTED();
+ }
+
+ Output(BytecodeForCompareOperation(op), reg.ToOperand());
return *this;
}
@@ -53,7 +138,17 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(
} else if (raw_smi >= -128 && raw_smi <= 127) {
Output(Bytecode::kLdaSmi8, static_cast<uint8_t>(raw_smi));
} else {
- // TODO(oth): Put Smi in constant pool.
+ LoadLiteral(Handle<Object>(smi, isolate_));
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadLiteral(Handle<Object> object) {
+ size_t entry = GetConstantPoolEntry(object);
+ if (FitsInIdxOperand(entry)) {
+ Output(Bytecode::kLdaConstant, static_cast<uint8_t>(entry));
+ } else {
UNIMPLEMENTED();
}
return *this;
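The chain above encodes a small Smi inline in a signed byte and routes anything larger through the constant pool. Just the range split, sketched with string labels standing in for the bytecodes:

#include <cstdio>

const char* EncodingFor(int raw_smi) {
  if (raw_smi >= -128 && raw_smi <= 127) return "LdaSmi8";  // inline imm8
  return "LdaConstant";  // interned in the constant pool, loaded by index
}

int main() {
  std::printf("%s %s %s\n", EncodingFor(-5), EncodingFor(127),
              EncodingFor(1000));
  // Prints: LdaSmi8 LdaSmi8 LdaConstant
}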
@@ -104,12 +199,284 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::StoreAccumulatorInRegister(
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadGlobal(int slot_index) {
+ DCHECK(slot_index >= 0);
+ if (FitsInIdxOperand(slot_index)) {
+ Output(Bytecode::kLdaGlobal, static_cast<uint8_t>(slot_index));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadNamedProperty(
+ Register object, int feedback_slot, LanguageMode language_mode) {
+ if (!is_sloppy(language_mode)) {
+ UNIMPLEMENTED();
+ }
+
+ if (FitsInIdxOperand(feedback_slot)) {
+ Output(Bytecode::kLoadIC, object.ToOperand(),
+ static_cast<uint8_t>(feedback_slot));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LoadKeyedProperty(
+ Register object, int feedback_slot, LanguageMode language_mode) {
+ if (!is_sloppy(language_mode)) {
+ UNIMPLEMENTED();
+ }
+
+ if (FitsInIdxOperand(feedback_slot)) {
+ Output(Bytecode::kKeyedLoadIC, object.ToOperand(),
+ static_cast<uint8_t>(feedback_slot));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreNamedProperty(
+ Register object, Register name, int feedback_slot,
+ LanguageMode language_mode) {
+ if (!is_sloppy(language_mode)) {
+ UNIMPLEMENTED();
+ }
+
+ if (FitsInIdxOperand(feedback_slot)) {
+ Output(Bytecode::kStoreIC, object.ToOperand(), name.ToOperand(),
+ static_cast<uint8_t>(feedback_slot));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::StoreKeyedProperty(
+ Register object, Register key, int feedback_slot,
+ LanguageMode language_mode) {
+ if (!is_sloppy(language_mode)) {
+ UNIMPLEMENTED();
+ }
+
+ if (FitsInIdxOperand(feedback_slot)) {
+ Output(Bytecode::kKeyedStoreIC, object.ToOperand(), key.ToOperand(),
+ static_cast<uint8_t>(feedback_slot));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::CastAccumulatorToBoolean() {
+ if (LastBytecodeInSameBlock()) {
+ // If the previous bytecode puts a boolean in the accumulator,
+ // there is no need to emit an instruction.
+ switch (Bytecodes::FromByte(bytecodes()->at(last_bytecode_start_))) {
+ case Bytecode::kToBoolean:
+ UNREACHABLE();
+ case Bytecode::kLdaTrue:
+ case Bytecode::kLdaFalse:
+ case Bytecode::kTestEqual:
+ case Bytecode::kTestNotEqual:
+ case Bytecode::kTestEqualStrict:
+ case Bytecode::kTestNotEqualStrict:
+ case Bytecode::kTestLessThan:
+ case Bytecode::kTestLessThanOrEqual:
+ case Bytecode::kTestGreaterThan:
+ case Bytecode::kTestGreaterThanOrEqual:
+ case Bytecode::kTestInstanceOf:
+ case Bytecode::kTestIn:
+ break;
+ default:
+ Output(Bytecode::kToBoolean);
+ }
+ }
+ return *this;
+}
+
+
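CastAccumulatorToBoolean above is a one-instruction peephole: when the previous bytecode in the same basic block already left a boolean in the accumulator, the ToBoolean is elided. The same check, sketched with a hypothetical opcode set:

#include <vector>

enum class Op { LdaTrue, TestEqual, Add, ToBoolean };

static std::vector<Op> code;

bool ProducesBoolean(Op op) {
  return op == Op::LdaTrue || op == Op::TestEqual;  // toy whitelist
}

void CastToBoolean(bool last_in_same_block) {
  if (last_in_same_block && !code.empty() && ProducesBoolean(code.back()))
    return;  // the accumulator is already a boolean; emit nothing
  code.push_back(Op::ToBoolean);
}

int main() {
  code.push_back(Op::TestEqual);
  CastToBoolean(/*last_in_same_block=*/true);
  return code.size() == 1 ? 0 : 1;  // exit code 0: ToBoolean was elided
}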
+BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
+ if (label->is_forward_target()) {
+ // An earlier jump instruction refers to this label. Update its location.
+ PatchJump(bytecodes()->end(), bytecodes()->begin() + label->offset());
+ // From now on, treat the label as referred to only by backward jumps.
+ }
+ label->bind_to(bytecodes()->size());
+ return *this;
+}
+
+
+// static
+bool BytecodeArrayBuilder::IsJumpWithImm8Operand(Bytecode jump_bytecode) {
+ return jump_bytecode == Bytecode::kJump ||
+ jump_bytecode == Bytecode::kJumpIfTrue ||
+ jump_bytecode == Bytecode::kJumpIfFalse;
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::GetJumpWithConstantOperand(
+ Bytecode jump_bytecode) {
+ switch (jump_bytecode) {
+ case Bytecode::kJump:
+ return Bytecode::kJumpConstant;
+ case Bytecode::kJumpIfTrue:
+ return Bytecode::kJumpIfTrueConstant;
+ case Bytecode::kJumpIfFalse:
+ return Bytecode::kJumpIfFalseConstant;
+ default:
+ UNREACHABLE();
+ return Bytecode::kJumpConstant;
+ }
+}
+
+
+void BytecodeArrayBuilder::PatchJump(
+ const ZoneVector<uint8_t>::iterator& jump_target,
+ ZoneVector<uint8_t>::iterator jump_location) {
+ Bytecode jump_bytecode = Bytecodes::FromByte(*jump_location);
+ int delta = static_cast<int>(jump_target - jump_location);
+
+ DCHECK(IsJumpWithImm8Operand(jump_bytecode));
+ DCHECK_EQ(Bytecodes::Size(jump_bytecode), 2);
+ DCHECK_GE(delta, 0);
+
+ if (FitsInImm8Operand(delta)) {
+ // Just update the operand
+ jump_location++;
+ *jump_location = static_cast<uint8_t>(delta);
+ } else {
+ // Update the jump type and operand
+ size_t entry = GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
+ if (FitsInIdxOperand(entry)) {
+ *jump_location++ =
+ Bytecodes::ToByte(GetJumpWithConstantOperand(jump_bytecode));
+ *jump_location = static_cast<uint8_t>(entry);
+ } else {
+ // TODO(oth): OutputJump should reserve a constant pool entry
+ // when jump is written. The reservation should be used here if
+ // needed, or cancelled if not. This is due to the patch needing
+ // to match the size of the code it's replacing. In future,
+ // there will probably be a jump with 32-bit operand for cases
+ // when constant pool is full, but that needs to be emitted in
+ // OutputJump too.
+ UNIMPLEMENTED();
+ }
+ }
+}
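PatchJump rewrites an already-emitted forward jump once its target is known: a delta that fits a signed byte only needs its operand byte rewritten; otherwise the opcode is swapped for the constant-pool variant of the same jump, keeping the two-byte width. A toy version of that patch step (opcode values are invented):

#include <cstdint>
#include <vector>

enum : uint8_t { kJump = 0x10, kJumpConstant = 0x11 };

void PatchJump(std::vector<uint8_t>& code, size_t jump_location,
               size_t jump_target, uint8_t constant_pool_index) {
  size_t delta = jump_target - jump_location;
  if (delta <= 127) {
    code[jump_location + 1] = static_cast<uint8_t>(delta);  // operand only
  } else {
    code[jump_location] = kJumpConstant;  // same width, pool-indexed operand
    code[jump_location + 1] = constant_pool_index;
  }
}

int main() {
  std::vector<uint8_t> code = {kJump, 0x00, 0x00, 0x00};
  PatchJump(code, 0, 3, /*constant_pool_index=*/0);
  return code[1] == 3 ? 0 : 1;  // exit code 0: operand patched in place
}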
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::OutputJump(Bytecode jump_bytecode,
+ BytecodeLabel* label) {
+ int delta;
+ if (label->is_bound()) {
+ // Label has been bound already so this is a backwards jump.
+ CHECK_GE(bytecodes()->size(), label->offset());
+ CHECK_LE(bytecodes()->size(), static_cast<size_t>(kMaxInt));
+ size_t abs_delta = bytecodes()->size() - label->offset();
+ delta = -static_cast<int>(abs_delta);
+ } else {
+ // Label has not yet been bound so this is a forward reference
+ // that will be patched when the label is bound.
+ label->set_referrer(bytecodes()->size());
+ delta = 0;
+ }
+
+ if (FitsInImm8Operand(delta)) {
+ Output(jump_bytecode, static_cast<uint8_t>(delta));
+ } else {
+ size_t entry = GetConstantPoolEntry(handle(Smi::FromInt(delta), isolate()));
+ if (FitsInIdxOperand(entry)) {
+ Output(GetJumpWithConstantOperand(jump_bytecode),
+ static_cast<uint8_t>(entry));
+ } else {
+ UNIMPLEMENTED();
+ }
+ }
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::Jump(BytecodeLabel* label) {
+ return OutputJump(Bytecode::kJump, label);
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfTrue(BytecodeLabel* label) {
+ return OutputJump(Bytecode::kJumpIfTrue, label);
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::JumpIfFalse(BytecodeLabel* label) {
+ return OutputJump(Bytecode::kJumpIfFalse, label);
+}
+
+
BytecodeArrayBuilder& BytecodeArrayBuilder::Return() {
Output(Bytecode::kReturn);
+ return_seen_in_block_ = true;
+ return *this;
+}
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::EnterBlock() { return *this; }
+
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::LeaveBlock() {
+ last_block_end_ = bytecodes()->size();
+ return_seen_in_block_ = false;
return *this;
}
+void BytecodeArrayBuilder::EnsureReturn() {
+ if (!return_seen_in_block_) {
+ LoadUndefined();
+ Return();
+ }
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::Call(Register callable,
+ Register receiver,
+ size_t arg_count) {
+ if (FitsInIdxOperand(arg_count)) {
+ Output(Bytecode::kCall, callable.ToOperand(), receiver.ToOperand(),
+ static_cast<uint8_t>(arg_count));
+ } else {
+ UNIMPLEMENTED();
+ }
+ return *this;
+}
+
+
+size_t BytecodeArrayBuilder::GetConstantPoolEntry(Handle<Object> object) {
+ // These constants shouldn't be added to the constant pool; they should use
+ // specialized bytecodes instead.
+ DCHECK(!object.is_identical_to(isolate_->factory()->undefined_value()));
+ DCHECK(!object.is_identical_to(isolate_->factory()->null_value()));
+ DCHECK(!object.is_identical_to(isolate_->factory()->the_hole_value()));
+ DCHECK(!object.is_identical_to(isolate_->factory()->true_value()));
+ DCHECK(!object.is_identical_to(isolate_->factory()->false_value()));
+
+ size_t* entry = constants_map_.Find(object);
+ if (!entry) {
+ entry = constants_map_.Get(object);
+ *entry = constants_.size();
+ constants_.push_back(object);
+ }
+ DCHECK(constants_[*entry].is_identical_to(object));
+ return *entry;
+}
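GetConstantPoolEntry interns each handle: the IdentityMap yields one pool index per distinct object, so a repeated literal shares its slot. The same intern-on-first-use shape, sketched with pointer identity in place of V8 handles:

#include <cstdio>
#include <unordered_map>
#include <vector>

static std::unordered_map<const void*, size_t> constants_map;
static std::vector<const void*> constants;

size_t GetEntry(const void* object) {
  auto it = constants_map.find(object);
  if (it != constants_map.end()) return it->second;  // already interned
  size_t entry = constants.size();
  constants.push_back(object);
  constants_map.emplace(object, entry);
  return entry;
}

int main() {
  int a = 0, b = 0;
  std::printf("%zu %zu %zu\n", GetEntry(&a), GetEntry(&b), GetEntry(&a));
  // Prints "0 1 0": the repeated object reuses its pool slot.
}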
+
+
int BytecodeArrayBuilder::BorrowTemporaryRegister() {
DCHECK_GE(local_register_count_, 0);
int temporary_reg_index = temporary_register_next_++;
@@ -133,52 +500,27 @@ bool BytecodeArrayBuilder::OperandIsValid(Bytecode bytecode, int operand_index,
switch (operand_type) {
case OperandType::kNone:
return false;
+ case OperandType::kCount:
case OperandType::kImm8:
+ case OperandType::kIdx:
return true;
- case OperandType::kReg:
- return Register::FromOperand(operand_value).index() <
- temporary_register_next_;
+ case OperandType::kReg: {
+ Register reg = Register::FromOperand(operand_value);
+ if (reg.is_parameter()) {
+ int parameter_index = reg.ToParameterIndex(parameter_count_);
+ return parameter_index >= 0 && parameter_index < parameter_count_;
+ } else {
+ return (reg.index() >= 0 && reg.index() < temporary_register_next_);
+ }
+ }
}
UNREACHABLE();
return false;
}
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0,
- uint8_t operand1, uint8_t operand2) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 3);
- DCHECK(OperandIsValid(bytecode, 0, operand0) &&
- OperandIsValid(bytecode, 1, operand1) &&
- OperandIsValid(bytecode, 2, operand2));
- bytecodes_.push_back(Bytecodes::ToByte(bytecode));
- bytecodes_.push_back(operand0);
- bytecodes_.push_back(operand1);
- bytecodes_.push_back(operand2);
-}
-
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0,
- uint8_t operand1) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 2);
- DCHECK(OperandIsValid(bytecode, 0, operand0) &&
- OperandIsValid(bytecode, 1, operand1));
- bytecodes_.push_back(Bytecodes::ToByte(bytecode));
- bytecodes_.push_back(operand0);
- bytecodes_.push_back(operand1);
-}
-
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode, uint8_t operand0) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 1);
- DCHECK(OperandIsValid(bytecode, 0, operand0));
- bytecodes_.push_back(Bytecodes::ToByte(bytecode));
- bytecodes_.push_back(operand0);
-}
-
-
-void BytecodeArrayBuilder::Output(Bytecode bytecode) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode), 0);
- bytecodes_.push_back(Bytecodes::ToByte(bytecode));
+bool BytecodeArrayBuilder::LastBytecodeInSameBlock() const {
+ return last_bytecode_start_ < bytecodes()->size() &&
+ last_bytecode_start_ >= last_block_end_;
}
@@ -193,13 +535,63 @@ Bytecode BytecodeArrayBuilder::BytecodeForBinaryOperation(Token::Value op) {
return Bytecode::kMul;
case Token::Value::DIV:
return Bytecode::kDiv;
+ case Token::Value::MOD:
+ return Bytecode::kMod;
default:
- UNIMPLEMENTED();
+ UNREACHABLE();
+ return static_cast<Bytecode>(-1);
+ }
+}
+
+
+// static
+Bytecode BytecodeArrayBuilder::BytecodeForCompareOperation(Token::Value op) {
+ switch (op) {
+ case Token::Value::EQ:
+ return Bytecode::kTestEqual;
+ case Token::Value::NE:
+ return Bytecode::kTestNotEqual;
+ case Token::Value::EQ_STRICT:
+ return Bytecode::kTestEqualStrict;
+ case Token::Value::NE_STRICT:
+ return Bytecode::kTestNotEqualStrict;
+ case Token::Value::LT:
+ return Bytecode::kTestLessThan;
+ case Token::Value::GT:
+ return Bytecode::kTestGreaterThan;
+ case Token::Value::LTE:
+ return Bytecode::kTestLessThanOrEqual;
+ case Token::Value::GTE:
+ return Bytecode::kTestGreaterThanOrEqual;
+ case Token::Value::INSTANCEOF:
+ return Bytecode::kTestInstanceOf;
+ case Token::Value::IN:
+ return Bytecode::kTestIn;
+ default:
+ UNREACHABLE();
return static_cast<Bytecode>(-1);
}
}
+// static
+bool BytecodeArrayBuilder::FitsInIdxOperand(int value) {
+ return kMinUInt8 <= value && value <= kMaxUInt8;
+}
+
+
+// static
+bool BytecodeArrayBuilder::FitsInIdxOperand(size_t value) {
+ return value <= static_cast<size_t>(kMaxUInt8);
+}
+
+
+// static
+bool BytecodeArrayBuilder::FitsInImm8Operand(int value) {
+ return kMinInt8 <= value && value <= kMaxInt8;
+}
+
+
TemporaryRegisterScope::TemporaryRegisterScope(BytecodeArrayBuilder* builder)
: builder_(builder), count_(0), last_register_index_(-1) {}
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index c4ab816665..d68d5e7ffb 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -8,7 +8,10 @@
#include <vector>
#include "src/ast.h"
+#include "src/identity-map.h"
#include "src/interpreter/bytecodes.h"
+#include "src/zone.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
@@ -17,53 +20,129 @@ class Isolate;
namespace interpreter {
+class BytecodeLabel;
class Register;
class BytecodeArrayBuilder {
public:
- explicit BytecodeArrayBuilder(Isolate* isolate);
+ BytecodeArrayBuilder(Isolate* isolate, Zone* zone);
Handle<BytecodeArray> ToBytecodeArray();
+ // Set number of parameters expected by function.
+ void set_parameter_count(int number_of_params);
+ int parameter_count() const;
+
// Set number of locals required for bytecode array.
void set_locals_count(int number_of_locals);
int locals_count() const;
+ Register Parameter(int parameter_index);
+
// Constant loads to accumulator.
BytecodeArrayBuilder& LoadLiteral(v8::internal::Smi* value);
+ BytecodeArrayBuilder& LoadLiteral(Handle<Object> object);
BytecodeArrayBuilder& LoadUndefined();
BytecodeArrayBuilder& LoadNull();
BytecodeArrayBuilder& LoadTheHole();
BytecodeArrayBuilder& LoadTrue();
BytecodeArrayBuilder& LoadFalse();
+ // Global loads to accumulator.
+ BytecodeArrayBuilder& LoadGlobal(int slot_index);
+
// Register-accumulator transfers.
BytecodeArrayBuilder& LoadAccumulatorWithRegister(Register reg);
BytecodeArrayBuilder& StoreAccumulatorInRegister(Register reg);
- // Operators.
+ // Load properties. The property name should be in the accumulator.
+ BytecodeArrayBuilder& LoadNamedProperty(Register object, int feedback_slot,
+ LanguageMode language_mode);
+ BytecodeArrayBuilder& LoadKeyedProperty(Register object, int feedback_slot,
+ LanguageMode language_mode);
+
+ // Store properties. The value to be stored should be in the accumulator.
+ BytecodeArrayBuilder& StoreNamedProperty(Register object, Register name,
+ int feedback_slot,
+ LanguageMode language_mode);
+ BytecodeArrayBuilder& StoreKeyedProperty(Register object, Register key,
+ int feedback_slot,
+ LanguageMode language_mode);
+
+ // Call a JS function. The JSFunction or Callable to be called should be in
+ // |callable|, the receiver should be in |receiver| and all subsequent
+ // arguments should be in registers <receiver + 1> to
+ // <receiver + 1 + arg_count>.
+ BytecodeArrayBuilder& Call(Register callable, Register receiver,
+ size_t arg_count);
+
+ // Operators (register == lhs, accumulator = rhs).
BytecodeArrayBuilder& BinaryOperation(Token::Value binop, Register reg);
+ // Tests.
+ BytecodeArrayBuilder& CompareOperation(Token::Value op, Register reg,
+ LanguageMode language_mode);
+
+ // Casts
+ BytecodeArrayBuilder& CastAccumulatorToBoolean();
+
// Flow Control.
+ BytecodeArrayBuilder& Bind(BytecodeLabel* label);
+ BytecodeArrayBuilder& Jump(BytecodeLabel* label);
+ BytecodeArrayBuilder& JumpIfTrue(BytecodeLabel* label);
+ BytecodeArrayBuilder& JumpIfFalse(BytecodeLabel* label);
BytecodeArrayBuilder& Return();
+ BytecodeArrayBuilder& EnterBlock();
+ BytecodeArrayBuilder& LeaveBlock();
+
private:
- static Bytecode BytecodeForBinaryOperation(Token::Value op);
+ ZoneVector<uint8_t>* bytecodes() { return &bytecodes_; }
+ const ZoneVector<uint8_t>* bytecodes() const { return &bytecodes_; }
+ Isolate* isolate() const { return isolate_; }
- void Output(Bytecode bytecode, uint8_t r0, uint8_t r1, uint8_t r2);
- void Output(Bytecode bytecode, uint8_t r0, uint8_t r1);
- void Output(Bytecode bytecode, uint8_t r0);
+ static Bytecode BytecodeForBinaryOperation(Token::Value op);
+ static Bytecode BytecodeForCompareOperation(Token::Value op);
+ static bool FitsInIdxOperand(int value);
+ static bool FitsInIdxOperand(size_t value);
+ static bool FitsInImm8Operand(int value);
+ static bool IsJumpWithImm8Operand(Bytecode jump_bytecode);
+ static Bytecode GetJumpWithConstantOperand(Bytecode jump_with_smi8_operand);
+
+ template <size_t N>
+ INLINE(void Output(uint8_t(&bytes)[N]));
+ void Output(Bytecode bytecode, uint8_t operand0, uint8_t operand1,
+ uint8_t operand2);
+ void Output(Bytecode bytecode, uint8_t operand0, uint8_t operand1);
+ void Output(Bytecode bytecode, uint8_t operand0);
void Output(Bytecode bytecode);
+ void PatchJump(const ZoneVector<uint8_t>::iterator& jump_target,
+ ZoneVector<uint8_t>::iterator jump_location);
+ BytecodeArrayBuilder& OutputJump(Bytecode jump_bytecode,
+ BytecodeLabel* label);
+
+ void EnsureReturn();
bool OperandIsValid(Bytecode bytecode, int operand_index,
uint8_t operand_value) const;
+ bool LastBytecodeInSameBlock() const;
+
+ size_t GetConstantPoolEntry(Handle<Object> object);
+ // Scope helpers used by TemporaryRegisterScope
int BorrowTemporaryRegister();
void ReturnTemporaryRegister(int reg_index);
Isolate* isolate_;
- std::vector<uint8_t> bytecodes_;
+ ZoneVector<uint8_t> bytecodes_;
bool bytecode_generated_;
+ size_t last_block_end_;
+ size_t last_bytecode_start_;
+ bool return_seen_in_block_;
+ IdentityMap<size_t> constants_map_;
+ ZoneVector<Handle<Object>> constants_;
+
+ int parameter_count_;
int local_register_count_;
int temporary_register_count_;
int temporary_register_next_;
@@ -72,29 +151,47 @@ class BytecodeArrayBuilder {
DISALLOW_IMPLICIT_CONSTRUCTORS(BytecodeArrayBuilder);
};
-// An interpreter register which is located in the function's regsiter file
-// in its stack-frame.
-class Register {
+
+// A label representing a branch target in a bytecode array. Once bound,
+// it represents a known position in the bytecode array. While unbound, a
+// label used as a forward reference can have at most one referring jump.
+class BytecodeLabel final {
public:
- static const int kMaxRegisterIndex = 128;
+ BytecodeLabel() : bound_(false), offset_(kInvalidOffset) {}
+ ~BytecodeLabel() { DCHECK(bound_ && offset_ != kInvalidOffset); }
- explicit Register(int index) : index_(index) {
- DCHECK_LE(index_, kMaxRegisterIndex);
- }
+ private:
+ static const size_t kInvalidOffset = static_cast<size_t>(-1);
- int index() { return index_; }
- uint8_t ToOperand() { return static_cast<uint8_t>(-index_); }
- static Register FromOperand(uint8_t operand) {
- return Register(-static_cast<int8_t>(operand));
+ INLINE(void bind_to(size_t offset)) {
+ DCHECK(!bound_ && offset != kInvalidOffset);
+ offset_ = offset;
+ bound_ = true;
+ }
+ INLINE(void set_referrer(size_t offset)) {
+ DCHECK(!bound_ && offset != kInvalidOffset);
+ offset_ = offset;
+ }
+ INLINE(size_t offset() const) { return offset_; }
+ INLINE(bool is_bound() const) { return bound_; }
+ INLINE(bool is_forward_target() const) {
+ return offset() != kInvalidOffset && !is_bound();
}
- private:
- void* operator new(size_t size);
- void operator delete(void* p);
+  // There are three states for a label:
+  //                      bound_   offset_
+  //   UNSET              false    kInvalidOffset
+  //   FORWARD_TARGET     false    Offset of referring jump
+  //   BACKWARD_TARGET    true     Offset of label in bytecode array when bound
+ bool bound_;
+ size_t offset_;
- int index_;
+ friend class BytecodeArrayBuilder;
+ DISALLOW_COPY_AND_ASSIGN(BytecodeLabel);
};
+
// A stack-allocated class than allows the instantiator to allocate
// temporary registers that are cleaned up when scope is closed.
class TemporaryRegisterScope {
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.cc b/deps/v8/src/interpreter/bytecode-array-iterator.cc
new file mode 100644
index 0000000000..dc49308fbe
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.cc
@@ -0,0 +1,72 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-array-iterator.h"
+
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+BytecodeArrayIterator::BytecodeArrayIterator(
+ Handle<BytecodeArray> bytecode_array)
+ : bytecode_array_(bytecode_array), bytecode_offset_(0) {}
+
+
+void BytecodeArrayIterator::Advance() {
+ bytecode_offset_ += Bytecodes::Size(current_bytecode());
+}
+
+
+bool BytecodeArrayIterator::done() const {
+ return bytecode_offset_ >= bytecode_array()->length();
+}
+
+
+Bytecode BytecodeArrayIterator::current_bytecode() const {
+ DCHECK(!done());
+ uint8_t current_byte = bytecode_array()->get(bytecode_offset_);
+ return interpreter::Bytecodes::FromByte(current_byte);
+}
+
+
+uint8_t BytecodeArrayIterator::GetRawOperand(int operand_index,
+ OperandType operand_type) const {
+ DCHECK_GE(operand_index, 0);
+ DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(current_bytecode()));
+ DCHECK_EQ(operand_type,
+ Bytecodes::GetOperandType(current_bytecode(), operand_index));
+ int operands_start = bytecode_offset_ + 1;
+ return bytecode_array()->get(operands_start + operand_index);
+}
+
+
+int8_t BytecodeArrayIterator::GetSmi8Operand(int operand_index) const {
+ uint8_t operand = GetRawOperand(operand_index, OperandType::kImm8);
+ return static_cast<int8_t>(operand);
+}
+
+
+int BytecodeArrayIterator::GetIndexOperand(int operand_index) const {
+ uint8_t operand = GetRawOperand(operand_index, OperandType::kIdx);
+ return static_cast<int>(operand);
+}
+
+
+Register BytecodeArrayIterator::GetRegisterOperand(int operand_index) const {
+ uint8_t operand = GetRawOperand(operand_index, OperandType::kReg);
+ return Register::FromOperand(operand);
+}
+
+
+Handle<Object> BytecodeArrayIterator::GetConstantForIndexOperand(
+ int operand_index) const {
+ Handle<FixedArray> constants = handle(bytecode_array()->constant_pool());
+ return FixedArray::get(constants, GetIndexOperand(operand_index));
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
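
A minimal usage sketch for the new iterator (not part of the diff); `bytecodes` is assumed to be the Handle<BytecodeArray> returned by BytecodeGenerator::MakeBytecode():

    BytecodeArrayIterator iterator(bytecodes);
    while (!iterator.done()) {
      Bytecode bytecode = iterator.current_bytecode();
      if (bytecode == Bytecode::kLdaSmi8) {
        int8_t imm = iterator.GetSmi8Operand(0);  // Imm8 operands are sign-extended
        USE(imm);
      } else if (bytecode == Bytecode::kLdaConstant) {
        // Idx operands index the array's constant pool.
        Handle<Object> constant = iterator.GetConstantForIndexOperand(0);
        USE(constant);
      }
      iterator.Advance();  // steps by Bytecodes::Size(current_bytecode())
    }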
diff --git a/deps/v8/src/interpreter/bytecode-array-iterator.h b/deps/v8/src/interpreter/bytecode-array-iterator.h
new file mode 100644
index 0000000000..0d9011f242
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-array-iterator.h
@@ -0,0 +1,48 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
+#define V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
+
+#include "src/handles.h"
+#include "src/interpreter/bytecodes.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayIterator {
+ public:
+ explicit BytecodeArrayIterator(Handle<BytecodeArray> bytecode_array);
+
+ void Advance();
+ bool done() const;
+ Bytecode current_bytecode() const;
+ int current_offset() const { return bytecode_offset_; }
+ const Handle<BytecodeArray>& bytecode_array() const {
+ return bytecode_array_;
+ }
+
+ int8_t GetSmi8Operand(int operand_index) const;
+ int GetIndexOperand(int operand_index) const;
+ Register GetRegisterOperand(int operand_index) const;
+ Handle<Object> GetConstantForIndexOperand(int operand_index) const;
+
+ // Get the raw byte for the given operand. Note: you should prefer using the
+ // typed versions above, which cast the return value to an appropriate type.
+ uint8_t GetRawOperand(int operand_index, OperandType operand_type) const;
+
+ private:
+ Handle<BytecodeArray> bytecode_array_;
+ int bytecode_offset_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeArrayIterator);
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_ARRAY_ITERATOR_H_
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 9cce681ad4..7257fd4134 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -16,7 +16,7 @@ namespace internal {
namespace interpreter {
BytecodeGenerator::BytecodeGenerator(Isolate* isolate, Zone* zone)
- : builder_(isolate) {
+ : builder_(isolate, zone) {
InitializeAstVisitor(isolate, zone);
}
@@ -25,11 +25,13 @@ BytecodeGenerator::~BytecodeGenerator() {}
Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
+ set_info(info);
set_scope(info->scope());
// This is a temporary guard (oth).
DCHECK(scope()->is_function_scope());
+ builder().set_parameter_count(info->num_parameters_including_this());
builder().set_locals_count(scope()->num_stack_slots());
// Visit implicit declaration of the function name.
@@ -44,11 +46,13 @@ Handle<BytecodeArray> BytecodeGenerator::MakeBytecode(CompilationInfo* info) {
VisitStatements(info->literal()->body());
set_scope(nullptr);
+ set_info(nullptr);
return builder_.ToBytecodeArray();
}
void BytecodeGenerator::VisitBlock(Block* node) {
+ builder().EnterBlock();
if (node->scope() == NULL) {
// Visit statements in the same scope, no declarations.
VisitStatements(node->statements());
@@ -61,6 +65,7 @@ void BytecodeGenerator::VisitBlock(Block* node) {
VisitStatements(node->statements());
}
}
+ builder().LeaveBlock();
}
@@ -72,8 +77,6 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
UNIMPLEMENTED();
break;
case VariableLocation::PARAMETER:
- UNIMPLEMENTED();
- break;
case VariableLocation::LOCAL:
// Details stored in scope, i.e. variable index.
break;
@@ -85,17 +88,17 @@ void BytecodeGenerator::VisitVariableDeclaration(VariableDeclaration* decl) {
}
-void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* node) {
+void BytecodeGenerator::VisitFunctionDeclaration(FunctionDeclaration* decl) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitImportDeclaration(ImportDeclaration* node) {
+void BytecodeGenerator::VisitImportDeclaration(ImportDeclaration* decl) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitExportDeclaration(ExportDeclaration* node) {
+void BytecodeGenerator::VisitExportDeclaration(ExportDeclaration* decl) {
UNIMPLEMENTED();
}
@@ -105,36 +108,57 @@ void BytecodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
}
-void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
- UNIMPLEMENTED();
+void BytecodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
+ // TODO(oth): For control-flow it could be useful to signal empty paths here.
}
-void BytecodeGenerator::VisitIfStatement(IfStatement* node) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitIfStatement(IfStatement* stmt) {
+ BytecodeLabel else_start, else_end;
+ // TODO(oth): Spot easy cases where the code would not need to
+ // emit the then block or the else block, e.g. the condition is
+ // obviously true/1/false/0.
+ Visit(stmt->condition());
+ builder().CastAccumulatorToBoolean();
+ builder().JumpIfFalse(&else_start);
+
+ Visit(stmt->then_statement());
+ builder().Jump(&else_end);
+ builder().Bind(&else_start);
+
+ Visit(stmt->else_statement());
+ builder().Bind(&else_end);
+}
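
For reference, a sketch (not part of the diff) of the shape this lowers `if (c) t; else e;` into; labels are placeholders the builder resolves at Bind time:

    // acc <- c                  Visit(stmt->condition())
    // ToBoolean                 CastAccumulatorToBoolean()
    // JumpIfFalse @else_start   forward reference, back-patched at Bind()
    // acc <- t                  Visit(stmt->then_statement())
    // Jump @else_end
    // else_start:
    // acc <- e                  Visit(stmt->else_statement())
    // else_end: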
-void BytecodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+void BytecodeGenerator::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* stmt) {
+ Visit(stmt->statement());
+}
+
+
+void BytecodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitBreakStatement(BreakStatement* node) {
+void BytecodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- Visit(node->expression());
+void BytecodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
+ Visit(stmt->expression());
builder().Return();
}
-void BytecodeGenerator::VisitWithStatement(WithStatement* node) {
+void BytecodeGenerator::VisitWithStatement(WithStatement* stmt) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
UNIMPLEMENTED();
}
@@ -142,70 +166,66 @@ void BytecodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
void BytecodeGenerator::VisitCaseClause(CaseClause* clause) { UNIMPLEMENTED(); }
-void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
+void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitWhileStatement(WhileStatement* node) {
+void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitForStatement(ForStatement* node) {
+void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitForInStatement(ForInStatement* node) {
+void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitForOfStatement(ForOfStatement* node) {
+void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
+void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
+void BytecodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+void BytecodeGenerator::VisitDebuggerStatement(DebuggerStatement* stmt) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitClassLiteral(ClassLiteral* node) {
+void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
UNIMPLEMENTED();
}
void BytecodeGenerator::VisitNativeFunctionLiteral(
- NativeFunctionLiteral* node) {
+ NativeFunctionLiteral* expr) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitConditional(Conditional* node) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitConditional(Conditional* expr) { UNIMPLEMENTED(); }
void BytecodeGenerator::VisitLiteral(Literal* expr) {
- if (expr->IsPropertyName()) {
- UNIMPLEMENTED();
- }
-
Handle<Object> value = expr->value();
if (value->IsSmi()) {
builder().LoadLiteral(Smi::cast(*value));
@@ -220,37 +240,55 @@ void BytecodeGenerator::VisitLiteral(Literal* expr) {
} else if (value->IsTheHole()) {
builder().LoadTheHole();
} else {
- UNIMPLEMENTED();
+ builder().LoadLiteral(value);
}
}
-void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+void BytecodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
UNIMPLEMENTED();
}
void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
- Variable* variable = proxy->var();
+ VisitVariableLoad(proxy->var());
+}
+
+
+void BytecodeGenerator::VisitVariableLoad(Variable* variable) {
switch (variable->location()) {
case VariableLocation::LOCAL: {
Register source(variable->index());
builder().LoadAccumulatorWithRegister(source);
break;
}
- case VariableLocation::GLOBAL:
+ case VariableLocation::PARAMETER: {
+ // The parameter indices are shifted by 1 (receiver is variable
+ // index -1 but is parameter index 0 in BytecodeArrayBuilder).
+ Register source(builder().Parameter(variable->index() + 1));
+ builder().LoadAccumulatorWithRegister(source);
+ break;
+ }
+ case VariableLocation::GLOBAL: {
+ // Global var, const, or let variable.
+ // TODO(rmcilroy): If context chain depth is short enough, do this using
+ // a generic version of LoadGlobalViaContextStub rather than calling the
+ // runtime.
+ DCHECK(variable->IsStaticGlobalObjectProperty());
+ builder().LoadGlobal(variable->index());
+ break;
+ }
case VariableLocation::UNALLOCATED:
- case VariableLocation::PARAMETER:
case VariableLocation::CONTEXT:
case VariableLocation::LOOKUP:
UNIMPLEMENTED();
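
Concretely (an editorial note): for `function f(a, b)` the scope numbers declared parameters from 0, while the builder reserves parameter 0 for the receiver, so the mapping performed above is:

    // <this>   variable index -1   ->   builder().Parameter(0)
    // a        variable index  0   ->   builder().Parameter(1)
    // b        variable index  1   ->   builder().Parameter(2)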
@@ -260,14 +298,49 @@ void BytecodeGenerator::VisitVariableProxy(VariableProxy* proxy) {
void BytecodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpression());
+ TemporaryRegisterScope temporary_register_scope(&builder_);
+ Register object, key;
// Left-hand side can only be a property, a global or a variable slot.
Property* property = expr->target()->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
- DCHECK(!expr->is_compound());
- Visit(expr->value());
+ // Evaluate LHS expression.
+ switch (assign_type) {
+ case VARIABLE:
+ // Nothing to do to evaluate variable assignment LHS.
+ break;
+ case NAMED_PROPERTY:
+ object = temporary_register_scope.NewRegister();
+ key = temporary_register_scope.NewRegister();
+ Visit(property->obj());
+ builder().StoreAccumulatorInRegister(object);
+ builder().LoadLiteral(property->key()->AsLiteral()->AsPropertyName());
+ builder().StoreAccumulatorInRegister(key);
+ break;
+ case KEYED_PROPERTY:
+ object = temporary_register_scope.NewRegister();
+ key = temporary_register_scope.NewRegister();
+ Visit(property->obj());
+ builder().StoreAccumulatorInRegister(object);
+ Visit(property->key());
+ builder().StoreAccumulatorInRegister(key);
+ break;
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNIMPLEMENTED();
+ }
+
+ // Evaluate the value and potentially handle compound assignments by loading
+ // the left-hand side value and performing a binary operation.
+ if (expr->is_compound()) {
+ UNIMPLEMENTED();
+ } else {
+ Visit(expr->value());
+ }
+ // Store the value.
+ FeedbackVectorICSlot slot = expr->AssignmentSlot();
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->target()->AsVariableProxy()->var();
@@ -277,7 +350,13 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
break;
}
case NAMED_PROPERTY:
+ builder().StoreNamedProperty(object, key, feedback_index(slot),
+ language_mode());
+ break;
case KEYED_PROPERTY:
+ builder().StoreKeyedProperty(object, key, feedback_index(slot),
+ language_mode());
+ break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
UNIMPLEMENTED();
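
The register discipline this sets up for a plain named store `o.x = v` (an editorial sketch; register numbers illustrative):

    // r0  <- o                  Visit(property->obj()); Star r0
    // r1  <- "x"                LdaConstant "x"; Star r1
    // acc <- v                  Visit(expr->value())
    // StoreIC r0, r1, [slot]    from StoreNamedProperty(); DoPropertyStoreIC reads
    //                           object/name from r0/r1 and the value from acc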
@@ -285,30 +364,111 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
-void BytecodeGenerator::VisitYield(Yield* node) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitYield(Yield* expr) { UNIMPLEMENTED(); }
-void BytecodeGenerator::VisitThrow(Throw* node) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitThrow(Throw* expr) { UNIMPLEMENTED(); }
-void BytecodeGenerator::VisitProperty(Property* node) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitPropertyLoad(Register obj, Property* expr) {
+ LhsKind property_kind = Property::GetAssignType(expr);
+ FeedbackVectorICSlot slot = expr->PropertyFeedbackSlot();
+ switch (property_kind) {
+ case VARIABLE:
+ UNREACHABLE();
+ case NAMED_PROPERTY: {
+ builder().LoadLiteral(expr->key()->AsLiteral()->AsPropertyName());
+ builder().LoadNamedProperty(obj, feedback_index(slot), language_mode());
+ break;
+ }
+ case KEYED_PROPERTY: {
+ Visit(expr->key());
+ builder().LoadKeyedProperty(obj, feedback_index(slot), language_mode());
+ break;
+ }
+ case NAMED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY:
+ UNIMPLEMENTED();
+ }
+}
-void BytecodeGenerator::VisitCall(Call* node) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitProperty(Property* expr) {
+ TemporaryRegisterScope temporary_register_scope(&builder_);
+ Register obj = temporary_register_scope.NewRegister();
+ Visit(expr->obj());
+ builder().StoreAccumulatorInRegister(obj);
+ VisitPropertyLoad(obj, expr);
+}
-void BytecodeGenerator::VisitCallNew(CallNew* node) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitCall(Call* expr) {
+ Expression* callee_expr = expr->expression();
+ Call::CallType call_type = expr->GetCallType(isolate());
+ // Prepare the callee and the receiver for the function call. This depends on
+ // the semantics of the underlying call type.
+ TemporaryRegisterScope temporary_register_scope(&builder_);
+ Register callee = temporary_register_scope.NewRegister();
+ Register receiver = temporary_register_scope.NewRegister();
+
+ switch (call_type) {
+ case Call::PROPERTY_CALL: {
+ Property* property = callee_expr->AsProperty();
+ if (property->IsSuperAccess()) {
+ UNIMPLEMENTED();
+ }
+ Visit(property->obj());
+ builder().StoreAccumulatorInRegister(receiver);
+ // Perform a property load of the callee.
+ VisitPropertyLoad(receiver, property);
+ builder().StoreAccumulatorInRegister(callee);
+ break;
+ }
+ case Call::GLOBAL_CALL: {
+ // Receiver is undefined for global calls.
+ builder().LoadUndefined().StoreAccumulatorInRegister(receiver);
+ // Load callee as a global variable.
+ VariableProxy* proxy = callee_expr->AsVariableProxy();
+ VisitVariableLoad(proxy->var());
+ builder().StoreAccumulatorInRegister(callee);
+ break;
+ }
+ case Call::LOOKUP_SLOT_CALL:
+ case Call::SUPER_CALL:
+ case Call::POSSIBLY_EVAL_CALL:
+ case Call::OTHER_CALL:
+ UNIMPLEMENTED();
+ }
+
+ // Evaluate all arguments to the function call and store in sequential
+ // registers.
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Visit(args->at(i));
+ Register arg = temporary_register_scope.NewRegister();
+ DCHECK(arg.index() - i == receiver.index() + 1);
+ builder().StoreAccumulatorInRegister(arg);
+ }
+
+ // TODO(rmcilroy): Deal with possible direct eval here?
+ // TODO(rmcilroy): Use CallIC to allow call type feedback.
+ builder().Call(callee, receiver, args->length());
+}
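
The contiguous register layout built here is what DoCall in interpreter.cc consumes (an editorial sketch; register numbers illustrative):

    // r0 <- callee                      temporary
    // r1 <- receiver                    temporary; DoCall's first_arg points here
    // r2 <- arg[0], r3 <- arg[1], ...   DCHECK'd to sit at receiver + 1 + i
    // Call r0, r1, #argc                result is left in the accumulator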
-void BytecodeGenerator::VisitCallRuntime(CallRuntime* node) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitCallNew(CallNew* expr) { UNIMPLEMENTED(); }
-void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+
+void BytecodeGenerator::VisitCallRuntime(CallRuntime* expr) { UNIMPLEMENTED(); }
+
+
+void BytecodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitCountOperation(CountOperation* node) {
+void BytecodeGenerator::VisitCountOperation(CountOperation* expr) {
UNIMPLEMENTED();
}
@@ -327,26 +487,41 @@ void BytecodeGenerator::VisitBinaryOperation(BinaryOperation* binop) {
}
-void BytecodeGenerator::VisitCompareOperation(CompareOperation* node) {
- UNIMPLEMENTED();
+void BytecodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+ Token::Value op = expr->op();
+ Expression* left = expr->left();
+ Expression* right = expr->right();
+
+ TemporaryRegisterScope temporary_register_scope(&builder_);
+ Register temporary = temporary_register_scope.NewRegister();
+
+ Visit(left);
+ builder().StoreAccumulatorInRegister(temporary);
+ Visit(right);
+ builder().CompareOperation(op, temporary, language_mode());
}
-void BytecodeGenerator::VisitSpread(Spread* node) { UNIMPLEMENTED(); }
+void BytecodeGenerator::VisitSpread(Spread* expr) { UNREACHABLE(); }
+
+void BytecodeGenerator::VisitEmptyParentheses(EmptyParentheses* expr) {
+ UNREACHABLE();
+}
-void BytecodeGenerator::VisitThisFunction(ThisFunction* node) {
+
+void BytecodeGenerator::VisitThisFunction(ThisFunction* expr) {
UNIMPLEMENTED();
}
-void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* node) {
+void BytecodeGenerator::VisitSuperCallReference(SuperCallReference* expr) {
UNIMPLEMENTED();
}
void BytecodeGenerator::VisitSuperPropertyReference(
- SuperPropertyReference* node) {
+ SuperPropertyReference* expr) {
UNIMPLEMENTED();
}
@@ -365,6 +540,16 @@ void BytecodeGenerator::VisitArithmeticExpression(BinaryOperation* binop) {
builder().BinaryOperation(op, temporary);
}
+
+LanguageMode BytecodeGenerator::language_mode() const {
+ return info()->language_mode();
+}
+
+
+int BytecodeGenerator::feedback_index(FeedbackVectorICSlot slot) const {
+ return info()->feedback_vector()->GetIndex(slot);
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 5caf3f1813..99536c33fb 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -28,12 +28,20 @@ class BytecodeGenerator : public AstVisitor {
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
void VisitArithmeticExpression(BinaryOperation* binop);
+ void VisitPropertyLoad(Register obj, Property* expr);
+ void VisitVariableLoad(Variable* variable);
inline BytecodeArrayBuilder& builder() { return builder_; }
inline Scope* scope() const { return scope_; }
- inline void set_scope(Scope* s) { scope_ = s; }
+ inline void set_scope(Scope* scope) { scope_ = scope; }
+ inline CompilationInfo* info() const { return info_; }
+ inline void set_info(CompilationInfo* info) { info_ = info; }
+
+ LanguageMode language_mode() const;
+ int feedback_index(FeedbackVectorICSlot slot) const;
BytecodeArrayBuilder builder_;
+ CompilationInfo* info_;
Scope* scope_;
};
diff --git a/deps/v8/src/interpreter/bytecodes.cc b/deps/v8/src/interpreter/bytecodes.cc
index 8232b657e7..e5b9ab73a9 100644
--- a/deps/v8/src/interpreter/bytecodes.cc
+++ b/deps/v8/src/interpreter/bytecodes.cc
@@ -4,7 +4,7 @@
#include "src/interpreter/bytecodes.h"
-#include "src/interpreter/bytecode-array-builder.h"
+#include "src/frames.h"
namespace v8 {
namespace internal {
@@ -103,8 +103,8 @@ int Bytecodes::MaximumSize() { return 1 + kMaxOperands; }
// static
-std::ostream& Bytecodes::Decode(std::ostream& os,
- const uint8_t* bytecode_start) {
+std::ostream& Bytecodes::Decode(std::ostream& os, const uint8_t* bytecode_start,
+ int parameter_count) {
Vector<char> buf = Vector<char>::New(50);
Bytecode bytecode = Bytecodes::FromByte(bytecode_start[0]);
@@ -126,12 +126,29 @@ std::ostream& Bytecodes::Decode(std::ostream& os,
OperandType op_type = GetOperandType(bytecode, i);
uint8_t operand = operands_start[i];
switch (op_type) {
+ case interpreter::OperandType::kCount:
+ os << "#" << static_cast<unsigned int>(operand);
+ break;
+ case interpreter::OperandType::kIdx:
+ os << "[" << static_cast<unsigned int>(operand) << "]";
+ break;
case interpreter::OperandType::kImm8:
- os << "#" << static_cast<int>(operand);
+ os << "#" << static_cast<int>(static_cast<int8_t>(operand));
break;
- case interpreter::OperandType::kReg:
- os << "r" << Register::FromOperand(operand).index();
+ case interpreter::OperandType::kReg: {
+ Register reg = Register::FromOperand(operand);
+ if (reg.is_parameter()) {
+ int parameter_index = reg.ToParameterIndex(parameter_count);
+ if (parameter_index == 0) {
+ os << "<this>";
+ } else {
+ os << "a" << parameter_index - 1;
+ }
+ } else {
+ os << "r" << reg.index();
+ }
break;
+ }
case interpreter::OperandType::kNone:
UNREACHABLE();
break;
@@ -153,6 +170,44 @@ std::ostream& operator<<(std::ostream& os, const OperandType& operand_type) {
return os << Bytecodes::OperandTypeToString(operand_type);
}
+
+static const int kLastParamRegisterIndex =
+ -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
+
+
+// Registers occupy the range 0-127 of the 8-bit operand value, leaving 128
+// values unused. Parameter indices are biased with the negative value
+// kLastParamRegisterIndex for ease of access in the interpreter.
+static const int kMaxParameterIndex = 128 + kLastParamRegisterIndex;
+
+
+Register Register::FromParameterIndex(int index, int parameter_count) {
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, parameter_count);
+ DCHECK_LE(parameter_count, kMaxParameterIndex + 1);
+ int register_index = kLastParamRegisterIndex - parameter_count + index + 1;
+ DCHECK_LT(register_index, 0);
+ DCHECK_GE(register_index, Register::kMinRegisterIndex);
+ return Register(register_index);
+}
+
+
+int Register::ToParameterIndex(int parameter_count) const {
+ DCHECK(is_parameter());
+ return index() - kLastParamRegisterIndex + parameter_count - 1;
+}
+
+
+int Register::MaxParameterIndex() { return kMaxParameterIndex; }
+
+
+uint8_t Register::ToOperand() const { return static_cast<uint8_t>(-index_); }
+
+
+Register Register::FromOperand(uint8_t operand) {
+ return Register(-static_cast<int8_t>(operand));
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
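
kLastParamRegisterIndex is derived from frame constants, so the concrete negative indices vary by build; the round-trip below holds regardless (an editorial sketch, not part of the diff):

    Register receiver = Register::FromParameterIndex(0, 3);  // parameter_count == 3
    DCHECK(receiver.is_parameter());              // parameter indices are negative
    DCHECK_EQ(0, receiver.ToParameterIndex(3));   // the biasing round-trips exactly
    Register local(5);                            // locals have non-negative indices
    DCHECK_EQ(5, Register::FromOperand(local.ToOperand()).index());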
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index fec6ecf6aa..3862842277 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -18,32 +18,71 @@ namespace interpreter {
// The list of operand types used by bytecodes.
#define OPERAND_TYPE_LIST(V) \
V(None) \
+ V(Count) \
V(Imm8) \
+ V(Idx) \
V(Reg)
// The list of bytecodes which are interpreted by the interpreter.
-#define BYTECODE_LIST(V) \
- \
- /* Loading the accumulator */ \
- V(LdaZero, OperandType::kNone) \
- V(LdaSmi8, OperandType::kImm8) \
- V(LdaUndefined, OperandType::kNone) \
- V(LdaNull, OperandType::kNone) \
- V(LdaTheHole, OperandType::kNone) \
- V(LdaTrue, OperandType::kNone) \
- V(LdaFalse, OperandType::kNone) \
- \
- /* Register-accumulator transfers */ \
- V(Ldar, OperandType::kReg) \
- V(Star, OperandType::kReg) \
- \
- /* Binary Operators */ \
- V(Add, OperandType::kReg) \
- V(Sub, OperandType::kReg) \
- V(Mul, OperandType::kReg) \
- V(Div, OperandType::kReg) \
- \
- /* Control Flow */ \
+#define BYTECODE_LIST(V) \
+ \
+ /* Loading the accumulator */ \
+ V(LdaZero, OperandType::kNone) \
+ V(LdaSmi8, OperandType::kImm8) \
+ V(LdaConstant, OperandType::kIdx) \
+ V(LdaUndefined, OperandType::kNone) \
+ V(LdaNull, OperandType::kNone) \
+ V(LdaTheHole, OperandType::kNone) \
+ V(LdaTrue, OperandType::kNone) \
+ V(LdaFalse, OperandType::kNone) \
+ \
+ /* Load globals */ \
+ V(LdaGlobal, OperandType::kIdx) \
+ \
+ /* Register-accumulator transfers */ \
+ V(Ldar, OperandType::kReg) \
+ V(Star, OperandType::kReg) \
+ \
+ /* LoadIC operations */ \
+ V(LoadIC, OperandType::kReg, OperandType::kIdx) \
+ V(KeyedLoadIC, OperandType::kReg, OperandType::kIdx) \
+ \
+ /* StoreIC operations */ \
+ V(StoreIC, OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
+ V(KeyedStoreIC, OperandType::kReg, OperandType::kReg, OperandType::kIdx) \
+ \
+ /* Binary Operators */ \
+ V(Add, OperandType::kReg) \
+ V(Sub, OperandType::kReg) \
+ V(Mul, OperandType::kReg) \
+ V(Div, OperandType::kReg) \
+ V(Mod, OperandType::kReg) \
+ \
+ /* Call operations. */ \
+ V(Call, OperandType::kReg, OperandType::kReg, OperandType::kCount) \
+ \
+ /* Test Operators */ \
+ V(TestEqual, OperandType::kReg) \
+ V(TestNotEqual, OperandType::kReg) \
+ V(TestEqualStrict, OperandType::kReg) \
+ V(TestNotEqualStrict, OperandType::kReg) \
+ V(TestLessThan, OperandType::kReg) \
+ V(TestGreaterThan, OperandType::kReg) \
+ V(TestLessThanOrEqual, OperandType::kReg) \
+ V(TestGreaterThanOrEqual, OperandType::kReg) \
+ V(TestInstanceOf, OperandType::kReg) \
+ V(TestIn, OperandType::kReg) \
+ \
+ /* Cast operators */ \
+ V(ToBoolean, OperandType::kNone) \
+ \
+ /* Control Flow */ \
+ V(Jump, OperandType::kImm8) \
+ V(JumpConstant, OperandType::kIdx) \
+ V(JumpIfTrue, OperandType::kImm8) \
+ V(JumpIfTrueConstant, OperandType::kIdx) \
+ V(JumpIfFalse, OperandType::kImm8) \
+ V(JumpIfFalseConstant, OperandType::kIdx) \
V(Return, OperandType::kNone)
@@ -73,6 +112,43 @@ enum class Bytecode : uint8_t {
};
+// An interpreter register which is located in the function's register file
+// in its stack-frame. Registers hold parameters, this, and expression values.
+class Register {
+ public:
+ static const int kMaxRegisterIndex = 127;
+ static const int kMinRegisterIndex = -128;
+
+ Register() : index_(kIllegalIndex) {}
+
+ explicit Register(int index) : index_(index) {
+ DCHECK_LE(index_, kMaxRegisterIndex);
+ DCHECK_GE(index_, kMinRegisterIndex);
+ }
+
+ int index() const {
+ DCHECK(index_ != kIllegalIndex);
+ return index_;
+ }
+ bool is_parameter() const { return index() < 0; }
+
+ static Register FromParameterIndex(int index, int parameter_count);
+ int ToParameterIndex(int parameter_count) const;
+ static int MaxParameterIndex();
+
+ static Register FromOperand(uint8_t operand);
+ uint8_t ToOperand() const;
+
+ private:
+ static const int kIllegalIndex = kMaxInt;
+
+ void* operator new(size_t size);
+ void operator delete(void* p);
+
+ int index_;
+};
+
+
class Bytecodes {
public:
// Returns string representation of |bytecode|.
@@ -103,7 +179,8 @@ class Bytecodes {
static int MaximumSize();
// Decode a single bytecode and operands to |os|.
- static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start);
+ static std::ostream& Decode(std::ostream& os, const uint8_t* bytecode_start,
+ int number_of_parameters);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(Bytecodes);
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 565fa0c443..2d97fc8ef2 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -4,6 +4,7 @@
#include "src/interpreter/interpreter.h"
+#include "src/code-factory.h"
#include "src/compiler.h"
#include "src/compiler/interpreter-assembler.h"
#include "src/factory.h"
@@ -61,6 +62,7 @@ bool Interpreter::MakeBytecode(CompilationInfo* info) {
Handle<SharedFunctionInfo> shared_info = info->shared_info();
BytecodeGenerator generator(info->isolate(), info->zone());
+ info->EnsureFeedbackVector();
Handle<BytecodeArray> bytecodes = generator.MakeBytecode(info);
if (FLAG_print_bytecode) {
bytecodes->Print();
@@ -73,7 +75,6 @@ bool Interpreter::MakeBytecode(CompilationInfo* info) {
shared_info->set_function_data(*bytecodes);
info->SetCode(info->isolate()->builtins()->InterpreterEntryTrampoline());
- info->EnsureFeedbackVector();
return true;
}
@@ -106,12 +107,23 @@ void Interpreter::DoLdaSmi8(compiler::InterpreterAssembler* assembler) {
}
+// LdaConstant <idx>
+//
+// Load constant literal at |idx| in the constant pool into the accumulator.
+void Interpreter::DoLdaConstant(compiler::InterpreterAssembler* assembler) {
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ __ SetAccumulator(constant);
+ __ Dispatch();
+}
+
+
// LdaUndefined
//
// Load Undefined into the accumulator.
void Interpreter::DoLdaUndefined(compiler::InterpreterAssembler* assembler) {
- Node* undefined_value = __ HeapConstant(Unique<HeapObject>::CreateImmovable(
- isolate_->factory()->undefined_value()));
+ Node* undefined_value =
+ __ HeapConstant(isolate_->factory()->undefined_value());
__ SetAccumulator(undefined_value);
__ Dispatch();
}
@@ -121,8 +133,7 @@ void Interpreter::DoLdaUndefined(compiler::InterpreterAssembler* assembler) {
//
// Load Null into the accumulator.
void Interpreter::DoLdaNull(compiler::InterpreterAssembler* assembler) {
- Node* null_value = __ HeapConstant(
- Unique<HeapObject>::CreateImmovable(isolate_->factory()->null_value()));
+ Node* null_value = __ HeapConstant(isolate_->factory()->null_value());
__ SetAccumulator(null_value);
__ Dispatch();
}
@@ -132,8 +143,7 @@ void Interpreter::DoLdaNull(compiler::InterpreterAssembler* assembler) {
//
// Load TheHole into the accumulator.
void Interpreter::DoLdaTheHole(compiler::InterpreterAssembler* assembler) {
- Node* the_hole_value = __ HeapConstant(Unique<HeapObject>::CreateImmovable(
- isolate_->factory()->the_hole_value()));
+ Node* the_hole_value = __ HeapConstant(isolate_->factory()->the_hole_value());
__ SetAccumulator(the_hole_value);
__ Dispatch();
}
@@ -143,8 +153,7 @@ void Interpreter::DoLdaTheHole(compiler::InterpreterAssembler* assembler) {
//
// Load True into the accumulator.
void Interpreter::DoLdaTrue(compiler::InterpreterAssembler* assembler) {
- Node* true_value = __ HeapConstant(
- Unique<HeapObject>::CreateImmovable(isolate_->factory()->true_value()));
+ Node* true_value = __ HeapConstant(isolate_->factory()->true_value());
__ SetAccumulator(true_value);
__ Dispatch();
}
@@ -154,8 +163,7 @@ void Interpreter::DoLdaTrue(compiler::InterpreterAssembler* assembler) {
//
// Load False into the accumulator.
void Interpreter::DoLdaFalse(compiler::InterpreterAssembler* assembler) {
- Node* false_value = __ HeapConstant(
- Unique<HeapObject>::CreateImmovable(isolate_->factory()->false_value()));
+ Node* false_value = __ HeapConstant(isolate_->factory()->false_value());
__ SetAccumulator(false_value);
__ Dispatch();
}
@@ -165,7 +173,8 @@ void Interpreter::DoLdaFalse(compiler::InterpreterAssembler* assembler) {
//
// Load accumulator with value from register <src>.
void Interpreter::DoLdar(compiler::InterpreterAssembler* assembler) {
- Node* value = __ LoadRegister(__ BytecodeOperandReg(0));
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* value = __ LoadRegister(reg_index);
__ SetAccumulator(value);
__ Dispatch();
}
@@ -182,12 +191,114 @@ void Interpreter::DoStar(compiler::InterpreterAssembler* assembler) {
}
+// LdaGlobal <slot_index>
+//
+// Load the global at |slot_index| into the accumulator.
+void Interpreter::DoLdaGlobal(compiler::InterpreterAssembler* assembler) {
+ Node* slot_index = __ BytecodeOperandIdx(0);
+ Node* smi_slot_index = __ SmiTag(slot_index);
+ Node* result = __ CallRuntime(Runtime::kLoadGlobalViaContext, smi_slot_index);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+void Interpreter::DoPropertyLoadIC(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
+ Node* code_target = __ HeapConstant(ic.code());
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(reg_index);
+ Node* name = __ GetAccumulator();
+ Node* raw_slot = __ BytecodeOperandIdx(1);
+ Node* smi_slot = __ SmiTag(raw_slot);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* result = __ CallIC(ic.descriptor(), code_target, object, name, smi_slot,
+ type_feedback_vector);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// LoadIC <object> <slot>
+//
+// Calls the LoadIC at FeedbackVector slot <slot> for <object> and the name
+// in the accumulator.
+void Interpreter::DoLoadIC(compiler::InterpreterAssembler* assembler) {
+ Callable ic = CodeFactory::LoadICInOptimizedCode(isolate_, NOT_INSIDE_TYPEOF,
+ SLOPPY, UNINITIALIZED);
+ DoPropertyLoadIC(ic, assembler);
+}
+
+
+// KeyedLoadIC <object> <slot>
+//
+// Calls the KeyedLoadIC at FeedbackVector slot <slot> for <object> and the key
+// in the accumulator.
+void Interpreter::DoKeyedLoadIC(compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoPropertyLoadIC(ic, assembler);
+}
+
+
+void Interpreter::DoPropertyStoreIC(Callable ic,
+ compiler::InterpreterAssembler* assembler) {
+ Node* code_target = __ HeapConstant(ic.code());
+ Node* object_reg_index = __ BytecodeOperandReg(0);
+ Node* object = __ LoadRegister(object_reg_index);
+ Node* name_reg_index = __ BytecodeOperandReg(1);
+ Node* name = __ LoadRegister(name_reg_index);
+ Node* value = __ GetAccumulator();
+ Node* raw_slot = __ BytecodeOperandIdx(2);
+ Node* smi_slot = __ SmiTag(raw_slot);
+ Node* type_feedback_vector = __ LoadTypeFeedbackVector();
+ Node* result = __ CallIC(ic.descriptor(), code_target, object, name, value,
+ smi_slot, type_feedback_vector);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
+
+
+// StoreIC <object> <name> <slot>
+//
+// Calls the StoreIC at FeedbackVector slot <slot> for <object> and the name
+// <name> with the value in the accumulator.
+void Interpreter::DoStoreIC(compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::StoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoPropertyStoreIC(ic, assembler);
+}
+
+
+// KeyedStoreIC <object> <key> <slot>
+//
+// Calls the KeyedStoreIC at FeedbackVector slot <slot> for <object> and the key
+// <key> with the value in the accumulator.
+void Interpreter::DoKeyedStoreIC(compiler::InterpreterAssembler* assembler) {
+ Callable ic =
+ CodeFactory::KeyedStoreICInOptimizedCode(isolate_, SLOPPY, UNINITIALIZED);
+ DoPropertyStoreIC(ic, assembler);
+}
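
An editorial summary of the convention the four IC handlers above establish (not part of the diff):

    // LoadIC  <obj> <slot>          object = registers[obj],  name = accumulator
    // StoreIC <obj> <name> <slot>   object = registers[obj],  name = registers[name],
    //                               value  = accumulator
    // All four SmiTag the raw Idx operand and pass the closure's type feedback
    // vector, so the ICs can collect type feedback as usual.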
+
+
+void Interpreter::DoBinaryOp(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler) {
+ // TODO(rmcilroy): Call ICs which back-patch bytecode with type specialized
+ // operations, instead of calling builtins directly.
+ Node* reg_index = __ BytecodeOperandReg(0);
+ Node* lhs = __ LoadRegister(reg_index);
+ Node* rhs = __ GetAccumulator();
+ Node* result = __ CallRuntime(function_id, lhs, rhs);
+ __ SetAccumulator(result);
+ __ Dispatch();
+}
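
One convention worth noting (editorial): DoBinaryOp passes the register operand as the left-hand side and the accumulator as the right-hand side, mirroring VisitArithmeticExpression, which parks the left subexpression in a temporary register and leaves the right one in the accumulator:

    // Sub r1   computes   registers[r1] - accumulator
    // Div r1   computes   registers[r1] / accumulator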
+
+
// Add <src>
//
// Add register <src> to accumulator.
void Interpreter::DoAdd(compiler::InterpreterAssembler* assembler) {
- // TODO(rmcilroy) Implement.
- __ Dispatch();
+ DoBinaryOp(Runtime::kAdd, assembler);
}
@@ -195,8 +306,7 @@ void Interpreter::DoAdd(compiler::InterpreterAssembler* assembler) {
//
// Subtract the accumulator from register <src>.
void Interpreter::DoSub(compiler::InterpreterAssembler* assembler) {
- // TODO(rmcilroy) Implement.
- __ Dispatch();
+ DoBinaryOp(Runtime::kSubtract, assembler);
}
@@ -204,8 +314,7 @@ void Interpreter::DoSub(compiler::InterpreterAssembler* assembler) {
//
// Multiply accumulator by register <src>.
void Interpreter::DoMul(compiler::InterpreterAssembler* assembler) {
- // TODO(rmcilroy) Implement add register to accumulator.
- __ Dispatch();
+ DoBinaryOp(Runtime::kMultiply, assembler);
}
@@ -213,11 +322,206 @@ void Interpreter::DoMul(compiler::InterpreterAssembler* assembler) {
//
// Divide register <src> by accumulator.
void Interpreter::DoDiv(compiler::InterpreterAssembler* assembler) {
- // TODO(rmcilroy) Implement.
+ DoBinaryOp(Runtime::kDivide, assembler);
+}
+
+
+// Mod <src>
+//
+// Modulo register <src> by accumulator.
+void Interpreter::DoMod(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kModulus, assembler);
+}
+
+
+// Call <callee> <receiver> <arg_count>
+//
+// Call the JSFunction or Callable in the |callee| register with the receiver
+// and |arg_count| arguments in subsequent registers.
+void Interpreter::DoCall(compiler::InterpreterAssembler* assembler) {
+ Node* function_reg = __ BytecodeOperandReg(0);
+ Node* function = __ LoadRegister(function_reg);
+ Node* receiver_reg = __ BytecodeOperandReg(1);
+ Node* first_arg = __ RegisterLocation(receiver_reg);
+ Node* args_count = __ BytecodeOperandCount(2);
+ Node* result = __ CallJS(function, first_arg, args_count);
+ __ SetAccumulator(result);
__ Dispatch();
}
+// TestEqual <src>
+//
+// Test if the value in the <src> register equals the accumulator.
+void Interpreter::DoTestEqual(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kInterpreterEquals, assembler);
+}
+
+
+// TestNotEqual <src>
+//
+// Test if the value in the <src> register is not equal to the accumulator.
+void Interpreter::DoTestNotEqual(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kInterpreterNotEquals, assembler);
+}
+
+
+// TestEqualStrict <src>
+//
+// Test if the value in the <src> register is strictly equal to the accumulator.
+void Interpreter::DoTestEqualStrict(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kInterpreterStrictEquals, assembler);
+}
+
+
+// TestNotEqualStrict <src>
+//
+// Test if the value in the <src> register is not strictly equal to the
+// accumulator.
+void Interpreter::DoTestNotEqualStrict(
+ compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kInterpreterStrictNotEquals, assembler);
+}
+
+
+// TestLessThan <src>
+//
+// Test if the value in the <src> register is less than the accumulator.
+void Interpreter::DoTestLessThan(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kInterpreterLessThan, assembler);
+}
+
+
+// TestGreaterThan <src>
+//
+// Test if the value in the <src> register is greater than the accumulator.
+void Interpreter::DoTestGreaterThan(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kInterpreterGreaterThan, assembler);
+}
+
+
+// TestLessThanOrEqual <src>
+//
+// Test if the value in the <src> register is less than or equal to the
+// accumulator.
+void Interpreter::DoTestLessThanOrEqual(
+ compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kInterpreterLessThanOrEqual, assembler);
+}
+
+
+// TestGreaterThanOrEqual <src>
+//
+// Test if the value in the <src> register is greater than or equal to the
+// accumulator.
+void Interpreter::DoTestGreaterThanOrEqual(
+ compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kInterpreterGreaterThanOrEqual, assembler);
+}
+
+
+// TestIn <src>
+//
+// Test if the object referenced by the register operand is a property of the
+// object referenced by the accumulator.
+void Interpreter::DoTestIn(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kHasProperty, assembler);
+}
+
+
+// TestInstanceOf <src>
+//
+// Test if the object referenced by the <src> register is an instance of the
+// type referenced by the accumulator.
+void Interpreter::DoTestInstanceOf(compiler::InterpreterAssembler* assembler) {
+ DoBinaryOp(Runtime::kInstanceOf, assembler);
+}
+
+
+// ToBoolean
+//
+// Cast the object referenced by the accumulator to a boolean.
+void Interpreter::DoToBoolean(compiler::InterpreterAssembler* assembler) {
+ // TODO(oth): The next CL for test operations has interpreter-specific
+ // runtime calls. This looks like another candidate.
+ __ Dispatch();
+}
+
+
+// Jump <imm8>
+//
+// Jump by number of bytes represented by an immediate operand.
+void Interpreter::DoJump(compiler::InterpreterAssembler* assembler) {
+ Node* relative_jump = __ BytecodeOperandImm8(0);
+ __ Jump(relative_jump);
+}
+
+
+// JumpConstant <idx>
+//
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
+void Interpreter::DoJumpConstant(compiler::InterpreterAssembler* assembler) {
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ Node* relative_jump = __ SmiUntag(constant);
+ __ Jump(relative_jump);
+}
+
+
+// JumpIfTrue <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the
+// accumulator contains true.
+void Interpreter::DoJumpIfTrue(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* relative_jump = __ BytecodeOperandImm8(0);
+ Node* true_value = __ BooleanConstant(true);
+ __ JumpIfWordEqual(accumulator, true_value, relative_jump);
+}
+
+
+// JumpIfTrueConstant <idx>
+//
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// if the accumulator contains true.
+void Interpreter::DoJumpIfTrueConstant(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ Node* relative_jump = __ SmiUntag(constant);
+ Node* true_value = __ BooleanConstant(true);
+ __ JumpIfWordEqual(accumulator, true_value, relative_jump);
+}
+
+
+// JumpIfFalse <imm8>
+//
+// Jump by number of bytes represented by an immediate operand if the
+// accumulator contains false.
+void Interpreter::DoJumpIfFalse(compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* relative_jump = __ BytecodeOperandImm8(0);
+ Node* false_value = __ BooleanConstant(false);
+ __ JumpIfWordEqual(accumulator, false_value, relative_jump);
+}
+
+
+// JumpIfFalseConstant <idx>
+//
+// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
+// if the accumulator contains false.
+void Interpreter::DoJumpIfFalseConstant(
+ compiler::InterpreterAssembler* assembler) {
+ Node* accumulator = __ GetAccumulator();
+ Node* index = __ BytecodeOperandIdx(0);
+ Node* constant = __ LoadConstantPoolEntry(index);
+ Node* relative_jump = __ SmiUntag(constant);
+ Node* false_value = __ BooleanConstant(false);
+ __ JumpIfWordEqual(accumulator, false_value, relative_jump);
+}
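
An editorial note on the paired encodings: the plain jumps carry a signed 8-bit relative offset inline, while the *Constant forms — presumably emitted when an offset cannot fit in an Imm8 operand — fetch the offset as a Smi from the constant pool and SmiUntag it before jumping:

    // Jump #imm8            offset inline, sign-extended
    // JumpConstant [idx]    offset held as a Smi in constant pool entry idx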
+
+
// Return
//
// Return the value in register 0.
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 64101de657..c32b6831d0 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -9,12 +9,16 @@
// Do not include anything from src/interpreter other than
// src/interpreter/bytecodes.h here!
#include "src/base/macros.h"
+#include "src/builtins.h"
#include "src/interpreter/bytecodes.h"
+#include "src/runtime/runtime.h"
+#include "src/token.h"
namespace v8 {
namespace internal {
class Isolate;
+class Callable;
class CompilationInfo;
namespace compiler {
@@ -46,6 +50,22 @@ class Interpreter {
BYTECODE_LIST(DECLARE_BYTECODE_HANDLER_GENERATOR)
#undef DECLARE_BYTECODE_HANDLER_GENERATOR
+ // Generates code to perform the binary operations via |function_id|.
+ void DoBinaryOp(Runtime::FunctionId function_id,
+ compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform the comparison operation associated with
+ // |compare_op|.
+ void DoCompareOp(Token::Value compare_op,
+ compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a property load via |ic|.
+ void DoPropertyLoadIC(Callable ic, compiler::InterpreterAssembler* assembler);
+
+ // Generates code to perform a property store via |ic|.
+ void DoPropertyStoreIC(Callable ic,
+ compiler::InterpreterAssembler* assembler);
+
bool IsInterpreterTableInitialized(Handle<FixedArray> handler_table);
Isolate* isolate_;
diff --git a/deps/v8/src/isolate-inl.h b/deps/v8/src/isolate-inl.h
new file mode 100644
index 0000000000..c281c24639
--- /dev/null
+++ b/deps/v8/src/isolate-inl.h
@@ -0,0 +1,104 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ISOLATE_INL_H_
+#define V8_ISOLATE_INL_H_
+
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+void Isolate::set_context(Context* context) {
+ DCHECK(context == NULL || context->IsContext());
+ thread_local_top_.context_ = context;
+}
+
+
+Object* Isolate::pending_exception() {
+ DCHECK(has_pending_exception());
+ DCHECK(!thread_local_top_.pending_exception_->IsException());
+ return thread_local_top_.pending_exception_;
+}
+
+
+void Isolate::set_pending_exception(Object* exception_obj) {
+ DCHECK(!exception_obj->IsException());
+ thread_local_top_.pending_exception_ = exception_obj;
+}
+
+
+void Isolate::clear_pending_exception() {
+ DCHECK(!thread_local_top_.pending_exception_->IsException());
+ thread_local_top_.pending_exception_ = heap_.the_hole_value();
+}
+
+
+bool Isolate::has_pending_exception() {
+ DCHECK(!thread_local_top_.pending_exception_->IsException());
+ return !thread_local_top_.pending_exception_->IsTheHole();
+}
+
+
+void Isolate::clear_pending_message() {
+ thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
+}
+
+
+Object* Isolate::scheduled_exception() {
+ DCHECK(has_scheduled_exception());
+ DCHECK(!thread_local_top_.scheduled_exception_->IsException());
+ return thread_local_top_.scheduled_exception_;
+}
+
+
+bool Isolate::has_scheduled_exception() {
+ DCHECK(!thread_local_top_.scheduled_exception_->IsException());
+ return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
+}
+
+
+void Isolate::clear_scheduled_exception() {
+ DCHECK(!thread_local_top_.scheduled_exception_->IsException());
+ thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
+}
+
+
+bool Isolate::is_catchable_by_javascript(Object* exception) {
+ return exception != heap()->termination_exception();
+}
+
+
+Handle<GlobalObject> Isolate::global_object() {
+ return Handle<GlobalObject>(context()->global_object());
+}
+
+
+Isolate::ExceptionScope::ExceptionScope(Isolate* isolate)
+ : isolate_(isolate),
+ pending_exception_(isolate_->pending_exception(), isolate_) {}
+
+
+Isolate::ExceptionScope::~ExceptionScope() {
+ isolate_->set_pending_exception(*pending_exception_);
+}
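
A usage sketch for ExceptionScope (not part of the diff); note the constructor DCHECKs that an exception is actually pending when the scope opens:

    {
      Isolate::ExceptionScope scope(isolate);   // captures pending_exception()
      isolate->clear_pending_exception();
      // ... run code that may set and clear its own pending exceptions ...
    }  // destructor restores the originally pending exception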
+
+
+#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
+ Handle<type> Isolate::name() { \
+ return Handle<type>(native_context()->name(), this); \
+ } \
+ bool Isolate::is_##name(type* value) { \
+ return native_context()->is_##name(value); \
+ }
+NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
+#undef NATIVE_CONTEXT_FIELD_ACCESSOR
+
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ISOLATE_INL_H_
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 1b44ee6834..3fff6b2ef7 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -2,13 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/isolate.h"
+
#include <stdlib.h>
#include <fstream> // NOLINT(readability/streams)
#include <sstream>
-#include "src/v8.h"
-
#include "src/ast.h"
#include "src/base/platform/platform.h"
#include "src/base/sys-info.h"
@@ -18,24 +18,25 @@
#include "src/codegen.h"
#include "src/compilation-cache.h"
#include "src/compilation-statistics.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
-#include "src/heap-profiler.h"
#include "src/hydrogen.h"
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
+#include "src/isolate-inl.h"
#include "src/lithium-allocator.h"
#include "src/log.h"
#include "src/messages.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/profiler/sampler.h"
#include "src/prototype.h"
#include "src/regexp/regexp-stack.h"
#include "src/runtime-profiler.h"
-#include "src/sampler.h"
#include "src/scopeinfo.h"
#include "src/simulator.h"
#include "src/snapshot/serialize.h"
+#include "src/v8.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
@@ -312,18 +313,13 @@ static bool IsVisibleInStackTrace(JSFunction* fun,
}
// Skip all frames until we've seen the caller.
if (!(*seen_caller)) return false;
- // Also, skip non-visible built-in functions and any call with the builtins
- // object as receiver, so as to not reveal either the builtins object or
- // an internal function.
+ // Functions defined in native scripts are not visible unless directly
+ // exposed, in which case the native flag is set.
// The --builtins-in-stack-traces command line flag allows including
// internal call sites in the stack trace for debugging purposes.
if (!FLAG_builtins_in_stack_traces) {
if (receiver->IsJSBuiltinsObject()) return false;
- if (fun->IsBuiltin()) {
- return fun->shared()->native();
- } else if (!fun->IsSubjectToDebugging()) {
- return false;
- }
+ if (fun->IsBuiltin()) return fun->shared()->native();
}
return true;
}
@@ -504,7 +500,7 @@ class CaptureStackTraceHelper {
Handle<Script> script(Script::cast(fun->shared()->script()));
if (!line_key_.is_null()) {
- int script_line_offset = script->line_offset()->value();
+ int script_line_offset = script->line_offset();
int line_number = Script::GetLineNumber(script, position);
// line_number is already shifted by the script_line_offset.
int relative_line_number = line_number - script_line_offset;
@@ -516,7 +512,7 @@ class CaptureStackTraceHelper {
if (relative_line_number == 0) {
// For the case where the code is on the same line as the script
// tag.
- column_offset += script->column_offset()->value();
+ column_offset += script->column_offset();
}
JSObject::AddProperty(stack_frame, column_key_,
handle(Smi::FromInt(column_offset + 1), isolate_),
@@ -529,7 +525,7 @@ class CaptureStackTraceHelper {
if (!script_id_key_.is_null()) {
JSObject::AddProperty(stack_frame, script_id_key_,
- handle(script->id(), isolate_), NONE);
+ handle(Smi::FromInt(script->id()), isolate_), NONE);
}
if (!script_name_key_.is_null()) {
@@ -997,11 +993,10 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
// Generate the message if required.
if (requires_message && !rethrowing_message) {
- MessageLocation potential_computed_location;
- if (location == NULL) {
- // If no location was specified we use a computed one instead.
- ComputeLocation(&potential_computed_location);
- location = &potential_computed_location;
+ MessageLocation computed_location;
+ // If no location was specified we try to use a computed one instead.
+ if (location == NULL && ComputeLocation(&computed_location)) {
+ location = &computed_location;
}
if (bootstrapper()->IsActive()) {
@@ -1013,21 +1008,13 @@ Object* Isolate::Throw(Object* exception, MessageLocation* location) {
Handle<Object> message_obj = CreateMessage(exception_handle, location);
thread_local_top()->pending_message_obj_ = *message_obj;
- // For any exception not caught by JavaScript, even when an external
- // handler is present:
- // If the abort-on-uncaught-exception flag is specified, and if the
- // embedder didn't specify a custom uncaught exception callback,
- // or if the custom callback determined that V8 should abort, then
- // abort.
+ // If the abort-on-uncaught-exception flag is specified, abort on any
+ // exception not caught by JavaScript, even when an external handler is
+ // present. This flag is intended for use by JavaScript developers, so
+ // print a user-friendly stack trace (not an internal one).
if (FLAG_abort_on_uncaught_exception &&
- PredictExceptionCatcher() != CAUGHT_BY_JAVASCRIPT &&
- (!abort_on_uncaught_exception_callback_ ||
- abort_on_uncaught_exception_callback_(
- reinterpret_cast<v8::Isolate*>(this)))) {
- // Prevent endless recursion.
- FLAG_abort_on_uncaught_exception = false;
- // This flag is intended for use by JavaScript developers, so
- // print a user-friendly stack trace (not an internal one).
+ PredictExceptionCatcher() != CAUGHT_BY_JAVASCRIPT) {
+ FLAG_abort_on_uncaught_exception = false; // Prevent endless recursion.
PrintF(stderr, "%s\n\nFROM\n",
MessageHandler::GetLocalizedMessage(this, message_obj).get());
PrintCurrentStackTrace(stderr);
@@ -1270,8 +1257,7 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
}
-void Isolate::ComputeLocation(MessageLocation* target) {
- *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
+bool Isolate::ComputeLocation(MessageLocation* target) {
StackTraceFrameIterator it(this);
if (!it.done()) {
JavaScriptFrame* frame = it.frame();
@@ -1288,8 +1274,10 @@ void Isolate::ComputeLocation(MessageLocation* target) {
FrameSummary& summary = frames.last();
int pos = summary.code()->SourcePosition(summary.pc());
*target = MessageLocation(casted_script, pos, pos + 1, handle(fun));
+ return true;
}
}
+ return false;
}
@@ -1322,8 +1310,6 @@ bool Isolate::ComputeLocationFromException(MessageLocation* target,
bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
Handle<Object> exception) {
- *target = MessageLocation(Handle<Script>(heap_.empty_script()), -1, -1);
-
if (!exception->IsJSObject()) return false;
Handle<Name> key = factory()->stack_trace_symbol();
Handle<Object> property =
@@ -1361,7 +1347,7 @@ bool Isolate::IsErrorObject(Handle<Object> obj) {
for (PrototypeIterator iter(this, *obj, PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
if (iter.GetCurrent()->IsJSProxy()) return false;
- if (JSObject::cast(iter.GetCurrent())->map()->GetConstructor() ==
+ if (iter.GetCurrent<JSObject>()->map()->GetConstructor() ==
*error_constructor) {
return true;
}
@@ -1373,7 +1359,6 @@ bool Isolate::IsErrorObject(Handle<Object> obj) {
Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
MessageLocation* location) {
Handle<JSArray> stack_trace_object;
- MessageLocation potential_computed_location;
if (capture_stack_trace_for_uncaught_exceptions_) {
if (IsErrorObject(exception)) {
// We fetch the stack trace that corresponds to this error object.
@@ -1390,15 +1375,12 @@ Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
stack_trace_for_uncaught_exceptions_options_);
}
}
- if (!location) {
- if (!ComputeLocationFromException(&potential_computed_location,
- exception)) {
- if (!ComputeLocationFromStackTrace(&potential_computed_location,
- exception)) {
- ComputeLocation(&potential_computed_location);
- }
- }
- location = &potential_computed_location;
+ MessageLocation computed_location;
+ if (location == NULL &&
+ (ComputeLocationFromException(&computed_location, exception) ||
+ ComputeLocationFromStackTrace(&computed_location, exception) ||
+ ComputeLocation(&computed_location))) {
+ location = &computed_location;
}
return MessageHandler::MakeMessageObject(
@@ -1620,12 +1602,6 @@ void Isolate::SetCaptureStackTraceForUncaughtExceptions(
}
-void Isolate::SetAbortOnUncaughtExceptionCallback(
- v8::Isolate::AbortOnUncaughtExceptionCallback callback) {
- abort_on_uncaught_exception_callback_ = callback;
-}
-
-
Handle<Context> Isolate::native_context() {
return handle(context()->native_context());
}
@@ -1788,13 +1764,13 @@ Isolate::Isolate(bool enable_serializer)
deferred_handles_head_(NULL),
optimizing_compile_dispatcher_(NULL),
stress_deopt_count_(0),
+ vector_store_virtual_register_(NULL),
next_optimization_id_(0),
#if TRACE_MAPS
next_unique_sfi_id_(0),
#endif
use_counter_callback_(NULL),
- basic_block_profiler_(NULL),
- abort_on_uncaught_exception_callback_(NULL) {
+ basic_block_profiler_(NULL) {
{
base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
CHECK(thread_data_table_);
@@ -2098,7 +2074,7 @@ bool Isolate::Init(Deserializer* des) {
}
// The initialization process does not handle memory exhaustion.
- DisallowAllocationFailure disallow_allocation_failure(this);
+ AlwaysAllocateScope always_allocate(this);
memory_allocator_ = new MemoryAllocator(this);
code_range_ = new CodeRange(this);
@@ -2580,9 +2556,6 @@ Handle<JSObject> Isolate::GetSymbolRegistry() {
SetUpSubregistry(registry, map, "for");
SetUpSubregistry(registry, map, "for_api");
SetUpSubregistry(registry, map, "keyFor");
- SetUpSubregistry(registry, map, "private_api");
- heap()->AddPrivateGlobalSymbols(
- SetUpSubregistry(registry, map, "private_intern"));
}
return Handle<JSObject>::cast(factory()->symbol_registry());
}
@@ -2831,6 +2804,18 @@ SaveContext::SaveContext(Isolate* isolate)
}
+SaveContext::~SaveContext() {
+ isolate_->set_context(context_.is_null() ? NULL : *context_);
+ isolate_->set_save_context(prev_);
+}
+
+
+#ifdef DEBUG
+AssertNoContextChange::AssertNoContextChange(Isolate* isolate)
+ : isolate_(isolate), context_(isolate->context(), isolate) {}
+#endif // DEBUG
+
+
bool PostponeInterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
// First check whether the previous scope intercepts.
if (prev_ && prev_->Intercept(flag)) return true;
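The SaveContext destructor added above (its declaration shrinks correspondingly in the isolate.h hunk below) is a plain RAII save/restore scope that moved out of line. A hedged sketch of the idiom with a simplified context type:

```cpp
#include <cassert>

struct Context { int id; };

// Stand-in for the isolate's thread-local current context.
static Context* g_current = nullptr;

// RAII scope: remembers the current context on entry and restores it on
// exit, mirroring SaveContext's constructor/destructor pair.
class SaveContextScope {
 public:
  SaveContextScope() : saved_(g_current) {}
  ~SaveContextScope() { g_current = saved_; }

 private:
  Context* const saved_;
};

int main() {
  Context outer{1}, inner{2};
  g_current = &outer;
  {
    SaveContextScope save;  // snapshots &outer
    g_current = &inner;     // switch contexts inside the scope
  }                         // destructor restores &outer
  assert(g_current == &outer);
  return 0;
}
```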
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 97c23bd99a..035b4b363c 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -544,10 +544,7 @@ class Isolate {
// Access to top context (where the current function object was created).
Context* context() { return thread_local_top_.context_; }
- void set_context(Context* context) {
- DCHECK(context == NULL || context->IsContext());
- thread_local_top_.context_ = context;
- }
+ inline void set_context(Context* context);
Context** context_address() { return &thread_local_top_.context_; }
THREAD_LOCAL_TOP_ACCESSOR(SaveContext*, save_context)
@@ -556,28 +553,13 @@ class Isolate {
THREAD_LOCAL_TOP_ACCESSOR(ThreadId, thread_id)
// Interface to pending exception.
- Object* pending_exception() {
- DCHECK(has_pending_exception());
- DCHECK(!thread_local_top_.pending_exception_->IsException());
- return thread_local_top_.pending_exception_;
- }
-
- void set_pending_exception(Object* exception_obj) {
- DCHECK(!exception_obj->IsException());
- thread_local_top_.pending_exception_ = exception_obj;
- }
-
- void clear_pending_exception() {
- DCHECK(!thread_local_top_.pending_exception_->IsException());
- thread_local_top_.pending_exception_ = heap_.the_hole_value();
- }
+ inline Object* pending_exception();
+ inline void set_pending_exception(Object* exception_obj);
+ inline void clear_pending_exception();
THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)
- bool has_pending_exception() {
- DCHECK(!thread_local_top_.pending_exception_->IsException());
- return !thread_local_top_.pending_exception_->IsTheHole();
- }
+ inline bool has_pending_exception();
THREAD_LOCAL_TOP_ADDRESS(Context*, pending_handler_context)
THREAD_LOCAL_TOP_ADDRESS(Code*, pending_handler_code)
@@ -587,9 +569,6 @@ class Isolate {
THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
- void clear_pending_message() {
- thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
- }
v8::TryCatch* try_catch_handler() {
return thread_local_top_.try_catch_handler();
}
@@ -599,30 +578,19 @@ class Isolate {
THREAD_LOCAL_TOP_ADDRESS(Object*, scheduled_exception)
+ inline void clear_pending_message();
Address pending_message_obj_address() {
return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
}
- Object* scheduled_exception() {
- DCHECK(has_scheduled_exception());
- DCHECK(!thread_local_top_.scheduled_exception_->IsException());
- return thread_local_top_.scheduled_exception_;
- }
- bool has_scheduled_exception() {
- DCHECK(!thread_local_top_.scheduled_exception_->IsException());
- return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
- }
- void clear_scheduled_exception() {
- DCHECK(!thread_local_top_.scheduled_exception_->IsException());
- thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
- }
+ inline Object* scheduled_exception();
+ inline bool has_scheduled_exception();
+ inline void clear_scheduled_exception();
bool IsJavaScriptHandlerOnTop(Object* exception);
bool IsExternalHandlerOnTop(Object* exception);
- bool is_catchable_by_javascript(Object* exception) {
- return exception != heap()->termination_exception();
- }
+ inline bool is_catchable_by_javascript(Object* exception);
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
@@ -649,19 +617,13 @@ class Isolate {
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
- Handle<GlobalObject> global_object() {
- return Handle<GlobalObject>(context()->global_object());
- }
+ inline Handle<GlobalObject> global_object();
// Returns the global proxy object of the current context.
JSObject* global_proxy() {
return context()->global_proxy();
}
- Handle<JSBuiltinsObject> js_builtins_object() {
- return Handle<JSBuiltinsObject>(thread_local_top_.context_->builtins());
- }
-
static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
void FreeThreadResources() { thread_local_top_.Free(); }
@@ -680,13 +642,8 @@ class Isolate {
public:
// Scope currently can only be used for regular exceptions,
// not termination exception.
- explicit ExceptionScope(Isolate* isolate)
- : isolate_(isolate),
- pending_exception_(isolate_->pending_exception(), isolate_) {}
-
- ~ExceptionScope() {
- isolate_->set_pending_exception(*pending_exception_);
- }
+ inline explicit ExceptionScope(Isolate* isolate);
+ inline ~ExceptionScope();
private:
Isolate* isolate_;
@@ -698,9 +655,6 @@ class Isolate {
int frame_limit,
StackTrace::StackTraceOptions options);
- void SetAbortOnUncaughtExceptionCallback(
- v8::Isolate::AbortOnUncaughtExceptionCallback callback);
-
enum PrintStackMode { kPrintStackConcise, kPrintStackVerbose };
void PrintCurrentStackTrace(FILE* out);
void PrintStack(StringStream* accumulator,
@@ -775,7 +729,7 @@ class Isolate {
// Attempts to compute the current source location, storing the
// result in the target out parameter.
- void ComputeLocation(MessageLocation* target);
+ bool ComputeLocation(MessageLocation* target);
bool ComputeLocationFromException(MessageLocation* target,
Handle<Object> exception);
bool ComputeLocationFromStackTrace(MessageLocation* target,
@@ -837,13 +791,9 @@ class Isolate {
ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR
-#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
- Handle<type> name() { \
- return Handle<type>(native_context()->name(), this); \
- } \
- bool is_##name(type* value) { \
- return native_context()->is_##name(value); \
- }
+#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
+ inline Handle<type> name(); \
+ inline bool is_##name(type* value);
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
@@ -1074,6 +1024,10 @@ class Isolate {
void* stress_deopt_count_address() { return &stress_deopt_count_; }
+ void* vector_store_virtual_register_address() {
+ return &vector_store_virtual_register_;
+ }
+
base::RandomNumberGenerator* random_number_generator();
// Given an address occupied by a live code object, return that object.
@@ -1349,6 +1303,8 @@ class Isolate {
// Counts deopt points if deopt_every_n_times is enabled.
unsigned int stress_deopt_count_;
+ Address vector_store_virtual_register_;
+
int next_optimization_id_;
#if TRACE_MAPS
@@ -1369,9 +1325,6 @@ class Isolate {
std::set<Cancelable*> cancelable_tasks_;
- v8::Isolate::AbortOnUncaughtExceptionCallback
- abort_on_uncaught_exception_callback_;
-
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class OptimizingCompileDispatcher;
@@ -1417,11 +1370,7 @@ class PromiseOnStack {
class SaveContext BASE_EMBEDDED {
public:
explicit SaveContext(Isolate* isolate);
-
- ~SaveContext() {
- isolate_->set_context(context_.is_null() ? NULL : *context_);
- isolate_->set_save_context(prev_);
- }
+ ~SaveContext();
Handle<Context> context() { return context_; }
SaveContext* prev() { return prev_; }
@@ -1442,9 +1391,7 @@ class SaveContext BASE_EMBEDDED {
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
public:
- explicit AssertNoContextChange(Isolate* isolate)
- : isolate_(isolate),
- context_(isolate->context(), isolate) { }
+ explicit AssertNoContextChange(Isolate* isolate);
~AssertNoContextChange() {
DCHECK(isolate_->context() == *context_);
}
@@ -1484,11 +1431,17 @@ class StackLimitCheck BASE_EMBEDDED {
explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
// Use this to check for stack-overflows in C++ code.
- inline bool HasOverflowed() const {
+ bool HasOverflowed() const {
StackGuard* stack_guard = isolate_->stack_guard();
return GetCurrentStackPosition() < stack_guard->real_climit();
}
+ // Use this to check for interrupt request in C++ code.
+ bool InterruptRequested() {
+ StackGuard* stack_guard = isolate_->stack_guard();
+ return GetCurrentStackPosition() < stack_guard->climit();
+ }
+
// Use this to check for stack-overflow when entering runtime from JS code.
bool JsHasOverflowed(uintptr_t gap = 0) const;
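The new InterruptRequested() compares against climit() where HasOverflowed() uses real_climit(): the interrupt limit can be moved past the current stack position to request a check without the stack actually being exhausted. A rough sketch of the two-threshold idea (simplified, not V8's actual StackGuard; assumes a downward-growing stack):

```cpp
#include <cstdint>
#include <cstdio>

// Simplified stack guard with two limits: real_climit_ marks genuine
// exhaustion, while climit_ can be moved past the current stack position
// to request an interrupt check without the stack being full.
struct StackGuard {
  uintptr_t real_climit_;
  uintptr_t climit_;
};

static uintptr_t CurrentStackPosition() {
  int marker;  // the address of a local approximates the stack pointer
  return reinterpret_cast<uintptr_t>(&marker);
}

static bool HasOverflowed(const StackGuard& g) {
  return CurrentStackPosition() < g.real_climit_;
}

static bool InterruptRequested(const StackGuard& g) {
  return CurrentStackPosition() < g.climit_;
}

int main() {
  uintptr_t sp = CurrentStackPosition();
  // Plenty of real stack left, but climit_ sits above the current position,
  // so only the interrupt predicate fires.
  StackGuard guard{sp - (64u * 1024u), sp + 1};
  std::printf("overflowed=%d interrupt=%d\n",
              HasOverflowed(guard), InterruptRequested(guard));
  return 0;
}
```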
diff --git a/deps/v8/src/iterator-prototype.js b/deps/v8/src/iterator-prototype.js
index 96dd7bfde1..2f49d90b1c 100644
--- a/deps/v8/src/iterator-prototype.js
+++ b/deps/v8/src/iterator-prototype.js
@@ -9,13 +9,14 @@ var $iteratorPrototype;
%CheckIsBootstrapping();
var GlobalObject = global.Object;
+ var iteratorSymbol = utils.ImportNow("iterator_symbol");
// 25.1.2.1 %IteratorPrototype% [ @@iterator ] ( )
function IteratorPrototypeIterator() {
return this;
}
- utils.SetFunctionName(IteratorPrototypeIterator, symbolIterator);
- %AddNamedProperty($iteratorPrototype, symbolIterator,
+ utils.SetFunctionName(IteratorPrototypeIterator, iteratorSymbol);
+ %AddNamedProperty($iteratorPrototype, iteratorSymbol,
IteratorPrototypeIterator, DONT_ENUM);
})
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 81c83bd1d8..cac4979859 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -7,6 +7,7 @@
#include "src/char-predicates.h"
#include "src/conversions.h"
+#include "src/debug/debug.h"
#include "src/factory.h"
#include "src/messages.h"
#include "src/scanner.h"
@@ -244,9 +245,11 @@ MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
break;
}
- MessageLocation location(factory->NewScript(source_),
- position_,
- position_ + 1);
+ Handle<Script> script(factory->NewScript(source_));
+ // We should send a compile error event because we compile the JSON
+ // object in a separate source file.
+ isolate()->debug()->OnCompileError(script);
+ MessageLocation location(script, position_, position_ + 1);
Handle<Object> error = factory->NewSyntaxError(message, argument);
return isolate()->template Throw<Object>(error, &location);
}
@@ -263,10 +266,10 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonValue() {
return Handle<Object>::null();
}
- if (isolate_->stack_guard()->InterruptRequested()) {
+ if (stack_check.InterruptRequested()) {
ExecutionAccess access(isolate_);
    // Avoid blocking GC in a long-running parser (v8:3974).
- isolate_->stack_guard()->CheckAndHandleGCInterrupt();
+ isolate_->stack_guard()->HandleGCInterrupt();
}
if (c0_ == '"') return ParseJsonString();
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index 3bbfb63a65..fa4946dad7 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -383,12 +383,12 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
if (class_name == isolate_->heap()->String_string()) {
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, value, Execution::ToString(isolate_, object), EXCEPTION);
+ isolate_, value, Object::ToString(isolate_, object), EXCEPTION);
SerializeString(Handle<String>::cast(value));
} else if (class_name == isolate_->heap()->Number_string()) {
Handle<Object> value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate_, value, Execution::ToNumber(isolate_, object), EXCEPTION);
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate_, value, Object::ToNumber(object),
+ EXCEPTION);
if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
SerializeHeapNumber(Handle<HeapNumber>::cast(value));
} else if (class_name == isolate_->heap()->Boolean_string()) {
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index 36fda8e1e0..6f8489088b 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -17,14 +17,13 @@ var MathMax;
var MathMin;
var ObjectHasOwnProperty;
var ToNumber;
-var ToString;
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
MathMax = from.MathMax;
MathMin = from.MathMin;
ObjectHasOwnProperty = from.ObjectHasOwnProperty;
ToNumber = from.ToNumber;
- ToString = from.ToString;
});
// -------------------------------------------------------------------
@@ -51,13 +50,13 @@ function Revive(holder, name, reviver) {
}
}
}
- return %_CallFunction(holder, name, val, reviver);
+ return %_Call(reviver, holder, name, val);
}
function JSONParse(text, reviver) {
- var unfiltered = %ParseJson(TO_STRING_INLINE(text));
- if (IS_SPEC_FUNCTION(reviver)) {
+ var unfiltered = %ParseJson(text);
+ if (IS_CALLABLE(reviver)) {
return Revive({'': unfiltered}, '', reviver);
} else {
return unfiltered;
@@ -145,12 +144,12 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
var value = holder[key];
if (IS_SPEC_OBJECT(value)) {
var toJSON = value.toJSON;
- if (IS_SPEC_FUNCTION(toJSON)) {
- value = %_CallFunction(value, key, toJSON);
+ if (IS_CALLABLE(toJSON)) {
+ value = %_Call(toJSON, value, key);
}
}
- if (IS_SPEC_FUNCTION(replacer)) {
- value = %_CallFunction(holder, key, value, replacer);
+ if (IS_CALLABLE(replacer)) {
+ value = %_Call(replacer, holder, key, value);
}
if (IS_STRING(value)) {
return %QuoteJSONString(value);
@@ -160,7 +159,7 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
return value ? "true" : "false";
} else if (IS_NULL(value)) {
return "null";
- } else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
+ } else if (IS_SPEC_OBJECT(value) && !IS_CALLABLE(value)) {
// Non-callable object. If it's a primitive wrapper, it must be unwrapped.
if (IS_ARRAY(value)) {
return SerializeArray(value, replacer, stack, indent, gap);
@@ -168,7 +167,7 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
value = ToNumber(value);
return JSON_NUMBER_TO_STRING(value);
} else if (IS_STRING_WRAPPER(value)) {
- return %QuoteJSONString(ToString(value));
+ return %QuoteJSONString(TO_STRING(value));
} else if (IS_BOOLEAN_WRAPPER(value)) {
return %_ValueOf(value) ? "true" : "false";
} else {
@@ -197,7 +196,7 @@ function JSONStringify(value, replacer, space) {
} else if (IS_NUMBER(v)) {
item = %_NumberToString(v);
} else if (IS_STRING_WRAPPER(v) || IS_NUMBER_WRAPPER(v)) {
- item = ToString(v);
+ item = TO_STRING(v);
} else {
continue;
}
@@ -213,12 +212,12 @@ function JSONStringify(value, replacer, space) {
if (IS_NUMBER_WRAPPER(space)) {
space = ToNumber(space);
} else if (IS_STRING_WRAPPER(space)) {
- space = ToString(space);
+ space = TO_STRING(space);
}
}
var gap;
if (IS_NUMBER(space)) {
- space = MathMax(0, MathMin($toInteger(space), 10));
+ space = MathMax(0, MathMin(TO_INTEGER(space), 10));
gap = %_SubString(" ", 0, space);
} else if (IS_STRING(space)) {
if (space.length > 10) {
@@ -234,7 +233,7 @@ function JSONStringify(value, replacer, space) {
// -------------------------------------------------------------------
-%AddNamedProperty(GlobalJSON, symbolToStringTag, "JSON", READ_ONLY | DONT_ENUM);
+%AddNamedProperty(GlobalJSON, toStringTagSymbol, "JSON", READ_ONLY | DONT_ENUM);
// Set up non-enumerable properties of the JSON object.
utils.InstallFunctions(GlobalJSON, DONT_ENUM, [
@@ -252,8 +251,6 @@ function JsonSerializeAdapter(key, object) {
return JSONSerialize(key, holder, UNDEFINED, new InternalArray(), "", "");
}
-utils.ExportToRuntime(function(to) {
- to.JsonSerializeAdapter = JsonSerializeAdapter;
-});
+%InstallToContext(["json_serialize_adapter", JsonSerializeAdapter]);
})
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/layout-descriptor.cc
index b961a7de96..11a72e732d 100644
--- a/deps/v8/src/layout-descriptor.cc
+++ b/deps/v8/src/layout-descriptor.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <sstream>
+#include "src/layout-descriptor.h"
-#include "src/v8.h"
+#include <sstream>
#include "src/base/bits.h"
-#include "src/layout-descriptor.h"
+#include "src/handles-inl.h"
using v8::base::bits::CountTrailingZeros32;
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index 94ef14dbae..5a247d5fd7 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -250,7 +250,7 @@ int SortedListBSearch(const List<T>& list, P cmp) {
int low = 0;
int high = list.length() - 1;
while (low <= high) {
- int mid = (low + high) / 2;
+ int mid = low + (high - low) / 2;
T mid_elem = list[mid];
if (cmp(&mid_elem) > 0) {
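The one-line change in SortedListBSearch is the classic midpoint-overflow fix: (low + high) / 2 can overflow when both indices are large, while low + (high - low) / 2 stays in range. A self-contained demonstration:

```cpp
#include <climits>
#include <cstdio>

int main() {
  int low = INT_MAX - 2, high = INT_MAX - 1;
  // (low + high) overflows int, which is undefined behavior in C++ and
  // on typical hardware yields a negative midpoint.
  // int bad = (low + high) / 2;        // don't do this
  int good = low + (high - low) / 2;    // always within [low, high]
  std::printf("midpoint = %d\n", good); // INT_MAX - 2
  return (good >= low && good <= high) ? 0 : 1;
}
```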
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index afa28bbb40..36a12e75b3 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/lithium-allocator.h"
#include "src/hydrogen.h"
#include "src/lithium-inl.h"
@@ -1336,14 +1336,9 @@ void LAllocator::BuildLiveRanges() {
while (!iterator.Done()) {
found = true;
int operand_index = iterator.Current();
- if (chunk_->info()->IsStub()) {
- CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
- PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
- } else {
- DCHECK(chunk_->info()->IsOptimizing());
+ {
AllowHandleDereference allow_deref;
- PrintF("Function: %s\n",
- chunk_->info()->literal()->debug_name()->ToCString().get());
+ PrintF("Function: %s\n", chunk_->info()->GetDebugName().get());
}
PrintF("Value %d used before first definition!\n", operand_index);
LiveRange* range = LiveRangeFor(operand_index);
diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/lithium-codegen.cc
index 24c1301dc5..267df58ccd 100644
--- a/deps/v8/src/lithium-codegen.cc
+++ b/deps/v8/src/lithium-codegen.cc
@@ -6,8 +6,6 @@
#include <sstream>
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_IA32
#include "src/ia32/lithium-ia32.h" // NOLINT
#include "src/ia32/lithium-codegen-ia32.h" // NOLINT
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index a9d7748ef3..7d37532ace 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -4,8 +4,6 @@
#include "src/lithium.h"
-#include "src/v8.h"
-
#include "src/scopes.h"
#if V8_TARGET_ARCH_IA32
@@ -522,9 +520,7 @@ Handle<Code> LChunk::Codegen() {
if (generator.GenerateCode()) {
generator.CheckEnvironmentUsage();
CodeGenerator::MakeCodePrologue(info(), "optimized");
- Code::Flags flags = info()->flags();
- Handle<Code> code =
- CodeGenerator::MakeCodeEpilogue(&assembler, flags, info());
+ Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&assembler, info());
generator.FinishCode(code);
CommitDependencies(code);
code->set_is_crankshafted(true);
diff --git a/deps/v8/src/log-inl.h b/deps/v8/src/log-inl.h
index a96631d059..520d05c4d3 100644
--- a/deps/v8/src/log-inl.h
+++ b/deps/v8/src/log-inl.h
@@ -7,14 +7,15 @@
#include "src/log.h"
#include "src/isolate.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
Logger::LogEventsAndTags Logger::ToNativeByScript(Logger::LogEventsAndTags tag,
Script* script) {
- if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG)
- && script->type()->value() == Script::TYPE_NATIVE) {
+ if ((tag == FUNCTION_TAG || tag == LAZY_COMPILE_TAG || tag == SCRIPT_TAG) &&
+ script->type() == Script::TYPE_NATIVE) {
switch (tag) {
case FUNCTION_TAG: return NATIVE_FUNCTION_TAG;
case LAZY_COMPILE_TAG: return NATIVE_LAZY_COMPILE_TAG;
diff --git a/deps/v8/src/log-utils.h b/deps/v8/src/log-utils.h
index 87dab52406..67143078a9 100644
--- a/deps/v8/src/log-utils.h
+++ b/deps/v8/src/log-utils.h
@@ -5,6 +5,8 @@
#ifndef V8_LOG_UTILS_H_
#define V8_LOG_UTILS_H_
+#include <cstdarg>
+
#include "src/allocation.h"
#include "src/base/platform/mutex.h"
#include "src/flags.h"
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 84c863b654..ea69fb4bee 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -7,18 +7,16 @@
#include <cstdarg>
#include <sstream>
-#include "src/v8.h"
-
#include "src/bailout-reason.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
#include "src/log-inl.h"
#include "src/log-utils.h"
#include "src/macro-assembler.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/runtime-profiler.h"
#include "src/string-stream.h"
#include "src/vm-state-inl.h"
@@ -505,6 +503,7 @@ class JitLogger : public CodeEventLogger {
int length);
JitCodeEventHandler code_event_handler_;
+ base::Mutex logger_mutex_;
};
@@ -534,6 +533,7 @@ void JitLogger::LogRecordedBuffer(Code* code,
void JitLogger::CodeMoveEvent(Address from, Address to) {
+ base::LockGuard<base::Mutex> guard(&logger_mutex_);
Code* from_code = Code::cast(HeapObject::FromAddress(from));
JitCodeEvent event;
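The new logger_mutex_ serializes JitLogger callbacks, since the GC can move code on one thread while another logs code creation. A minimal sketch of the pattern using std::mutex in place of v8::base::Mutex:

```cpp
#include <cstdio>
#include <mutex>
#include <thread>

// Stand-in for JitLogger: every event handler takes the same mutex, so
// concurrent move/creation events cannot interleave their updates.
class EventLogger {
 public:
  void CodeMoveEvent(int from, int to) {
    std::lock_guard<std::mutex> guard(mutex_);
    (void)from;
    (void)to;
    ++events_;  // mutate shared state only while holding the lock
  }
  int events() const { return events_; }

 private:
  std::mutex mutex_;
  int events_ = 0;
};

int main() {
  EventLogger logger;
  std::thread a([&] { for (int i = 0; i < 10000; ++i) logger.CodeMoveEvent(i, i + 1); });
  std::thread b([&] { for (int i = 0; i < 10000; ++i) logger.CodeMoveEvent(i, i + 1); });
  a.join();
  b.join();
  std::printf("events = %d\n", logger.events());  // always 20000
  return 0;
}
```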
@@ -1610,8 +1610,7 @@ void Logger::LogCodeObject(Object* object) {
case Code::COMPARE_NIL_IC: // fall through
case Code::TO_BOOLEAN_IC: // fall through
case Code::STUB:
- description =
- CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true);
+ description = CodeStub::MajorName(CodeStub::GetMajorKey(code_object));
if (description == NULL)
description = "A stub from the snapshot";
tag = Logger::STUB_TAG;
@@ -1648,6 +1647,10 @@ void Logger::LogCodeObject(Object* object) {
description = "A keyed store IC from the snapshot";
tag = Logger::KEYED_STORE_IC_TAG;
break;
+ case Code::WASM_FUNCTION:
+ description = "A wasm function";
+ tag = Logger::STUB_TAG;
+ break;
case Code::NUMBER_OF_KINDS:
break;
}
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index a98e5d9a11..809c35e4a5 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/lookup.h"
#include "src/bootstrapper.h"
#include "src/deoptimizer.h"
-#include "src/lookup.h"
+#include "src/isolate-inl.h"
#include "src/lookup-inl.h"
namespace v8 {
@@ -94,7 +94,7 @@ Handle<JSObject> LookupIterator::GetStoreTarget() const {
if (receiver_->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate(), receiver_);
if (iter.IsAtEnd()) return Handle<JSGlobalProxy>::cast(receiver_);
- return Handle<JSGlobalObject>::cast(PrototypeIterator::GetCurrent(iter));
+ return PrototypeIterator::GetCurrent<JSGlobalObject>(iter);
}
return Handle<JSObject>::cast(receiver_);
}
@@ -134,7 +134,7 @@ void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
ElementsKind kind = holder_map_->elements_kind();
ElementsKind to = value->OptimalElementsKind();
if (IsHoleyElementsKind(kind)) to = GetHoleyElementsKind(to);
- to = IsMoreGeneralElementsKindTransition(kind, to) ? to : kind;
+ to = GetMoreGeneralElementsKind(kind, to);
JSObject::TransitionElementsKind(holder, to);
holder_map_ = handle(holder->map(), isolate_);
@@ -372,7 +372,7 @@ bool LookupIterator::InternalHolderIsReceiverOrHiddenPrototype() const {
PrototypeIterator iter(isolate(), current,
PrototypeIterator::START_AT_RECEIVER);
do {
- if (JSReceiver::cast(iter.GetCurrent()) == holder) return true;
+ if (iter.GetCurrent<JSReceiver>() == holder) return true;
DCHECK(!current->IsJSProxy());
iter.Advance();
} while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 5ea9657515..c865a5fb0c 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -5,6 +5,8 @@
#ifndef V8_MACRO_ASSEMBLER_H_
#define V8_MACRO_ASSEMBLER_H_
+#include "src/assembler.h"
+
// Helper types to make boolean flag easier to read at call-site.
enum InvokeFlag {
@@ -32,62 +34,43 @@ enum AllocationFlags {
};
-// Invalid depth in prototype chain.
-const int kInvalidProtoDepth = -1;
-
#if V8_TARGET_ARCH_IA32
-#include "src/assembler.h"
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/assembler-ia32-inl.h"
-#include "src/code.h" // NOLINT, must be after assembler_*.h
#include "src/ia32/macro-assembler-ia32.h"
#elif V8_TARGET_ARCH_X64
-#include "src/assembler.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/assembler-x64-inl.h"
-#include "src/code.h" // NOLINT, must be after assembler_*.h
#include "src/x64/macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/constants-arm64.h"
-#include "src/assembler.h"
-#include "src/arm64/assembler-arm64.h" // NOLINT
+#include "src/arm64/assembler-arm64.h"
#include "src/arm64/assembler-arm64-inl.h"
-#include "src/code.h" // NOLINT, must be after assembler_*.h
-#include "src/arm64/macro-assembler-arm64.h" // NOLINT
+#include "src/arm64/constants-arm64.h"
+#include "src/arm64/macro-assembler-arm64.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
-#include "src/arm/constants-arm.h"
-#include "src/assembler.h"
-#include "src/arm/assembler-arm.h" // NOLINT
+#include "src/arm/assembler-arm.h"
#include "src/arm/assembler-arm-inl.h"
-#include "src/code.h" // NOLINT, must be after assembler_*.h
-#include "src/arm/macro-assembler-arm.h" // NOLINT
+#include "src/arm/constants-arm.h"
+#include "src/arm/macro-assembler-arm.h"
#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/constants-ppc.h"
-#include "src/assembler.h" // NOLINT
-#include "src/ppc/assembler-ppc.h" // NOLINT
+#include "src/ppc/assembler-ppc.h"
#include "src/ppc/assembler-ppc-inl.h"
-#include "src/code.h" // NOLINT, must be after assembler_*.h
+#include "src/ppc/constants-ppc.h"
#include "src/ppc/macro-assembler-ppc.h"
#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/constants-mips.h"
-#include "src/assembler.h" // NOLINT
-#include "src/mips/assembler-mips.h" // NOLINT
+#include "src/mips/assembler-mips.h"
#include "src/mips/assembler-mips-inl.h"
-#include "src/code.h" // NOLINT, must be after assembler_*.h
+#include "src/mips/constants-mips.h"
#include "src/mips/macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/constants-mips64.h"
-#include "src/assembler.h" // NOLINT
-#include "src/mips64/assembler-mips64.h" // NOLINT
+#include "src/mips64/assembler-mips64.h"
#include "src/mips64/assembler-mips64-inl.h"
-#include "src/code.h" // NOLINT, must be after assembler_*.h
+#include "src/mips64/constants-mips64.h"
#include "src/mips64/macro-assembler-mips64.h"
#elif V8_TARGET_ARCH_X87
-#include "src/assembler.h"
#include "src/x87/assembler-x87.h"
#include "src/x87/assembler-x87-inl.h"
-#include "src/code.h" // NOLINT, must be after assembler_*.h
#include "src/x87/macro-assembler-x87.h"
#else
#error Unsupported target architecture.
@@ -245,6 +228,35 @@ class Comment {
#endif // DEBUG
+// Wrapper class for passing expected and actual parameter counts as
+// either registers or immediate values. Used to make sure that the
+// caller provides exactly the expected number of parameters to the
+// callee.
+class ParameterCount BASE_EMBEDDED {
+ public:
+ explicit ParameterCount(Register reg) : reg_(reg), immediate_(0) {}
+ explicit ParameterCount(int imm) : reg_(no_reg), immediate_(imm) {}
+
+ bool is_reg() const { return !reg_.is(no_reg); }
+ bool is_immediate() const { return !is_reg(); }
+
+ Register reg() const {
+ DCHECK(is_reg());
+ return reg_;
+ }
+ int immediate() const {
+ DCHECK(is_immediate());
+ return immediate_;
+ }
+
+ private:
+ const Register reg_;
+ const int immediate_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ParameterCount);
+};
+
+
class AllocationUtils {
public:
static ExternalReference GetAllocationTopReference(
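ParameterCount, added above, is a small either/or wrapper: exactly one of reg_ and immediate_ is meaningful, and the accessors assert which. A hedged usage sketch with a stubbed Register type (V8's real Register is per-architecture):

```cpp
#include <cassert>
#include <cstdio>

// Minimal stand-ins for V8's Register and no_reg sentinel.
struct Register {
  int code;
  bool is(Register other) const { return code == other.code; }
};
static const Register no_reg{-1};

class ParameterCount {
 public:
  explicit ParameterCount(Register reg) : reg_(reg), immediate_(0) {}
  explicit ParameterCount(int imm) : reg_(no_reg), immediate_(imm) {}

  bool is_reg() const { return !reg_.is(no_reg); }
  bool is_immediate() const { return !is_reg(); }
  Register reg() const { assert(is_reg()); return reg_; }
  int immediate() const { assert(is_immediate()); return immediate_; }

 private:
  const Register reg_;
  const int immediate_;
};

int main() {
  ParameterCount expected(3);          // arity known at compile time
  ParameterCount actual(Register{2});  // arity held in a register
  std::printf("expected=%d actual_in_reg=r%d\n",
              expected.immediate(), actual.reg().code);
  return 0;
}
```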
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 49b2cad8bd..6de9120fb2 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -39,10 +39,6 @@ define NEW_TWO_BYTE_STRING = false;
define GETTER = 0;
define SETTER = 1;
-define NO_HINT = 0;
-define NUMBER_HINT = 1;
-define STRING_HINT = 2;
-
# For date.js.
define HoursPerDay = 24;
define MinutesPerHour = 60;
@@ -74,6 +70,9 @@ define kMaxMonth = 10000000;
# Reflect.construct().
define kSafeArgumentsLength = 0x800000;
+# 2^53 - 1
+define kMaxSafeInteger = 9007199254740991;
+
# Strict mode flags for passing to %SetProperty
define kSloppyMode = 0;
define kStrictMode = 1;
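The kMaxSafeInteger constant introduced above is 2^53 - 1, the largest integer a 64-bit double represents exactly (53 significand bits). A quick check of that boundary:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t kMaxSafeInteger = (int64_t{1} << 53) - 1;  // 9007199254740991
  double exact = static_cast<double>(kMaxSafeInteger);
  // Above the boundary, distinct integers collapse to the same double:
  // 2^53 + 1 is not representable and rounds to 2^53.
  double a = static_cast<double>(kMaxSafeInteger + 1);  // 2^53
  double b = static_cast<double>(kMaxSafeInteger + 2);  // rounds to 2^53
  std::printf("max safe integer round-trips: %d\n",
              static_cast<int64_t>(exact) == kMaxSafeInteger);
  std::printf("collapse above the limit: %d\n", a == b);
  return 0;
}
```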
@@ -93,7 +92,7 @@ macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
-macro IS_OBJECT(arg) = (%_IsObject(arg));
+macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_DATE(arg) = (%_IsDate(arg));
macro IS_FUNCTION(arg) = (%_IsFunction(arg));
@@ -128,10 +127,7 @@ macro IS_SPEC_OBJECT(arg) = (%_IsSpecObject(arg));
# Macro for ECMAScript 5 queries of the type:
# "IsCallable(O)"
-# We assume here that this is the same as being either a function or a function
-# proxy. That ignores host objects with [[Call]] methods, but in most situations
-# we cannot handle those anyway.
-macro IS_SPEC_FUNCTION(arg) = (%_ClassOf(arg) === 'Function');
+macro IS_CALLABLE(arg) = (typeof(arg) === 'function');
# Macro for ES6 CheckObjectCoercible
# Will throw a TypeError of the form "[functionName] called on null or undefined".
@@ -145,28 +141,31 @@ define kBoundArgumentsStartIndex = 2;
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro NUMBER_IS_FINITE(arg) = (%_IsSmi(%IS_VAR(arg)) || ((arg == arg) && (arg != 1/0) && (arg != -1/0)));
-macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToInteger(ToNumber(arg)));
-macro TO_INTEGER_FOR_SIDE_EFFECT(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToNumber(arg));
+macro TO_INTEGER(arg) = (%_ToInteger(arg));
macro TO_INTEGER_MAP_MINUS_ZERO(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : %NumberToIntegerMapMinusZero(ToNumber(arg)));
macro TO_INT32(arg) = (arg | 0);
macro TO_UINT32(arg) = (arg >>> 0);
-macro TO_STRING_INLINE(arg) = (IS_STRING(%IS_VAR(arg)) ? arg : $nonStringToString(arg));
+macro TO_LENGTH(arg) = (%ToLength(arg));
+macro TO_LENGTH_OR_UINT32(arg) = (harmony_tolength ? TO_LENGTH(arg) : TO_UINT32(arg));
+macro TO_STRING(arg) = (%_ToString(arg));
macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : $nonNumberToNumber(arg));
macro TO_OBJECT(arg) = (%_ToObject(arg));
+macro TO_PRIMITIVE(arg) = (%_ToPrimitive(arg));
+macro TO_PRIMITIVE_NUMBER(arg) = (%_ToPrimitive_Number(arg));
+macro TO_PRIMITIVE_STRING(arg) = (%_ToPrimitive_String(arg));
+macro TO_NAME(arg) = (%_ToName(arg));
macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
macro HAS_OWN_PROPERTY(arg, index) = (%_CallFunction(arg, index, ObjectHasOwnProperty));
-macro SHOULD_CREATE_WRAPPER(functionName, receiver) = (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(functionName));
macro HAS_INDEX(array, index, is_array) = ((is_array && %_HasFastPackedElements(%IS_VAR(array))) ? (index < array.length) : (index in array));
+macro MAX_SIMPLE(argA, argB) = (argA < argB ? argB : argA);
+macro MIN_SIMPLE(argA, argB) = (argA < argB ? argA : argB);
# Private names.
-macro GLOBAL_PRIVATE(name) = (%CreateGlobalPrivateSymbol(name));
-macro NEW_PRIVATE(name) = (%CreatePrivateSymbol(name));
macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
macro HAS_PRIVATE(obj, sym) = (%HasOwnProperty(obj, sym));
macro HAS_DEFINED_PRIVATE(obj, sym) = (!IS_UNDEFINED(obj[sym]));
macro GET_PRIVATE(obj, sym) = (obj[sym]);
macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
-macro DELETE_PRIVATE(obj, sym) = (delete obj[sym]);
# Constants. The compiler constant folds them.
define NAN = $NaN;
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index 51f4d445c7..05eb9e46d7 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -15,6 +15,7 @@ var rngstate; // Initialized to a Uint32Array during genesis.
var GlobalMath = global.Math;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
//-------------------------------------------------------------------
@@ -288,7 +289,7 @@ function CubeRoot(x) {
// -------------------------------------------------------------------
-%AddNamedProperty(GlobalMath, symbolToStringTag, "Math", READ_ONLY | DONT_ENUM);
+%AddNamedProperty(GlobalMath, toStringTagSymbol, "Math", READ_ONLY | DONT_ENUM);
// Set up math constants.
utils.InstallConstants(GlobalMath, [
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index 3de6717cbc..640c2dff4e 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/messages.h"
#include "src/api.h"
#include "src/execution.h"
-#include "src/messages.h"
+#include "src/isolate-inl.h"
#include "src/string-builder.h"
namespace v8 {
@@ -34,17 +34,20 @@ void MessageHandler::DefaultMessageReport(Isolate* isolate,
Handle<JSMessageObject> MessageHandler::MakeMessageObject(
- Isolate* isolate, MessageTemplate::Template message, MessageLocation* loc,
- Handle<Object> argument, Handle<JSArray> stack_frames) {
+ Isolate* isolate, MessageTemplate::Template message,
+ MessageLocation* location, Handle<Object> argument,
+ Handle<JSArray> stack_frames) {
Factory* factory = isolate->factory();
- int start = 0;
- int end = 0;
+ int start = -1;
+ int end = -1;
Handle<Object> script_handle = factory->undefined_value();
- if (loc) {
- start = loc->start_pos();
- end = loc->end_pos();
- script_handle = Script::GetWrapper(loc->script());
+ if (location != NULL) {
+ start = location->start_pos();
+ end = location->end_pos();
+ script_handle = Script::GetWrapper(location->script());
+ } else {
+ script_handle = Script::GetWrapper(isolate->factory()->empty_script());
}
Handle<Object> stack_frames_handle = stack_frames.is_null()
@@ -139,37 +142,50 @@ base::SmartArrayPointer<char> MessageHandler::GetLocalizedMessage(
}
-Handle<Object> CallSite::GetFileName(Isolate* isolate) {
- Handle<Object> script(fun_->shared()->script(), isolate);
+CallSite::CallSite(Isolate* isolate, Handle<JSObject> call_site_obj)
+ : isolate_(isolate) {
+ receiver_ = JSObject::GetDataProperty(
+ call_site_obj, isolate->factory()->call_site_receiver_symbol());
+ fun_ = Handle<JSFunction>::cast(JSObject::GetDataProperty(
+ call_site_obj, isolate->factory()->call_site_function_symbol()));
+ pos_ = Handle<Smi>::cast(JSObject::GetDataProperty(
+ call_site_obj,
+ isolate->factory()->call_site_position_symbol()))
+ ->value();
+}
+
+
+Handle<Object> CallSite::GetFileName() {
+ Handle<Object> script(fun_->shared()->script(), isolate_);
if (script->IsScript()) {
- return Handle<Object>(Handle<Script>::cast(script)->name(), isolate);
+ return Handle<Object>(Handle<Script>::cast(script)->name(), isolate_);
}
- return isolate->factory()->null_value();
+ return isolate_->factory()->null_value();
}
-Handle<Object> CallSite::GetFunctionName(Isolate* isolate) {
+Handle<Object> CallSite::GetFunctionName() {
Handle<String> result = JSFunction::GetDebugName(fun_);
if (result->length() != 0) return result;
- Handle<Object> script(fun_->shared()->script(), isolate);
+ Handle<Object> script(fun_->shared()->script(), isolate_);
if (script->IsScript() &&
Handle<Script>::cast(script)->compilation_type() ==
Script::COMPILATION_TYPE_EVAL) {
- return isolate->factory()->eval_string();
+ return isolate_->factory()->eval_string();
}
- return isolate->factory()->null_value();
+ return isolate_->factory()->null_value();
}
-Handle<Object> CallSite::GetScriptNameOrSourceUrl(Isolate* isolate) {
- Handle<Object> script_obj(fun_->shared()->script(), isolate);
+Handle<Object> CallSite::GetScriptNameOrSourceUrl() {
+ Handle<Object> script_obj(fun_->shared()->script(), isolate_);
if (script_obj->IsScript()) {
Handle<Script> script = Handle<Script>::cast(script_obj);
Object* source_url = script->source_url();
- if (source_url->IsString()) return Handle<Object>(source_url, isolate);
- return Handle<Object>(script->name(), isolate);
+ if (source_url->IsString()) return Handle<Object>(source_url, isolate_);
+ return Handle<Object>(script->name(), isolate_);
}
- return isolate->factory()->null_value();
+ return isolate_->factory()->null_value();
}
@@ -191,25 +207,25 @@ bool CheckMethodName(Isolate* isolate, Handle<JSObject> obj, Handle<Name> name,
}
-Handle<Object> CallSite::GetMethodName(Isolate* isolate) {
- MaybeHandle<JSReceiver> maybe = Object::ToObject(isolate, receiver_);
+Handle<Object> CallSite::GetMethodName() {
+ MaybeHandle<JSReceiver> maybe = Object::ToObject(isolate_, receiver_);
Handle<JSReceiver> receiver;
if (!maybe.ToHandle(&receiver) || !receiver->IsJSObject()) {
- return isolate->factory()->null_value();
+ return isolate_->factory()->null_value();
}
Handle<JSObject> obj = Handle<JSObject>::cast(receiver);
- Handle<Object> function_name(fun_->shared()->name(), isolate);
+ Handle<Object> function_name(fun_->shared()->name(), isolate_);
if (function_name->IsName()) {
Handle<Name> name = Handle<Name>::cast(function_name);
- if (CheckMethodName(isolate, obj, name, fun_,
+ if (CheckMethodName(isolate_, obj, name, fun_,
LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR))
return name;
}
- HandleScope outer_scope(isolate);
+ HandleScope outer_scope(isolate_);
Handle<Object> result;
- for (PrototypeIterator iter(isolate, obj,
+ for (PrototypeIterator iter(isolate_, obj,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
Handle<Object> current = PrototypeIterator::GetCurrent(iter);
@@ -218,26 +234,26 @@ Handle<Object> CallSite::GetMethodName(Isolate* isolate) {
if (current_obj->IsAccessCheckNeeded()) break;
Handle<FixedArray> keys = JSObject::GetEnumPropertyKeys(current_obj, false);
for (int i = 0; i < keys->length(); i++) {
- HandleScope inner_scope(isolate);
+ HandleScope inner_scope(isolate_);
if (!keys->get(i)->IsName()) continue;
- Handle<Name> name_key(Name::cast(keys->get(i)), isolate);
- if (!CheckMethodName(isolate, current_obj, name_key, fun_,
+ Handle<Name> name_key(Name::cast(keys->get(i)), isolate_);
+ if (!CheckMethodName(isolate_, current_obj, name_key, fun_,
LookupIterator::OWN_SKIP_INTERCEPTOR))
continue;
// Return null in case of duplicates to avoid confusion.
- if (!result.is_null()) return isolate->factory()->null_value();
+ if (!result.is_null()) return isolate_->factory()->null_value();
result = inner_scope.CloseAndEscape(name_key);
}
}
if (!result.is_null()) return outer_scope.CloseAndEscape(result);
- return isolate->factory()->null_value();
+ return isolate_->factory()->null_value();
}
-int CallSite::GetLineNumber(Isolate* isolate) {
+int CallSite::GetLineNumber() {
if (pos_ >= 0) {
- Handle<Object> script_obj(fun_->shared()->script(), isolate);
+ Handle<Object> script_obj(fun_->shared()->script(), isolate_);
if (script_obj->IsScript()) {
Handle<Script> script = Handle<Script>::cast(script_obj);
return Script::GetLineNumber(script, pos_) + 1;
@@ -247,9 +263,9 @@ int CallSite::GetLineNumber(Isolate* isolate) {
}
-int CallSite::GetColumnNumber(Isolate* isolate) {
+int CallSite::GetColumnNumber() {
if (pos_ >= 0) {
- Handle<Object> script_obj(fun_->shared()->script(), isolate);
+ Handle<Object> script_obj(fun_->shared()->script(), isolate_);
if (script_obj->IsScript()) {
Handle<Script> script = Handle<Script>::cast(script_obj);
return Script::GetColumnNumber(script, pos_) + 1;
@@ -259,32 +275,32 @@ int CallSite::GetColumnNumber(Isolate* isolate) {
}
-bool CallSite::IsNative(Isolate* isolate) {
- Handle<Object> script(fun_->shared()->script(), isolate);
+bool CallSite::IsNative() {
+ Handle<Object> script(fun_->shared()->script(), isolate_);
return script->IsScript() &&
- Handle<Script>::cast(script)->type()->value() == Script::TYPE_NATIVE;
+ Handle<Script>::cast(script)->type() == Script::TYPE_NATIVE;
}
-bool CallSite::IsToplevel(Isolate* isolate) {
+bool CallSite::IsToplevel() {
return receiver_->IsJSGlobalProxy() || receiver_->IsNull() ||
receiver_->IsUndefined();
}
-bool CallSite::IsEval(Isolate* isolate) {
- Handle<Object> script(fun_->shared()->script(), isolate);
+bool CallSite::IsEval() {
+ Handle<Object> script(fun_->shared()->script(), isolate_);
return script->IsScript() &&
Handle<Script>::cast(script)->compilation_type() ==
Script::COMPILATION_TYPE_EVAL;
}
-bool CallSite::IsConstructor(Isolate* isolate) {
+bool CallSite::IsConstructor() {
if (!receiver_->IsJSObject()) return false;
Handle<Object> constructor =
JSReceiver::GetDataProperty(Handle<JSObject>::cast(receiver_),
- isolate->factory()->constructor_string());
+ isolate_->factory()->constructor_string());
return constructor.is_identical_to(fun_);
}
@@ -456,7 +472,7 @@ MaybeHandle<String> ErrorToStringHelper::GetStringifiedProperty(
String);
if (obj->IsUndefined()) return default_value;
if (!obj->IsString()) {
- ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, Execution::ToString(isolate, obj),
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, obj, Object::ToString(isolate, obj),
String);
}
return Handle<String>::cast(obj);
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 779e60e57f..bb78f3d0a5 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -11,6 +11,7 @@
#define V8_MESSAGES_H_
#include "src/base/smart-pointers.h"
+#include "src/handles.h"
#include "src/list.h"
namespace v8 {
@@ -46,23 +47,23 @@ class MessageLocation {
class CallSite {
public:
- CallSite(Handle<Object> receiver, Handle<JSFunction> fun, int pos)
- : receiver_(receiver), fun_(fun), pos_(pos) {}
+ CallSite(Isolate* isolate, Handle<JSObject> call_site_obj);
- Handle<Object> GetFileName(Isolate* isolate);
- Handle<Object> GetFunctionName(Isolate* isolate);
- Handle<Object> GetScriptNameOrSourceUrl(Isolate* isolate);
- Handle<Object> GetMethodName(Isolate* isolate);
+ Handle<Object> GetFileName();
+ Handle<Object> GetFunctionName();
+ Handle<Object> GetScriptNameOrSourceUrl();
+ Handle<Object> GetMethodName();
// Return 1-based line number, including line offset.
- int GetLineNumber(Isolate* isolate);
+ int GetLineNumber();
// Return 1-based column number, including column offset if first line.
- int GetColumnNumber(Isolate* isolate);
- bool IsNative(Isolate* isolate);
- bool IsToplevel(Isolate* isolate);
- bool IsEval(Isolate* isolate);
- bool IsConstructor(Isolate* isolate);
+ int GetColumnNumber();
+ bool IsNative();
+ bool IsToplevel();
+ bool IsEval();
+ bool IsConstructor();
private:
+ Isolate* isolate_;
Handle<Object> receiver_;
Handle<JSFunction> fun_;
int pos_;
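These header changes mirror the messages.cc hunk above: the isolate moves from every method's parameter list into a member captured by the new constructor. A small sketch of that refactor pattern with simplified types:

```cpp
#include <cstdio>
#include <string>

struct Isolate { std::string name; };

// Before: every accessor needed the isolate threaded through, e.g.
//   std::string GetFileName(Isolate* isolate);
// After: capture it once at construction, as CallSite now does.
class CallSiteInfo {
 public:
  CallSiteInfo(Isolate* isolate, int pos) : isolate_(isolate), pos_(pos) {}

  std::string GetFileName() const { return isolate_->name + ".js"; }
  int GetLineNumber() const { return pos_ >= 0 ? pos_ + 1 : -1; }

 private:
  Isolate* isolate_;  // cached instead of passed to each method
  int pos_;
};

int main() {
  Isolate isolate{"script"};
  CallSiteInfo site(&isolate, 41);
  std::printf("%s:%d\n", site.GetFileName().c_str(), site.GetLineNumber());
  return 0;
}
```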
@@ -257,6 +258,7 @@ class CallSite {
"Offset is outside the bounds of the DataView") \
T(InvalidDataViewLength, "Invalid data view length") \
T(InvalidDataViewOffset, "Start offset is outside the bounds of the buffer") \
+ T(InvalidHint, "Invalid hint: %") \
T(InvalidLanguageTag, "Invalid language tag: %") \
T(InvalidWeakMapKey, "Invalid value used as weak map key") \
T(InvalidWeakSetValue, "Invalid value used in weak set") \
@@ -296,6 +298,8 @@ class CallSite {
T(IllegalAccess, "Illegal access") \
T(IllegalBreak, "Illegal break statement") \
T(IllegalContinue, "Illegal continue statement") \
+ T(IllegalLanguageModeDirective, \
+ "Illegal '%' directive in function with non-simple parameter list") \
T(IllegalReturn, "Illegal return statement") \
T(InvalidLhsInAssignment, "Invalid left-hand side in assignment") \
T(InvalidLhsInFor, "Invalid left-hand side in for-loop") \
@@ -308,8 +312,6 @@ class CallSite {
T(MalformedArrowFunParamList, "Malformed arrow function parameter list") \
T(MalformedRegExp, "Invalid regular expression: /%/: %") \
T(MalformedRegExpFlags, "Invalid regular expression flags") \
- T(MissingArrow, \
- "Expected () to start arrow function, but got '%' instead of '=>'") \
T(ModuleExportUndefined, "Export '%' is not defined in module") \
T(MultipleDefaultsInSwitch, \
"More than one default clause in switch statement") \
@@ -334,6 +336,8 @@ class CallSite {
T(StrictWith, "Strict mode code may not include a with statement") \
T(StrongArguments, \
"In strong mode, 'arguments' is deprecated, use '...args' instead") \
+ T(StrongConstructorDirective, \
+ "\"use strong\" directive is disallowed in class constructor body") \
T(StrongConstructorReturnMisplaced, \
"In strong mode, returning from a constructor before its super " \
"constructor invocation or all assignments to 'this' is deprecated") \
@@ -432,8 +436,9 @@ class MessageHandler {
public:
// Returns a message object for the API to use.
static Handle<JSMessageObject> MakeMessageObject(
- Isolate* isolate, MessageTemplate::Template type, MessageLocation* loc,
- Handle<Object> argument, Handle<JSArray> stack_frames);
+ Isolate* isolate, MessageTemplate::Template type,
+ MessageLocation* location, Handle<Object> argument,
+ Handle<JSArray> stack_frames);
// Report a formatted message (needs JS allocation).
static void ReportMessage(Isolate* isolate, MessageLocation* loc,
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 32766a89fe..5441cfe34a 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -5,8 +5,6 @@
// -------------------------------------------------------------------
var $errorToString;
-var $internalErrorSymbol;
-var $stackTraceSymbol;
var MakeError;
var MakeEvalError;
var MakeRangeError;
@@ -26,19 +24,34 @@ var ArrayJoin;
var Bool16x8ToString;
var Bool32x4ToString;
var Bool8x16ToString;
+var callSiteReceiverSymbol =
+ utils.ImportNow("call_site_receiver_symbol");
+var callSiteFunctionSymbol =
+ utils.ImportNow("call_site_function_symbol");
+var callSitePositionSymbol =
+ utils.ImportNow("call_site_position_symbol");
+var callSiteStrictSymbol =
+ utils.ImportNow("call_site_strict_symbol");
var Float32x4ToString;
+var formattedStackTraceSymbol =
+ utils.ImportNow("formatted_stack_trace_symbol");
var FunctionSourceString
var GlobalObject = global.Object;
var Int16x8ToString;
var Int32x4ToString;
var Int8x16ToString;
var InternalArray = utils.InternalArray;
+var internalErrorSymbol = utils.ImportNow("internal_error_symbol");
var ObjectDefineProperty;
var ObjectToString;
+var stackTraceSymbol = utils.ImportNow("stack_trace_symbol");
var StringCharAt;
var StringIndexOf;
var StringSubstring;
-var ToString;
+var SymbolToString;
+var Uint16x8ToString;
+var Uint32x4ToString;
+var Uint8x16ToString;
utils.Import(function(from) {
ArrayJoin = from.ArrayJoin;
@@ -55,10 +68,10 @@ utils.Import(function(from) {
StringCharAt = from.StringCharAt;
StringIndexOf = from.StringIndexOf;
StringSubstring = from.StringSubstring;
-});
-
-utils.ImportNow(function(from) {
- ToString = from.ToString;
+ SymbolToString = from.SymbolToString;
+ Uint16x8ToString = from.Uint16x8ToString;
+ Uint32x4ToString = from.Uint32x4ToString;
+ Uint8x16ToString = from.Uint8x16ToString;
});
// -------------------------------------------------------------------
@@ -93,16 +106,19 @@ function NoSideEffectToString(obj) {
}
return str;
}
- if (IS_SYMBOL(obj)) return %_CallFunction(obj, $symbolToString);
+ if (IS_SYMBOL(obj)) return %_CallFunction(obj, SymbolToString);
if (IS_SIMD_VALUE(obj)) {
switch (typeof(obj)) {
case 'float32x4': return %_CallFunction(obj, Float32x4ToString);
case 'int32x4': return %_CallFunction(obj, Int32x4ToString);
- case 'bool32x4': return %_CallFunction(obj, Bool32x4ToString);
- case 'int16x8': return %_CallFunction(obj, Int16x8ToString);
- case 'bool16x8': return %_CallFunction(obj, Bool16x8ToString);
case 'int16x8': return %_CallFunction(obj, Int16x8ToString);
+ case 'int8x16': return %_CallFunction(obj, Int8x16ToString);
+ case 'uint32x4': return %_CallFunction(obj, Uint32x4ToString);
+ case 'uint16x8': return %_CallFunction(obj, Uint16x8ToString);
+ case 'uint8x16': return %_CallFunction(obj, Uint8x16ToString);
+ case 'bool32x4': return %_CallFunction(obj, Bool32x4ToString);
case 'bool16x8': return %_CallFunction(obj, Bool16x8ToString);
+ case 'bool8x16': return %_CallFunction(obj, Bool8x16ToString);
}
}
if (IS_OBJECT(obj)
@@ -152,7 +168,7 @@ function ToStringCheckErrorObject(obj) {
if (CanBeSafelyTreatedAsAnErrorObject(obj)) {
return %_CallFunction(obj, ErrorToString);
} else {
- return ToString(obj);
+ return TO_STRING(obj);
}
}
@@ -173,7 +189,7 @@ function ToDetailString(obj) {
function MakeGenericError(constructor, type, arg0, arg1, arg2) {
var error = new constructor(FormatMessage(type, arg0, arg1, arg2));
- error[$internalErrorSymbol] = true;
+ error[internalErrorSymbol] = true;
return error;
}
@@ -574,112 +590,77 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// ----------------------------------------------------------------------------
// Error implementation
-var CallSiteReceiverKey = NEW_PRIVATE("CallSite#receiver");
-var CallSiteFunctionKey = NEW_PRIVATE("CallSite#function");
-var CallSitePositionKey = NEW_PRIVATE("CallSite#position");
-var CallSiteStrictModeKey = NEW_PRIVATE("CallSite#strict_mode");
-
function CallSite(receiver, fun, pos, strict_mode) {
- SET_PRIVATE(this, CallSiteReceiverKey, receiver);
- SET_PRIVATE(this, CallSiteFunctionKey, fun);
- SET_PRIVATE(this, CallSitePositionKey, pos);
- SET_PRIVATE(this, CallSiteStrictModeKey, strict_mode);
+ SET_PRIVATE(this, callSiteReceiverSymbol, receiver);
+ SET_PRIVATE(this, callSiteFunctionSymbol, fun);
+ SET_PRIVATE(this, callSitePositionSymbol, pos);
+ SET_PRIVATE(this, callSiteStrictSymbol, strict_mode);
}
function CallSiteGetThis() {
- return GET_PRIVATE(this, CallSiteStrictModeKey)
- ? UNDEFINED : GET_PRIVATE(this, CallSiteReceiverKey);
+ return GET_PRIVATE(this, callSiteStrictSymbol)
+ ? UNDEFINED : GET_PRIVATE(this, callSiteReceiverSymbol);
}
function CallSiteGetFunction() {
- return GET_PRIVATE(this, CallSiteStrictModeKey)
- ? UNDEFINED : GET_PRIVATE(this, CallSiteFunctionKey);
+ return GET_PRIVATE(this, callSiteStrictSymbol)
+ ? UNDEFINED : GET_PRIVATE(this, callSiteFunctionSymbol);
}
function CallSiteGetPosition() {
- return GET_PRIVATE(this, CallSitePositionKey);
+ return GET_PRIVATE(this, callSitePositionSymbol);
}
function CallSiteGetTypeName() {
- return GetTypeName(GET_PRIVATE(this, CallSiteReceiverKey), false);
+ return GetTypeName(GET_PRIVATE(this, callSiteReceiverSymbol), false);
}
function CallSiteIsToplevel() {
- var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
- var fun = GET_PRIVATE(this, CallSiteFunctionKey);
- var pos = GET_PRIVATE(this, CallSitePositionKey);
- return %CallSiteIsToplevelRT(receiver, fun, pos);
+ return %CallSiteIsToplevelRT(this);
}
function CallSiteIsEval() {
- var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
- var fun = GET_PRIVATE(this, CallSiteFunctionKey);
- var pos = GET_PRIVATE(this, CallSitePositionKey);
- return %CallSiteIsEvalRT(receiver, fun, pos);
+ return %CallSiteIsEvalRT(this);
}
function CallSiteGetEvalOrigin() {
- var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
+ var script = %FunctionGetScript(GET_PRIVATE(this, callSiteFunctionSymbol));
return FormatEvalOrigin(script);
}
function CallSiteGetScriptNameOrSourceURL() {
- var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
- var fun = GET_PRIVATE(this, CallSiteFunctionKey);
- var pos = GET_PRIVATE(this, CallSitePositionKey);
- return %CallSiteGetScriptNameOrSourceUrlRT(receiver, fun, pos);
+ return %CallSiteGetScriptNameOrSourceUrlRT(this);
}
function CallSiteGetFunctionName() {
// See if the function knows its own name
- var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
- var fun = GET_PRIVATE(this, CallSiteFunctionKey);
- var pos = GET_PRIVATE(this, CallSitePositionKey);
- return %CallSiteGetFunctionNameRT(receiver, fun, pos);
+ return %CallSiteGetFunctionNameRT(this);
}
function CallSiteGetMethodName() {
// See if we can find a unique property on the receiver that holds
// this function.
- var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
- var fun = GET_PRIVATE(this, CallSiteFunctionKey);
- var pos = GET_PRIVATE(this, CallSitePositionKey);
- return %CallSiteGetMethodNameRT(receiver, fun, pos);
+ return %CallSiteGetMethodNameRT(this);
}
function CallSiteGetFileName() {
- var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
- var fun = GET_PRIVATE(this, CallSiteFunctionKey);
- var pos = GET_PRIVATE(this, CallSitePositionKey);
- return %CallSiteGetFileNameRT(receiver, fun, pos);
+ return %CallSiteGetFileNameRT(this);
}
function CallSiteGetLineNumber() {
- var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
- var fun = GET_PRIVATE(this, CallSiteFunctionKey);
- var pos = GET_PRIVATE(this, CallSitePositionKey);
- return %CallSiteGetLineNumberRT(receiver, fun, pos);
+ return %CallSiteGetLineNumberRT(this);
}
function CallSiteGetColumnNumber() {
- var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
- var fun = GET_PRIVATE(this, CallSiteFunctionKey);
- var pos = GET_PRIVATE(this, CallSitePositionKey);
- return %CallSiteGetColumnNumberRT(receiver, fun, pos);
+ return %CallSiteGetColumnNumberRT(this);
}
function CallSiteIsNative() {
- var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
- var fun = GET_PRIVATE(this, CallSiteFunctionKey);
- var pos = GET_PRIVATE(this, CallSitePositionKey);
- return %CallSiteIsNativeRT(receiver, fun, pos);
+ return %CallSiteIsNativeRT(this);
}
function CallSiteIsConstructor() {
- var receiver = GET_PRIVATE(this, CallSiteReceiverKey);
- var fun = GET_PRIVATE(this, CallSiteFunctionKey);
- var pos = GET_PRIVATE(this, CallSitePositionKey);
- return %CallSiteIsConstructorRT(receiver, fun, pos);
+ return %CallSiteIsConstructorRT(this);
}
function CallSiteToString() {
@@ -718,7 +699,7 @@ function CallSiteToString() {
var isConstructor = this.isConstructor();
var isMethodCall = !(this.isToplevel() || isConstructor);
if (isMethodCall) {
- var typeName = GetTypeName(GET_PRIVATE(this, CallSiteReceiverKey), true);
+ var typeName = GetTypeName(GET_PRIVATE(this, callSiteReceiverSymbol), true);
var methodName = this.getMethodName();
if (functionName) {
if (typeName &&
@@ -894,8 +875,6 @@ function GetTypeName(receiver, requireConstructor) {
return constructorName;
}
-var formatted_stack_trace_symbol = NEW_PRIVATE("formatted stack trace");
-
// Format the stack trace if not yet done, and return it.
// Cache the formatted stack trace on the holder.
@@ -904,10 +883,10 @@ var StackTraceGetter = function() {
var holder = this;
while (holder) {
var formatted_stack_trace =
- GET_PRIVATE(holder, formatted_stack_trace_symbol);
+ GET_PRIVATE(holder, formattedStackTraceSymbol);
if (IS_UNDEFINED(formatted_stack_trace)) {
// No formatted stack trace available.
- var stack_trace = GET_PRIVATE(holder, $stackTraceSymbol);
+ var stack_trace = GET_PRIVATE(holder, stackTraceSymbol);
if (IS_UNDEFINED(stack_trace)) {
// Neither formatted nor structured stack trace available.
// Look further up the prototype chain.
@@ -915,8 +894,8 @@ var StackTraceGetter = function() {
continue;
}
formatted_stack_trace = FormatStackTrace(holder, stack_trace);
- SET_PRIVATE(holder, $stackTraceSymbol, UNDEFINED);
- SET_PRIVATE(holder, formatted_stack_trace_symbol, formatted_stack_trace);
+ SET_PRIVATE(holder, stackTraceSymbol, UNDEFINED);
+ SET_PRIVATE(holder, formattedStackTraceSymbol, formatted_stack_trace);
}
return formatted_stack_trace;
}
@@ -927,9 +906,9 @@ var StackTraceGetter = function() {
// If the receiver equals the holder, set the formatted stack trace that the
// getter returns.
var StackTraceSetter = function(v) {
- if (HAS_PRIVATE(this, $stackTraceSymbol)) {
- SET_PRIVATE(this, $stackTraceSymbol, UNDEFINED);
- SET_PRIVATE(this, formatted_stack_trace_symbol, v);
+ if (HAS_PRIVATE(this, stackTraceSymbol)) {
+ SET_PRIVATE(this, stackTraceSymbol, UNDEFINED);
+ SET_PRIVATE(this, formattedStackTraceSymbol, v);
}
};
@@ -973,7 +952,7 @@ function DefineError(global, f) {
// object. This avoids going through getters and setters defined
// on prototype objects.
if (!IS_UNDEFINED(m)) {
- %AddNamedProperty(this, 'message', ToString(m), DONT_ENUM);
+ %AddNamedProperty(this, 'message', TO_STRING(m), DONT_ENUM);
}
} else {
return new f(m);
@@ -1044,22 +1023,24 @@ captureStackTrace = function captureStackTrace(obj, cons_opt) {
GlobalError.captureStackTrace = captureStackTrace;
-utils.ExportToRuntime(function(to) {
- to.Error = GlobalError;
- to.EvalError = GlobalEvalError;
- to.RangeError = GlobalRangeError;
- to.ReferenceError = GlobalReferenceError;
- to.SyntaxError = GlobalSyntaxError;
- to.TypeError = GlobalTypeError;
- to.URIError = GlobalURIError;
- to.GetStackTraceLine = GetStackTraceLine;
- to.NoSideEffectToString = NoSideEffectToString;
- to.ToDetailString = ToDetailString;
- to.MakeError = MakeGenericError;
- to.MessageGetLineNumber = GetLineNumber;
- to.MessageGetColumnNumber = GetColumnNumber;
- to.MessageGetSourceLine = GetSourceLine;
- to.StackOverflowBoilerplate = StackOverflowBoilerplate;
-});
+%InstallToContext([
+ "error_function", GlobalError,
+ "eval_error_function", GlobalEvalError,
+ "get_stack_trace_line_fun", GetStackTraceLine,
+ "make_error_function", MakeGenericError,
+ "make_range_error", MakeRangeError,
+ "make_type_error", MakeTypeError,
+ "message_get_column_number", GetColumnNumber,
+ "message_get_line_number", GetLineNumber,
+ "message_get_source_line", GetSourceLine,
+ "no_side_effect_to_string_fun", NoSideEffectToString,
+ "range_error_function", GlobalRangeError,
+ "reference_error_function", GlobalReferenceError,
+ "stack_overflow_boilerplate", StackOverflowBoilerplate,
+ "syntax_error_function", GlobalSyntaxError,
+ "to_detail_string_fun", ToDetailString,
+ "type_error_function", GlobalTypeError,
+ "uri_error_function", GlobalURIError,
+]);
});
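The hunk above swaps the JS-side ExportToRuntime callback for a single %InstallToContext call taking one flat array that alternates context-slot names and values. A minimal sketch of what a consumer of such a list might look like (names and types are illustrative, not V8's actual runtime internals):

    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    using NativeContext = std::map<std::string, void*>;

    // Each entry pairs a slot name with the function to install, e.g.
    // {"error_function", GlobalError}.
    void InstallToContext(NativeContext* context,
                          const std::vector<std::pair<std::string, void*>>& entries) {
      for (const auto& entry : entries) (*context)[entry.first] = entry.second;
    }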
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index e219cc748a..7fa4d5d66a 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -32,6 +32,8 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
+#include "src/mips/assembler-mips.h"
+
#if V8_TARGET_ARCH_MIPS
#include "src/base/bits.h"
@@ -2024,6 +2026,7 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber: convert the 64-bit
// load into two 32-bit loads.
+ DCHECK(!src.rm().is(at));
if (IsFp64Mode()) {
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(LWC1, src.rm(), fd,
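For context on the new DCHECKs: in FP64 mode the assembler expands this 64-bit FPU load into two 32-bit loads, and the high word travels through the scratch register at before being merged into the destination, so at (and t8 on the store path) must not double as the base register. A rough C++ model of the split (little-endian word order assumed; this is not V8 code):

    #include <cstdint>
    #include <cstring>

    // Two aligned 32-bit loads reassembled into one 64-bit value, mirroring
    // the lwc1 / lw-into-at / mthc1 sequence the assembler emits for ldc1.
    uint64_t LoadSplit64(const uint8_t* base, int32_t offset) {
      uint32_t lo, hi;
      std::memcpy(&lo, base + offset, sizeof(lo));      // lwc1 fd, offset(base)
      std::memcpy(&hi, base + offset + 4, sizeof(hi));  // lw   at, offset+4(base)
      return (uint64_t{hi} << 32) | lo;                 // mthc1 at, fd
    }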
@@ -2069,6 +2072,8 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber: convert the 64-bit
// store into two 32-bit stores.
+ DCHECK(!src.rm().is(at));
+ DCHECK(!src.rm().is(t8));
if (IsFp64Mode()) {
if (is_int16(src.offset_) && is_int16(src.offset_ + kIntSize)) {
GenInstrImmediate(SWC1, src.rm(), fd,
@@ -2317,12 +2322,12 @@ void Assembler::abs_d(FPURegister fd, FPURegister fs) {
void Assembler::mov_d(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, D, f0, fs, fd, MOV_S);
+ GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}
void Assembler::mov_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, MOV_D);
+ GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 4db04b065f..c47f6d3abe 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -335,7 +335,8 @@ const FPURegister f31 = { 31 };
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
// Used on mips32r6 for compare operations.
-#define kDoubleCompareReg f31
+// We use the last non-callee-saved odd register for the O32 ABI.
+#define kDoubleCompareReg f19
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index b9607e05c4..f4da194579 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -8,7 +8,6 @@
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/full-codegen/full-codegen.h"
-#include "src/interpreter/bytecodes.h"
#include "src/runtime/runtime.h"
@@ -26,12 +25,19 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// -- a0 : number of arguments excluding receiver
// -- a1 : called function (only guaranteed when
// -- extra_args requires it)
- // -- cp : context
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument
// -- sp[4 * argc] : receiver
// -----------------------------------
+ __ AssertFunction(a1);
+
+ // Make sure we operate in the context of the called function (for example
+ // ConstructStubs implemented in C++ will be run in the context of the caller
+ // instead of the callee, due to the way that [[Construct]] is defined for
+ // ordinary functions).
+ // TODO(bmeurer): Can we make this more robust?
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
@@ -142,7 +148,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -150,121 +157,135 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
- Register function = a1;
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
- __ Assert(eq, kUnexpectedStringFunction, function, Operand(a2));
+ // 1. Load the first argument into a0 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+ __ Subu(a0, a0, Operand(1));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ Addu(sp, a0, sp);
+ __ lw(a0, MemOperand(sp));
+ __ Drop(2);
}
- // Load the first arguments in a0 and get rid of the rest.
- Label no_arguments;
- __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
- // First args = sp[(argc - 1) * 4].
- __ Subu(a0, a0, Operand(1));
- __ sll(a0, a0, kPointerSizeLog2);
- __ Addu(sp, a0, sp);
- __ lw(a0, MemOperand(sp));
- // sp now point to args[0], drop args[0] + receiver.
- __ Drop(2);
-
- Register argument = a2;
- Label not_cached, argument_is_string;
- __ LookupNumberStringCache(a0, // Input.
- argument, // Result.
- a3, // Scratch.
- t0, // Scratch.
- t1, // Scratch.
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
- __ bind(&argument_is_string);
+ // 2a. At least one argument, return a0 if it's a string, otherwise
+ // dispatch to appropriate conversion.
+ Label to_string, symbol_descriptive_string;
+ {
+ __ JumpIfSmi(a0, &to_string);
+ __ GetObjectType(a0, a1, a1);
+ STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+ __ Subu(a1, a1, Operand(FIRST_NONSTRING_TYPE));
+ __ Branch(&symbol_descriptive_string, eq, a1, Operand(zero_reg));
+ __ Branch(&to_string, gt, a1, Operand(zero_reg));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ }
- // ----------- S t a t e -------------
- // -- a2 : argument converted to string
- // -- a1 : constructor function
- // -- ra : return address
- // -----------------------------------
+ // 2b. No arguments, return the empty string (and pop the receiver).
+ __ bind(&no_arguments);
+ {
+ __ LoadRoot(v0, Heap::kempty_stringRootIndex);
+ __ DropAndRet(1);
+ }
- Label gc_required;
- __ Allocate(JSValue::kSize,
- v0, // Result.
- a3, // Scratch.
- t0, // Scratch.
- &gc_required,
- TAG_OBJECT);
-
- // Initialising the String Object.
- Register map = a3;
- __ LoadGlobalFunctionInitialMap(function, map, t0);
- if (FLAG_debug_code) {
- __ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ Assert(eq, kUnexpectedStringWrapperInstanceSize,
- t0, Operand(JSValue::kSize >> kPointerSizeLog2));
- __ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper,
- t0, Operand(zero_reg));
+ // 3a. Convert a0 to a string.
+ __ bind(&to_string);
+ {
+ ToStringStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
- __ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ // 3b. Convert symbol in a0 to a string.
+ __ bind(&symbol_descriptive_string);
+ {
+ __ Push(a0);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ }
+}
- __ sw(argument, FieldMemOperand(v0, JSValue::kValueOffset));
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
- __ Ret();
+ // 1. Load the first argument into a0 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+ __ Subu(a0, a0, Operand(1));
+ __ sll(a0, a0, kPointerSizeLog2);
+ __ Addu(sp, a0, sp);
+ __ lw(a0, MemOperand(sp));
+ __ Drop(2);
+ __ jmp(&done);
+ __ bind(&no_arguments);
+ __ LoadRoot(a0, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ bind(&done);
+ }
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- __ JumpIfSmi(a0, &convert_argument);
-
- // Is it a String?
- __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ And(t0, a3, Operand(kIsNotStringMask));
- __ Branch(&convert_argument, ne, t0, Operand(zero_reg));
- __ mov(argument, a0);
- __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
- __ Branch(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into a2.
- __ bind(&convert_argument);
- __ push(function); // Preserve the function.
- __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+ // 2. Make sure a0 is a string.
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ Label convert, done_convert;
+ __ JumpIfSmi(a0, &convert);
+ __ GetObjectType(a0, a2, a2);
+ __ And(t0, a2, Operand(kIsNotStringMask));
+ __ Branch(&done_convert, eq, t0, Operand(zero_reg));
+ __ bind(&convert);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ToStringStub stub(masm->isolate());
+ __ Push(a1);
+ __ CallStub(&stub);
+ __ Move(a0, v0);
+ __ Pop(a1);
+ }
+ __ bind(&done_convert);
}
- __ pop(function);
- __ mov(argument, v0);
- __ Branch(&argument_is_string);
- // Load the empty string into a2, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kempty_stringRootIndex);
- __ Drop(1);
- __ Branch(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
+ // 3. Allocate a JSValue wrapper for the string.
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ // ----------- S t a t e -------------
+ // -- a0 : the first argument
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -----------------------------------
+
+ Label allocate, done_allocate;
+ __ Allocate(JSValue::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Initialize the JSValue in v0.
+ __ LoadGlobalFunctionInitialMap(a1, a2, a3);
+ __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ Ret(USE_DELAY_SLOT);
+ __ sw(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+
+ // Fallback to the runtime to allocate in new space.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Move(a2, Smi::FromInt(JSValue::kSize));
+ __ Push(a0, a1, a2);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ Pop(a0, a1);
+ }
+ __ jmp(&done_allocate);
}
- __ Ret();
}
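The four stores in the fast path above are exactly what the STATIC_ASSERT on JSValue::kSize pins down: a JSValue wrapper is one word each for map, properties, elements and the wrapped value. A sketch of that assumed layout (field names are illustrative, not V8's declarations):

    struct JSValueLayout {
      void* map;         // sw a2, HeapObject::kMapOffset
      void* properties;  // sw a3, JSObject::kPropertiesOffset
      void* elements;    // sw a3, JSObject::kElementsOffset
      void* value;       // sw a0, JSValue::kValueOffset (the wrapped string)
    };
    static_assert(sizeof(JSValueLayout) == 4 * sizeof(void*),
                  "mirrors STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize)");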
@@ -314,8 +335,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_memento) {
+ bool is_api_function) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -325,9 +345,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- sp[...]: constructor arguments
// -----------------------------------
- // Should never create mementos for api functions.
- DCHECK(!is_api_function || !create_memento);
-
Isolate* isolate = masm->isolate();
// Enter a construct frame.
@@ -394,9 +411,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a2: initial map
Label rt_call_reload_new_target;
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- if (create_memento) {
- __ Addu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize));
- }
__ Allocate(a3, t4, t5, t6, &rt_call_reload_new_target, SIZE_IN_WORDS);
@@ -404,7 +418,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map and properties and elements are set to empty fixed array.
// a1: constructor function
// a2: initial map
- // a3: object size (including memento if create_memento)
+ // a3: object size
// t4: JSObject (not tagged)
__ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
__ mov(t5, t4);
@@ -419,7 +433,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with appropriate filler.
// a1: constructor function
// a2: initial map
- // a3: object size (in words, including memento if create_memento)
+ // a3: object size (in words)
// t4: JSObject (not tagged)
// t5: First in-object property of JSObject (not tagged)
// t2: slack tracking counter (non-API function case)
@@ -459,29 +473,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&no_inobject_slack_tracking);
}
- if (create_memento) {
- __ Subu(a0, a3, Operand(AllocationMemento::kSize / kPointerSize));
- __ sll(a0, a0, kPointerSizeLog2);
- __ Addu(a0, t4, Operand(a0)); // End of object.
- __ InitializeFieldsWithFiller(t5, a0, t7);
-
- // Fill in memento fields.
- // t5: points to the allocated but uninitialized memento.
- __ LoadRoot(t7, Heap::kAllocationMementoMapRootIndex);
- DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
- __ sw(t7, MemOperand(t5));
- __ Addu(t5, t5, kPointerSize);
- // Load the AllocationSite.
- __ lw(t7, MemOperand(sp, 3 * kPointerSize));
- __ AssertUndefinedOrAllocationSite(a2, t0);
- DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
- __ sw(t7, MemOperand(t5));
- __ Addu(t5, t5, kPointerSize);
- } else {
- __ sll(at, a3, kPointerSizeLog2);
- __ Addu(a0, t4, Operand(at)); // End of object.
- __ InitializeFieldsWithFiller(t5, a0, t7);
- }
+ __ sll(at, a3, kPointerSizeLog2);
+ __ Addu(a0, t4, Operand(at)); // End of object.
+ __ InitializeFieldsWithFiller(t5, a0, t7);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on.
@@ -500,45 +494,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a1: constructor function
// a3: original constructor
__ bind(&rt_call);
- if (create_memento) {
- // Get the cell or allocation site.
- __ lw(a2, MemOperand(sp, 3 * kPointerSize));
- __ push(a2); // argument 1: allocation site
- }
__ Push(a1, a3); // arguments 2-3 / 1-2
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
+ __ CallRuntime(Runtime::kNewObject, 2);
__ mov(t4, v0);
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- Label count_incremented;
- if (create_memento) {
- __ jmp(&count_incremented);
- }
-
// Receiver for constructor call allocated.
// t4: JSObject
__ bind(&allocated);
- if (create_memento) {
- __ lw(a2, MemOperand(sp, 3 * kPointerSize));
- __ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
- __ Branch(&count_incremented, eq, a2, Operand(t5));
- // a2 is an AllocationSite. We are creating a memento from it, so we
- // need to increment the memento create count.
- __ lw(a3, FieldMemOperand(a2,
- AllocationSite::kPretenureCreateCountOffset));
- __ Addu(a3, a3, Operand(Smi::FromInt(1)));
- __ sw(a3, FieldMemOperand(a2,
- AllocationSite::kPretenureCreateCountOffset));
- __ bind(&count_incremented);
- }
-
// Restore the parameters.
__ Pop(a3); // new.target
__ Pop(a1);
@@ -637,12 +601,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true);
}
@@ -736,8 +700,7 @@ enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
// Clobbers a2; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm,
- const int calleeOffset, Register argc,
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
IsTagged argc_is_tagged) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -758,12 +721,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ Branch(&okay, gt, a2, Operand(t3));
// Out of stack space.
- __ lw(a1, MemOperand(fp, calleeOffset));
- if (argc_is_tagged == kArgcIsUntaggedInt) {
- __ SmiTag(argc);
- }
- __ Push(a1, argc);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&okay);
}
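The test itself is unchanged by this hunk: compute the headroom between sp and the real stack limit and require it to exceed argc words; only the failure path now calls Runtime::kThrowStackOverflow directly instead of pushing arguments for a JS builtin. A scalar model of the check (register names in the comments are inferred from the surrounding code):

    #include <cstdint>

    bool StackHasRoom(uintptr_t sp, uintptr_t real_stack_limit, uint32_t argc) {
      uintptr_t headroom = sp - real_stack_limit;          // a2 = sp - limit
      uintptr_t needed = uintptr_t{argc} * sizeof(void*);  // t3 = argc << kPointerSizeLog2
      return headroom > needed;                            // Branch(&okay, gt, a2, t3)
    }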
@@ -774,7 +732,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Called from JSEntryStub::GenerateBody
// ----------- S t a t e -------------
- // -- a0: code entry
+ // -- a0: new.target
// -- a1: function
// -- a2: receiver_pointer
// -- a3: argc
@@ -789,19 +747,21 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Set up the context from the function argument.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Set up the context (we need to use the caller context from the isolate).
+ ExternalReference context_address(Isolate::kContextAddress,
+ masm->isolate());
+ __ li(cp, Operand(context_address));
+ __ lw(cp, MemOperand(cp));
// Push the function and the receiver onto the stack.
__ Push(a1, a2);
// Check if we have enough stack space to push all arguments.
- // The function is the first thing that was pushed above after entering
- // the internal frame.
- const int kFunctionOffset =
- InternalFrameConstants::kCodeOffset - kPointerSize;
// Clobbers a2.
- Generate_CheckStackOverflow(masm, kFunctionOffset, a3, kArgcIsUntaggedInt);
+ Generate_CheckStackOverflow(masm, a3, kArgcIsUntaggedInt);
+
+ // Remember new.target.
+ __ mov(t1, a0);
// Copy arguments to the stack in a loop.
// a3: argc
@@ -820,6 +780,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&entry);
__ Branch(&loop, ne, s0, Operand(t2));
+ // Set up new.target and argc.
+ __ mov(a0, a3);
+ __ mov(a3, t1);
+
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
@@ -831,17 +795,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// s6 holds the root address. Do not clobber.
// s7 is cp. Do not init.
- // Invoke the code and pass argc as a0.
- __ mov(a0, a3);
- if (is_construct) {
- // No type feedback cell is available
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
- }
+ // Invoke the code.
+ Handle<Code> builtin = is_construct
+ ? masm->isolate()->builtins()->Construct()
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
// Leave internal frame.
}
@@ -912,7 +870,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Subu(t1, sp, Operand(t0));
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
__ Branch(&ok, hs, t1, Operand(a2));
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -996,8 +954,11 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- // Drop receiver + arguments.
- __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+
+ // Drop receiver + arguments and return.
+ __ lw(at, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kParameterSizeOffset));
+ __ Addu(sp, sp, at);
__ Jump(ra);
}
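This resolves the old TODO: rather than dropping a hard-coded single slot, the exit trampoline reads the parameter area size from the BytecodeArray and pops receiver plus arguments with one add. A small model of that arithmetic (the struct is a stand-in; the field behind BytecodeArray::kParameterSizeOffset is assumed here to hold a byte count covering receiver and parameters):

    #include <cstdint>

    struct BytecodeArrayStandIn {
      uint32_t parameter_size;  // assumed: bytes for receiver + parameters
    };

    uintptr_t DropReceiverAndArguments(uintptr_t sp,
                                       const BytecodeArrayStandIn& bytecodes) {
      return sp + bytecodes.parameter_size;  // __ Addu(sp, sp, at); __ Jump(ra)
    }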
@@ -1250,129 +1211,31 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
}
+// static
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// a0: actual number of arguments
- { Label done;
+ {
+ Label done;
__ Branch(&done, ne, a0, Operand(zero_reg));
- __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
- __ push(t2);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
__ Addu(a0, a0, Operand(1));
__ bind(&done);
}
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
+ // 2. Get the function to call (passed as receiver) from the stack.
// a0: actual number of arguments
- Label slow, non_function;
__ sll(at, a0, kPointerSizeLog2);
__ addu(at, sp, at);
__ lw(a1, MemOperand(at));
- __ JumpIfSmi(a1, &non_function);
- __ GetObjectType(a1, a2, a2);
- __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- // 3a. Patch the first argument if necessary when calling a function.
- // a0: actual number of arguments
- // a1: function
- Label shift_arguments;
- __ li(t0, Operand(0, RelocInfo::NONE32)); // Indicate regular JS_FUNCTION.
- { Label convert_to_object, use_global_proxy, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
-
- // Do not transform the receiver for native (Compilerhints already in a3).
- __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
-
- // Compute the receiver in sloppy mode.
- // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(a2, sp, at);
- __ lw(a2, MemOperand(a2, -kPointerSize));
- // a0: actual number of arguments
- // a1: function
- // a2: first argument
- __ JumpIfSmi(a2, &convert_to_object, t2);
-
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ Branch(&use_global_proxy, eq, a2, Operand(a3));
- __ LoadRoot(a3, Heap::kNullValueRootIndex);
- __ Branch(&use_global_proxy, eq, a2, Operand(a3));
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ GetObjectType(a2, a3, a3);
- __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- __ bind(&convert_to_object);
- // Enter an internal frame in order to preserve argument count.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ sll(a0, a0, kSmiTagSize); // Smi tagged.
- __ push(a0);
- __ mov(a0, a2);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(a2, v0);
-
- __ pop(a0);
- __ sra(a0, a0, kSmiTagSize); // Un-tag.
- // Leave internal frame.
- }
-
- // Restore the function to a1, and the flag to t0.
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(at, sp, at);
- __ lw(a1, MemOperand(at));
- __ Branch(USE_DELAY_SLOT, &patch_receiver);
- __ li(t0, Operand(0, RelocInfo::NONE32)); // In delay slot.
-
- __ bind(&use_global_proxy);
- __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
-
- __ bind(&patch_receiver);
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(a3, sp, at);
- __ sw(a2, MemOperand(a3, -kPointerSize));
-
- __ Branch(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ li(t0, Operand(1, RelocInfo::NONE32)); // Indicate function proxy.
- __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
-
- __ bind(&non_function);
- __ li(t0, Operand(2, RelocInfo::NONE32)); // Indicate non-function.
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- // a0: actual number of arguments
- // a1: function
- // t0: call type (0: JS function, 1: function proxy, 2: non-function)
- __ sll(at, a0, kPointerSizeLog2);
- __ addu(a2, sp, at);
- __ sw(a1, MemOperand(a2, -kPointerSize));
-
- // 4. Shift arguments and return address one slot down on the stack
+ // 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
// a0: actual number of arguments
// a1: function
- // t0: call type (0: JS function, 1: function proxy, 2: non-function)
- __ bind(&shift_arguments);
- { Label loop;
+ {
+ Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
__ sll(at, a0, kPointerSizeLog2);
__ addu(a2, sp, at);
@@ -1388,46 +1251,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Pop();
}
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- // a0: actual number of arguments
- // a1: function
- // t0: call type (0: JS function, 1: function proxy, 2: non-function)
- { Label function, non_proxy;
- __ Branch(&function, eq, t0, Operand(zero_reg));
- // Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ mov(a2, zero_reg);
- __ Branch(&non_proxy, ne, t0, Operand(1));
-
- __ push(a1); // Re-add proxy object as additional argument.
- __ Addu(a0, a0, Operand(1));
- __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- // a0: actual number of arguments
- // a1: function
- __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2,
- FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ sra(a2, a2, kSmiTagSize);
- // Check formal and actual parameter counts.
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
-
- __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- ParameterCount expected(0);
- __ InvokeCode(a3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
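With the type dispatch gone, Function.prototype.call reduces to: guarantee at least one stack slot, pull the callable out of the receiver slot, slide the arguments down over it, and tail-call the generic Call builtin. A hedged model of the slide, treating the stack as an array of words with index 0 at sp:

    #include <cstddef>

    // slots[argc] holds the callable that was passed as receiver; after the
    // loop the old first argument sits there instead. The stub then pops the
    // duplicated bottom slot (__ Pop()) and decrements the argument count.
    void ShiftArgumentsDown(void** slots, size_t argc) {
      for (size_t i = argc; i >= 1; --i) slots[i] = slots[i - 1];
    }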
@@ -1495,106 +1320,35 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ Push(a1);
__ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
- __ push(a0);
- __ lw(a0, MemOperand(fp, kArgumentsOffset)); // Get the args array.
- __ push(a0);
+ __ lw(a1, MemOperand(fp, kArgumentsOffset)); // Get the args array.
+ __ Push(a0, a1);
// Returns (in v0) number of arguments to copy to stack as Smi.
if (targetIsArgument) {
- __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
}
// Returns the result in v0.
- Generate_CheckStackOverflow(masm, kFunctionOffset, v0, kArgcIsSmiTagged);
+ Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ mov(a1, zero_reg);
- __ Push(v0, a1); // Limit and initial index.
-
- // Get the receiver.
- __ lw(a0, MemOperand(fp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ GetObjectType(a1, a2, a2);
- __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- // Change context eagerly to get the right global object if necessary.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in a1.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_proxy;
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
- __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
- kSmiTagSize)));
- __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
-
- // Do not transform the receiver for native (Compilerhints already in a2).
- __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
- __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
-
- // Compute the receiver in sloppy mode.
- __ JumpIfSmi(a0, &call_to_object);
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ Branch(&use_global_proxy, eq, a0, Operand(a1));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Branch(&use_global_proxy, eq, a0, Operand(a2));
-
- // Check if the receiver is already a JavaScript object.
- // a0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Convert the receiver to a regular object.
- // a0: receiver
- __ bind(&call_to_object);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
- __ Branch(&push_receiver);
-
- __ bind(&use_global_proxy);
- __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalProxyOffset));
-
- // Push the receiver.
- // a0: receiver
- __ bind(&push_receiver);
- __ push(a0);
+ __ lw(a2, MemOperand(fp, kReceiverOffset));
+ __ Push(v0, a1, a2); // limit, initial index and receiver.
// Copy all arguments from the array to the stack.
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
- // Call the function.
- Label call_proxy;
- ParameterCount actual(a0);
+ // Call the callable.
+ // TODO(bmeurer): This should be a tail call according to ES6.
__ lw(a1, MemOperand(fp, kFunctionOffset));
- __ GetObjectType(a1, a2, a2);
- __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
-
- frame_scope.GenerateLeaveFrame();
- __ Ret(USE_DELAY_SLOT);
- __ Addu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
+ __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- // Call the function proxy.
- __ bind(&call_proxy);
- __ push(a1); // Add function proxy as last argument.
- __ Addu(a0, a0, Operand(1));
- __ li(a2, Operand(0, RelocInfo::NONE32));
- __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
// Tear down the internal frame and remove function, receiver and args.
}
@@ -1637,10 +1391,11 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
__ lw(a0, MemOperand(fp, kNewTargetOffset)); // get the new.target
__ push(a0);
// Returns argument count in v0.
- __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
// Returns result in v0.
- Generate_CheckStackOverflow(masm, kFunctionOffset, v0, kArgcIsSmiTagged);
+ Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
@@ -1734,6 +1489,256 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+
+ Label convert, convert_global_proxy, convert_to_object, done_convert;
+ __ AssertFunction(a1);
+ // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
+ // slot is "classConstructor".
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+ SharedFunctionInfo::kStrictModeByteOffset);
+ __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
+ __ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+ (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ Branch(&done_convert, ne, at, Operand(zero_reg));
+ {
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ lw(a3, MemOperand(at));
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -- a2 : the shared function info.
+ // -- a3 : the receiver
+ // -- cp : the function context.
+ // -----------------------------------
+
+ Label convert_receiver;
+ __ JumpIfSmi(a3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ GetObjectType(a3, t0, t0);
+ __ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
+ __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ }
+ __ Branch(&convert_receiver);
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ sll(a0, a0, kSmiTagSize); // Smi tagged.
+ __ Push(a0, a1);
+ __ mov(a0, a3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(a3, v0);
+ __ Pop(a0, a1);
+ __ sra(a0, a0, kSmiTagSize); // Un-tag.
+ }
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ sw(a3, MemOperand(at));
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -- a2 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ __ lw(a2,
+ FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ sra(a2, a2, kSmiTagSize); // Un-tag.
+ __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ ParameterCount actual(a0);
+ ParameterCount expected(a2);
+ __ InvokeCode(a3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
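The conversion block above is the ES6 [[Call]] receiver coercion for sloppy-mode, non-native functions. A compact restatement of the decision table (the enum and function are illustrative only; sloppy_non_native means both SharedFunctionInfo bits tested above are clear):

    enum class Receiver { kJSReceiver, kUndefinedOrNull, kOtherPrimitive };

    const char* ReceiverCoercion(bool sloppy_non_native, Receiver r) {
      if (!sloppy_non_native) return "use as-is (strict or native)";
      switch (r) {
        case Receiver::kJSReceiver:      return "use as-is";
        case Receiver::kUndefinedOrNull: return "replace with global proxy";
        case Receiver::kOtherPrimitive:  return "ToObject(receiver)";
      }
      return nullptr;  // unreachable
    }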
+// static
+void Builtins::Generate_Call(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ Label non_callable, non_function, non_smi;
+ __ JumpIfSmi(a1, &non_callable);
+ __ bind(&non_smi);
+ __ GetObjectType(a1, t1, t2);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
+ eq, t2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&non_function, ne, t2, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ // 1. Call to function proxy.
+ // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
+ __ lw(a1, FieldMemOperand(a1, JSFunctionProxy::kCallTrapOffset));
+ __ AssertNotSmi(a1);
+ __ Branch(&non_smi);
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not we raise an exception).
+ __ bind(&non_function);
+ // Check if target has a [[Call]] internal method.
+ __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(1 << Map::kIsCallable));
+ __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+ // Overwrite the original receiver with the (original) target.
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ sw(a1, MemOperand(at));
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
+
+
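Generate_Call is now the single entry point for calling an arbitrary object: JSFunctions jump straight to CallFunction, proxies re-dispatch through their call trap, and anything else is accepted only if its map has the IsCallable bit set. A hedged sketch of that final test (the bit position below is a stand-in, not Map::kIsCallable's actual value):

    #include <cstdint>

    constexpr uint8_t kIsCallableBit = 1u << 4;  // illustrative position

    // Mirrors the lbu/And/Branch sequence on Map::kBitFieldOffset: targets
    // without [[Call]] fall through to Runtime::kThrowCalledNonCallable.
    bool HasCallInternalMethod(uint8_t map_bit_field) {
      return (map_bit_field & kIsCallableBit) != 0;
    }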
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (checked to be a JSFunction)
+ // -- a3 : the original constructor (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(a1);
+ __ AssertFunction(a3);
+
+ // The calling convention for function-specific ConstructStubs requires
+ // a2 to contain either an AllocationSite or undefined.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
+ __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (checked to be a JSFunctionProxy)
+ // -- a3 : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
+ __ lw(a1, FieldMemOperand(a1, JSFunctionProxy::kConstructTrapOffset));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (can be any Object)
+ // -- a3 : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // Check if target has a [[Construct]] internal method.
+ Label non_constructor;
+ __ JumpIfSmi(a1, &non_constructor);
+ __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t2, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t2, t2, Operand(1 << Map::kIsCallable));
+ __ Branch(&non_constructor, eq, t2, Operand(zero_reg));
+
+ // Dispatch based on instance type.
+ __ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+ eq, t2, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ sll(at, a0, kPointerSizeLog2);
+ __ addu(at, sp, at);
+ __ sw(a1, MemOperand(at));
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
+
+
+// static
+void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- a1 : the target to call (can be any Object).
+
+ // Find the address of the last argument.
+ __ Addu(a3, a0, Operand(1)); // Add one for receiver.
+ __ sll(a3, a3, kPointerSizeLog2);
+ __ Subu(a3, a2, Operand(a3));
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ __ lw(t0, MemOperand(a2));
+ __ Addu(a2, a2, Operand(-kPointerSize));
+ __ push(t0);
+ __ bind(&loop_check);
+ __ Branch(&loop_header, gt, a2, Operand(a3));
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
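The push loop above walks downward from the first-argument pointer, covering the argc arguments plus the receiver, then tail-calls the generic Call builtin. A C++ model of the pointer arithmetic (the stack is modeled as a descending cursor; not V8 code):

    #include <cstdint>

    // first corresponds to a2, last to a3; the loop pushes (argc + 1) words.
    void PushArgsLoop(intptr_t** stack_cursor, intptr_t* first, uint32_t argc) {
      intptr_t* last = first - (argc + 1);      // a3 = a2 - ((a0 + 1) << kPointerSizeLog2)
      for (intptr_t* p = first; p > last; --p)  // Branch(&loop_header, gt, a2, a3)
        *--(*stack_cursor) = *p;                // lw t0, 0(a2); push t0
    }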
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
// ----------- S t a t e -------------
@@ -1761,26 +1766,27 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- // Calculate copy start address into a0 and copy end address into a2.
+ // Calculate copy start address into a0 and copy end address into t1.
__ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
__ Addu(a0, fp, a0);
// Adjust for return address and receiver.
__ Addu(a0, a0, Operand(2 * kPointerSize));
// Compute copy end address.
- __ sll(a2, a2, kPointerSizeLog2);
- __ subu(a2, a0, a2);
+ __ sll(t1, a2, kPointerSizeLog2);
+ __ subu(t1, a0, t1);
// Copy the arguments (including the receiver) to the new stack frame.
// a0: copy start address
// a1: function
- // a2: copy end address
+ // a2: expected number of arguments
// a3: code entry to call
+ // t1: copy end address
Label copy;
__ bind(&copy);
__ lw(t0, MemOperand(a0));
__ push(t0);
- __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
+ __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t1));
__ addiu(a0, a0, -kPointerSize); // In delay slot.
__ jmp(&invoke);
@@ -1811,7 +1817,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
- // Calculate copy start address into a0 and copy end address is fp.
+ // Calculate copy start address into a0 and copy end address into t3.
// a0: actual number of arguments as a smi
// a1: function
// a2: expected number of arguments
@@ -1843,21 +1849,23 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a3: code entry to call
__ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
__ sll(t2, a2, kPointerSizeLog2);
- __ Subu(a2, fp, Operand(t2));
+ __ Subu(t1, fp, Operand(t2));
// Adjust for frame.
- __ Subu(a2, a2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ __ Subu(t1, t1, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
2 * kPointerSize));
Label fill;
__ bind(&fill);
__ Subu(sp, sp, kPointerSize);
- __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2));
+ __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(t1));
__ sw(t0, MemOperand(sp));
}
// Call the entry point.
__ bind(&invoke);
-
+ __ mov(a0, a2);
+ // a0 : expected number of arguments
+ // a1 : function (passed through to callee)
__ Call(a3);
// Store offset of return address for deoptimizer.
@@ -1878,7 +1886,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ break_(0xCC);
}
}
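The adaptor changes above are mechanical but easy to misread: a2 used to be clobbered as the copy-end pointer, so the callee could not rely on it; now t1 carries the copy bound and a2 survives as the expected argument count, moved into a0 right before __ Call(a3). A loose model of the "enough arguments" copy with that register assignment:

    #include <cstddef>

    // copy_start is a0 (highest incoming slot), copy_end is t1; every word
    // from copy_start down to copy_end inclusive is pushed, and a2 (the
    // expected count) is never touched.
    void CopyActualArguments(void** copy_start, void** copy_end, void*** sp) {
      for (void** p = copy_start; ; --p) {
        *--(*sp) = *p;             // lw t0, 0(a0); push t0
        if (p == copy_end) break;  // Branch(..., ne, a0, t1), delay-slot decrement
      }
    }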
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 211eaf9359..b12cb718ab 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -12,6 +12,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
+#include "src/mips/code-stubs-mips.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
@@ -726,29 +727,25 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// a1 (rhs) second.
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- if (cc == eq && strict()) {
- __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
+ if (cc == eq) {
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
+ 1);
} else {
- Builtins::JavaScript native;
- if (cc == eq) {
- native = Builtins::EQUALS;
+ int ncr; // NaN compare result.
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result.
- if (cc == lt || cc == le) {
- ncr = GREATER;
- } else {
- DCHECK(cc == gt || cc == ge); // Remaining cases.
- ncr = LESS;
- }
- __ li(a0, Operand(Smi::FromInt(ncr)));
- __ push(a0);
+ DCHECK(cc == gt || cc == ge); // Remaining cases.
+ ncr = LESS;
}
+ __ li(a0, Operand(Smi::FromInt(ncr)));
+ __ push(a0);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ __ TailCallRuntime(
+ is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
+ 1);
}
__ bind(&miss);
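The folded branch keeps the old NaN trick: Runtime::kCompare returns the pushed ncr value whenever either operand is NaN, and ncr is chosen per condition (GREATER for lt/le, LESS for gt/ge; +1 and -1 in this sketch) so the final test comes out false, as IEEE 754 requires. A worked model:

    #include <cmath>

    // -1 / 0 / +1 for ordered operands; ncr when either side is NaN.
    int Compare(double lhs, double rhs, int ncr) {
      if (std::isnan(lhs) || std::isnan(rhs)) return ncr;
      return lhs < rhs ? -1 : (lhs > rhs ? 1 : 0);
    }

    bool LessThan(double a, double b) {
      return Compare(a, b, /*ncr=*/+1) < 0;  // NaN < x  =>  +1 < 0  =>  false
    }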
@@ -1413,202 +1410,105 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-// Uses registers a0 to t0.
-// Expected input (depending on whether args are in registers or on the stack):
-// * object: a0 or at sp + 1 * kPointerSize.
-// * function: a1 or at sp.
-//
-// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register t0.
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub:
- const Register object = a0; // Object (lhs).
- Register map = a3; // Map of the object.
- const Register function = a1; // Function (rhs).
- const Register prototype = t0; // Prototype of the function.
- const Register inline_site = t5;
- const Register scratch = a2;
-
- const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
-
- Label slow, loop, is_instance, is_not_instance, not_js_object;
-
- if (!HasArgsInRegisters()) {
- __ lw(object, MemOperand(sp, 1 * kPointerSize));
- __ lw(function, MemOperand(sp, 0));
- }
-
- // Check that the left hand is a JS object and load map.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
- Label miss;
- __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
- __ Branch(&miss, ne, function, Operand(at));
- __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
- __ Branch(&miss, ne, map, Operand(at));
- __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+ Register const object = a1; // Object (lhs).
+ Register const function = a0; // Function (rhs).
+ Register const object_map = a2; // Map of {object}.
+ Register const function_map = a3; // Map of {function}.
+ Register const function_prototype = t0; // Prototype of {function}.
+ Register const scratch = t1;
+
+ DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+ // Check if {object} is a smi.
+ Label object_is_smi;
+ __ JumpIfSmi(object, &object_is_smi);
+
+ // Lookup the {function} and the {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Label fast_case, slow_case;
+ __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
+ __ Branch(&fast_case, ne, function, Operand(at));
+ __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
+ __ Branch(&fast_case, ne, object_map, Operand(at));
+ __ Ret(USE_DELAY_SLOT);
+ __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
+
+ // If {object} is a smi we can safely return false if {function} is a JS
+ // function, otherwise we have to miss to the runtime and throw an exception.
+ __ bind(&object_is_smi);
+ __ JumpIfSmi(function, &slow_case);
+ __ GetObjectType(function, function_map, scratch);
+ __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex); // In delay slot.
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+ // Fast-case: The {function} must be a valid JSFunction.
+ __ bind(&fast_case);
+ __ JumpIfSmi(function, &slow_case);
+ __ GetObjectType(function, function_map, scratch);
+ __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- } else {
- DCHECK(HasArgsInRegisters());
- // Patch the (relocated) inlined map check.
-
- // The offset was stored in t0 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
- __ LoadFromSafepointRegisterSlot(scratch, t0);
- __ Subu(inline_site, ra, scratch);
- // Get the map location in scratch and patch it.
- __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
- __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
-
- __ mov(t4, map);
- // |scratch| points at the beginning of the cell. Calculate the field
- // containing the map.
- __ Addu(function, scratch, Operand(Cell::kValueOffset - 1));
- __ RecordWriteField(scratch, Cell::kValueOffset, t4, function,
- kRAHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- }
-
- // Register mapping: a3 is object map and t0 is function prototype.
- // Get prototype of object into a2.
- __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+ // Ensure that {function} has an instance prototype.
+ __ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ __ Branch(&slow_case, ne, at, Operand(zero_reg));
- // We don't need map any more. Use it as a scratch register.
- Register scratch2 = map;
- map = no_reg;
+ // Ensure that {function} is not bound.
+ Register const shared_info = scratch;
+ __ lw(shared_info,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(scratch,
+ FieldMemOperand(shared_info, SharedFunctionInfo::kBoundByteOffset));
+ __ And(at, scratch, Operand(1 << SharedFunctionInfo::kBoundBitWithinByte));
+ __ Branch(&slow_case, ne, at, Operand(zero_reg));
- // Loop through the prototype chain looking for the function prototype.
- __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
+ // Get the "prototype" (or initial map) of the {function}.
+ __ lw(function_prototype,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ AssertNotSmi(function_prototype);
+
+ // Resolve the prototype if the {function} has an initial map. Afterwards the
+ // {function_prototype} will be either the JSReceiver prototype object or the
+ // hole value, which means that no instances of the {function} were created so
+ // far and hence we should return false.
+ Label function_prototype_valid;
+ __ GetObjectType(function_prototype, scratch, scratch);
+ __ Branch(&function_prototype_valid, ne, scratch, Operand(MAP_TYPE));
+ __ lw(function_prototype,
+ FieldMemOperand(function_prototype, Map::kPrototypeOffset));
+ __ bind(&function_prototype_valid);
+ __ AssertNotSmi(function_prototype);
+
+ // Update the global instanceof cache with the current {object} map and
+ // {function}. The cached answer will be set when it is known below.
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+
+ // Loop through the prototype chain looking for the {function} prototype.
+ // Assume true, and change to false if not found.
+ Register const object_prototype = object_map;
+ Register const null = scratch;
+ Label done, loop;
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ __ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ Branch(&is_instance, eq, scratch, Operand(prototype));
- __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
- __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
- __ Branch(&loop);
-
- __ bind(&is_instance);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- if (!HasCallSiteInlineCheck()) {
- __ mov(v0, zero_reg);
- __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- }
- } else {
- // Patch the call site to return true.
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ PatchRelocatedValue(inline_site, scratch, v0);
-
- if (!ReturnTrueFalseObject()) {
- __ mov(v0, zero_reg);
- }
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ li(v0, Operand(Smi::FromInt(1)));
- __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- }
- } else {
- // Patch the call site to return false.
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ PatchRelocatedValue(inline_site, scratch, v0);
-
- if (!ReturnTrueFalseObject()) {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- }
-
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow);
- __ GetObjectType(function, scratch2, scratch);
- __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- // Null is not instance of anything.
- __ Branch(&object_not_null, ne, object,
- Operand(isolate()->factory()->null_value()));
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- } else {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- } else {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch, &slow);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- } else {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+ __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Branch(&done, eq, object_prototype, Operand(function_prototype));
+ __ Branch(USE_DELAY_SLOT, &loop, ne, object_prototype, Operand(null));
+ __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ __ Ret(USE_DELAY_SLOT);
+ __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
- // Slow-case. Tail call builtin.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- if (HasArgsInRegisters()) {
- __ Push(a0, a1);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- __ mov(a0, v0);
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
- }
+ // Slow-case: Call the runtime function.
+ __ bind(&slow_case);
+ __ Push(object, function);
+ __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
}
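
The replacement stub above is the whole instanceof fast path: resolve the function's "prototype", then walk the receiver's map->prototype chain until it reaches that prototype (true) or null (false), storing the answer in the instanceof cache roots (the StoreRoot in the delay slot fills Heap::kInstanceofCacheAnswerRootIndex with whichever value v0 holds). A minimal C++ sketch of the same loop; Object and Map here are illustrative stand-ins, not V8's real classes:

#include <cstdio>

// Illustrative stand-ins; V8's real Object and Map are far richer.
struct Map;
struct Object { Map* map; };
struct Map { Object* prototype; };

// Mirrors the emitted loop: follow map->prototype until we reach the
// function's prototype (instance) or the null sentinel (not an instance).
bool IsInstanceOf(Object* object, Object* function_prototype,
                  Object* null_sentinel) {
  Map* object_map = object->map;
  for (;;) {
    Object* object_prototype = object_map->prototype;
    if (object_prototype == function_prototype) return true;
    if (object_prototype == null_sentinel) return false;
    object_map = object_prototype->map;  // continue down the chain
  }
}

int main() {
  Object null_obj{nullptr};     // stand-in for the null value
  Map fn_proto_map{&null_obj};  // fn_proto's own chain ends at null
  Object fn_proto{&fn_proto_map};
  Map obj_map{&fn_proto};       // obj's prototype is fn_proto
  Object obj{&obj_map};
  std::printf("%d\n", IsInstanceOf(&obj, &fn_proto, &null_obj));  // 1
  return 0;
}
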
@@ -1684,74 +1584,70 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+
+ DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime,
- ne,
- a2,
+ __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Patch the arguments.length and the parameters pointer in the current frame.
- __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sw(a2, MemOperand(sp, 0 * kPointerSize));
+ __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ sll(t3, a2, 1);
- __ Addu(a3, a3, Operand(t3));
- __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
- __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+ __ Addu(t0, t0, Operand(t3));
+ __ addiu(a3, t0, StandardFrameConstants::kCallerSPOffset);
__ bind(&runtime);
+ __ Push(a1, a3, a2);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
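
Note the sll by 1 used above to turn the tagged parameter count straight into a byte offset: on 32-bit targets a Smi n is stored as n << 1, so one further left shift (kPointerSizeLog2 - kSmiTagSize) yields n * kPointerSize. A small self-contained check of that arithmetic, with the 32-bit constants written out as assumptions of this sketch:

#include <cassert>
#include <cstdint>

// 32-bit V8 Smi layout, written out as assumptions for this sketch.
constexpr int kSmiTagSize = 1;
constexpr int kPointerSizeLog2 = 2;
constexpr int kPointerSize = 1 << kPointerSizeLog2;

constexpr int32_t SmiTag(int32_t n) { return n << kSmiTagSize; }  // n -> Smi

int main() {
  int32_t count = 5;
  int32_t tagged = SmiTag(count);  // what the stub receives in a2
  // Equivalent of: __ sll(t3, a2, 1)  (1 == kPointerSizeLog2 - kSmiTagSize)
  int32_t byte_offset = tagged << (kPointerSizeLog2 - kSmiTagSize);
  assert(byte_offset == count * kPointerSize);  // 5 slots -> 20 bytes
  return 0;
}
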
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // Stack layout:
- // sp[0] : number of parameters (tagged)
- // sp[4] : address of receiver argument
- // sp[8] : function
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
// Registers used over whole function:
- // t2 : allocated object (tagged)
- // t5 : mapped parameter count (tagged)
+ // t1 : arguments count (tagged)
+ // t2 : mapped parameter count (tagged)
- __ lw(a1, MemOperand(sp, 0 * kPointerSize));
- // a1 = parameter count (tagged)
+ DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame,
- eq,
- a2,
+ Label adaptor_frame, try_allocate, runtime;
+ __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// No adaptor, parameter count = argument count.
- __ mov(a2, a1);
- __ b(&try_allocate);
- __ nop(); // Branch delay slot nop.
+ __ mov(t1, a2);
+ __ Branch(USE_DELAY_SLOT, &try_allocate);
+ __ mov(t2, a2); // In delay slot.
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
- __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sll(t6, a2, 1);
- __ Addu(a3, a3, Operand(t6));
- __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sw(a3, MemOperand(sp, 1 * kPointerSize));
-
- // a1 = parameter count (tagged)
- // a2 = argument count (tagged)
- // Compute the mapped parameter count = min(a1, a2) in a1.
- Label skip_min;
- __ Branch(&skip_min, lt, a1, Operand(a2));
- __ mov(a1, a2);
- __ bind(&skip_min);
+ __ lw(t1, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sll(t6, t1, 1);
+ __ Addu(t0, t0, Operand(t6));
+ __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // t1 = argument count (tagged)
+ // t2 = parameter count (tagged)
+ // Compute the mapped parameter count = min(t2, t1) in t2.
+ __ mov(t2, a2);
+ __ Branch(&try_allocate, le, t2, Operand(t1));
+ __ mov(t2, t1);
__ bind(&try_allocate);
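
The allocation hunk that follows sizes the parameter map, the backing store, and the arguments object as a single Allocate call: the parameter map costs mapped_count slots plus its header (skipped entirely when nothing is mapped), the backing store costs argument_count slots plus a FixedArray header, and the object itself is a fixed size. A sketch of that computation; the byte constants here are illustrative assumptions, not V8's exact values:

#include <cstdio>

// Illustrative byte sizes; V8's real constants live in its heap headers.
constexpr int kPointerSize = 4;
constexpr int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length
constexpr int kParameterMapHeaderSize =
    kFixedArrayHeaderSize + 2 * kPointerSize;
constexpr int kSloppyArgumentsObjectSize = 5 * kPointerSize;

// Total bytes requested by the single __ Allocate call in the next hunk.
int SloppyArgumentsAllocationSize(int mapped_count, int argument_count) {
  int size = 0;
  if (mapped_count > 0)  // 1. parameter map, only when something is mapped
    size += mapped_count * kPointerSize + kParameterMapHeaderSize;
  // 2. backing store
  size += argument_count * kPointerSize + kFixedArrayHeaderSize;
  // 3. the arguments object itself
  size += kSloppyArgumentsObjectSize;
  return size;
}

int main() {
  std::printf("%d bytes\n", SloppyArgumentsAllocationSize(2, 3));
  return 0;
}
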
@@ -1762,14 +1658,14 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// If there are no mapped parameters, we do not need the parameter_map.
Label param_map_size;
DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
- __ mov(t5, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
- __ sll(t5, a1, 1);
+ __ Branch(USE_DELAY_SLOT, &param_map_size, eq, t2, Operand(zero_reg));
+ __ mov(t5, zero_reg); // In delay slot: param map size = 0 when t2 == 0.
+ __ sll(t5, t2, 1);
__ addiu(t5, t5, kParameterMapHeaderSize);
__ bind(&param_map_size);
// 2. Backing store.
- __ sll(t6, a2, 1);
+ __ sll(t6, t1, 1);
__ Addu(t5, t5, Operand(t6));
__ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
@@ -1777,7 +1673,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ Addu(t5, t5, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
+ __ Allocate(t5, v0, t0, t5, &runtime, TAG_OBJECT);
// v0 = address of new object(s) (tagged)
// a2 = argument count (smi-tagged)
@@ -1790,37 +1686,36 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
Label skip2_ne, skip2_eq;
- __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
+ __ Branch(&skip2_ne, ne, t2, Operand(zero_reg));
__ lw(t0, MemOperand(t0, kNormalOffset));
__ bind(&skip2_ne);
- __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
+ __ Branch(&skip2_eq, eq, t2, Operand(zero_reg));
__ lw(t0, MemOperand(t0, kAliasedOffset));
__ bind(&skip2_eq);
// v0 = address of new object (tagged)
- // a1 = mapped parameter count (tagged)
// a2 = argument count (smi-tagged)
// t0 = address of arguments map (tagged)
+ // t2 = mapped parameter count (tagged)
__ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ LoadRoot(t5, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(t5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(t5, FieldMemOperand(v0, JSObject::kElementsOffset));
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ lw(a3, MemOperand(sp, 2 * kPointerSize));
- __ AssertNotSmi(a3);
+ __ AssertNotSmi(a1);
const int kCalleeOffset = JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize;
- __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
+ __ sw(a1, FieldMemOperand(v0, kCalleeOffset));
// Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(a2);
+ __ AssertSmi(t1);
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
const int kLengthOffset = JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize;
- __ sw(a2, FieldMemOperand(v0, kLengthOffset));
+ __ sw(t1, FieldMemOperand(v0, kLengthOffset));
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, t0 will point there, otherwise
@@ -1829,29 +1724,29 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
// v0 = address of new object (tagged)
- // a1 = mapped parameter count (tagged)
// a2 = argument count (tagged)
// t0 = address of parameter map or backing store (tagged)
+ // t2 = mapped parameter count (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
Label skip3;
- __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
- // Move backing store address to a3, because it is
+ __ Branch(&skip3, ne, t2, Operand(Smi::FromInt(0)));
+ // Move backing store address to a1, because it is
// expected there when filling in the unmapped arguments.
- __ mov(a3, t0);
+ __ mov(a1, t0);
__ bind(&skip3);
- __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
+ __ Branch(&skip_parameter_map, eq, t2, Operand(Smi::FromInt(0)));
- __ LoadRoot(t2, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
- __ Addu(t2, a1, Operand(Smi::FromInt(2)));
- __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ __ LoadRoot(t1, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
+ __ Addu(t1, t2, Operand(Smi::FromInt(2)));
+ __ sw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
__ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ sll(t6, a1, 1);
- __ Addu(t2, t0, Operand(t6));
- __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
- __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
+ __ sll(t6, t2, 1);
+ __ Addu(t1, t0, Operand(t6));
+ __ Addu(t1, t1, Operand(kParameterMapHeaderSize));
+ __ sw(t1, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_parameter_count slots. They index the context,
@@ -1862,70 +1757,71 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
// We loop from right to left.
Label parameters_loop, parameters_test;
- __ mov(t2, a1);
- __ lw(t5, MemOperand(sp, 0 * kPointerSize));
- __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ Subu(t5, t5, Operand(a1));
+ __ mov(t1, t2);
+ __ Addu(t5, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ Subu(t5, t5, Operand(t2));
__ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
- __ sll(t6, t2, 1);
- __ Addu(a3, t0, Operand(t6));
- __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
+ __ sll(t6, t1, 1);
+ __ Addu(a1, t0, Operand(t6));
+ __ Addu(a1, a1, Operand(kParameterMapHeaderSize));
- // t2 = loop variable (tagged)
- // a1 = mapping index (tagged)
- // a3 = address of backing store (tagged)
+ // a1 = address of backing store (tagged)
// t0 = address of parameter map (tagged)
- // t1 = temporary scratch (a.o., for address calculation)
+ // a0 = temporary scratch (among others, for address calculation)
+ // t1 = loop variable (tagged)
// t3 = the hole value
__ jmp(&parameters_test);
__ bind(&parameters_loop);
- __ Subu(t2, t2, Operand(Smi::FromInt(1)));
- __ sll(t1, t2, 1);
- __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ Addu(t6, t0, t1);
+ __ Subu(t1, t1, Operand(Smi::FromInt(1)));
+ __ sll(a0, t1, 1);
+ __ Addu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ Addu(t6, t0, a0);
__ sw(t5, MemOperand(t6));
- __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ Addu(t6, a3, t1);
+ __ Subu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ Addu(t6, a1, a0);
__ sw(t3, MemOperand(t6));
__ Addu(t5, t5, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
- __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));
+ __ Branch(&parameters_loop, ne, t1, Operand(Smi::FromInt(0)));
+
+ // t1 = argument count (tagged).
+ __ lw(t1, FieldMemOperand(v0, kLengthOffset));
__ bind(&skip_parameter_map);
- // a2 = argument count (tagged)
- // a3 = address of backing store (tagged)
- // t1 = scratch
+ // v0 = address of new object (tagged)
+ // a1 = address of backing store (tagged)
+ // t1 = argument count (tagged)
+ // t2 = mapped parameter count (tagged)
+ // t5 = scratch
// Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
- __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
- __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+ __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
+ __ sw(t5, FieldMemOperand(a1, FixedArray::kMapOffset));
+ __ sw(t1, FieldMemOperand(a1, FixedArray::kLengthOffset));
Label arguments_loop, arguments_test;
- __ mov(t5, a1);
- __ lw(t0, MemOperand(sp, 1 * kPointerSize));
- __ sll(t6, t5, 1);
- __ Subu(t0, t0, Operand(t6));
+ __ sll(t6, t2, 1);
+ __ Subu(a3, a3, Operand(t6));
__ jmp(&arguments_test);
__ bind(&arguments_loop);
- __ Subu(t0, t0, Operand(kPointerSize));
- __ lw(t2, MemOperand(t0, 0));
- __ sll(t6, t5, 1);
- __ Addu(t1, a3, Operand(t6));
- __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
- __ Addu(t5, t5, Operand(Smi::FromInt(1)));
+ __ Subu(a3, a3, Operand(kPointerSize));
+ __ lw(t0, MemOperand(a3, 0));
+ __ sll(t6, t2, 1);
+ __ Addu(t5, a1, Operand(t6));
+ __ sw(t0, FieldMemOperand(t5, FixedArray::kHeaderSize));
+ __ Addu(t2, t2, Operand(Smi::FromInt(1)));
__ bind(&arguments_test);
- __ Branch(&arguments_loop, lt, t5, Operand(a2));
+ __ Branch(&arguments_loop, lt, t2, Operand(t1));
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
+ // Return.
+ __ Ret();
// Do the runtime call to allocate the arguments object.
- // a2 = argument count (tagged)
+ // t1 = argument count (tagged)
__ bind(&runtime);
- __ sw(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ Push(a1, a3, t1);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
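
The parameters loop above writes each mapped slot twice: the parameter map entry gets a context slot index and the matching backing-store entry gets the hole, so reads of mapped arguments go through the context (tracking the live parameter) while unmapped ones come from the backing store. A simplified model of that lookup discipline; plain ints stand in for tagged values and the hole is a sentinel, both assumptions for illustration:

#include <cstdio>
#include <vector>

constexpr int kTheHole = -1;  // sentinel standing in for the hole value

// arguments[i] for sloppy aliased arguments: a mapped slot names a context
// slot (so it tracks the live parameter); an unmapped slot is a plain copy.
int LoadSloppyArgument(const std::vector<int>& parameter_map,
                       const std::vector<int>& backing_store,
                       const std::vector<int>& context, size_t i) {
  if (i < parameter_map.size() && parameter_map[i] != kTheHole)
    return context[parameter_map[i]];  // aliased read through the context
  return backing_store[i];             // unmapped read
}

int main() {
  std::vector<int> context = {0, 0, 0, 0, 42};  // MIN_CONTEXT_SLOTS.., param
  std::vector<int> parameter_map = {4};         // argument 0 -> context[4]
  std::vector<int> backing_store = {kTheHole, 7};
  std::printf("%d %d\n",
              LoadSloppyArgument(parameter_map, backing_store, context, 0),
              LoadSloppyArgument(parameter_map, backing_store, context, 1));
  return 0;  // prints: 42 7
}
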
@@ -1954,45 +1850,40 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+
+ DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
+
// Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame,
- eq,
- a3,
+ Label try_allocate, runtime;
+ __ lw(t0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(a0, MemOperand(t0, StandardFrameConstants::kContextOffset));
+ __ Branch(&try_allocate, ne, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- // Get the length from the frame.
- __ lw(a1, MemOperand(sp, 0));
- __ Branch(&try_allocate);
-
// Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sw(a1, MemOperand(sp, 0));
- __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, a2, Operand(at));
-
- __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sw(a3, MemOperand(sp, 1 * kPointerSize));
+ __ lw(a2, MemOperand(t0, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(t0, t0, Operand(at));
+ __ Addu(a3, t0, Operand(StandardFrameConstants::kCallerSPOffset));
// Try the new space allocation. Start out with computing the size
// of the arguments object and the elements array in words.
Label add_arguments_object;
__ bind(&try_allocate);
- __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
- __ srl(a1, a1, kSmiTagSize);
+ __ SmiUntag(t5, a2);
+ __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg));
- __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ Addu(t5, t5, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ Addu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
+ __ Addu(t5, t5, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
- __ Allocate(a1, v0, a2, a3, &runtime,
+ __ Allocate(t5, v0, t0, t1, &runtime,
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
@@ -2002,86 +1893,55 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
__ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
+ __ sw(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sw(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ lw(a1, MemOperand(sp, 0 * kPointerSize));
- __ AssertSmi(a1);
- __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ AssertSmi(a2);
+ __ sw(a2,
+ FieldMemOperand(v0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
Label done;
- __ Branch(&done, eq, a1, Operand(zero_reg));
-
- // Get the parameters pointer from the stack.
- __ lw(a2, MemOperand(sp, 1 * kPointerSize));
+ __ Branch(&done, eq, a2, Operand(zero_reg));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ Addu(t0, v0, Operand(Heap::kStrictArgumentsObjectSize));
__ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
- __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
- __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
- // Untag the length for the loop.
- __ srl(a1, a1, kSmiTagSize);
+ __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+ __ sw(t1, FieldMemOperand(t0, FixedArray::kMapOffset));
+ __ sw(a2, FieldMemOperand(t0, FixedArray::kLengthOffset));
+ __ SmiUntag(a2);
// Copy the fixed array slots.
Label loop;
// Set up t0 to point to the first array slot.
__ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
- // Pre-decrement a2 with kPointerSize on each iteration.
+ // Pre-decrement a3 with kPointerSize on each iteration.
// Pre-decrement in order to skip receiver.
- __ Addu(a2, a2, Operand(-kPointerSize));
- __ lw(a3, MemOperand(a2));
+ __ Addu(a3, a3, Operand(-kPointerSize));
+ __ lw(t1, MemOperand(a3));
// Post-increment t0 with kPointerSize on each iteration.
- __ sw(a3, MemOperand(t0));
+ __ sw(t1, MemOperand(t0));
__ Addu(t0, t0, Operand(kPointerSize));
- __ Subu(a1, a1, Operand(1));
- __ Branch(&loop, ne, a1, Operand(zero_reg));
+ __ Subu(a2, a2, Operand(1));
+ __ Branch(&loop, ne, a2, Operand(zero_reg));
- // Return and remove the on-stack parameters.
+ // Return.
__ bind(&done);
- __ DropAndRet(3);
+ __ Ret();
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
+ __ Push(a1, a3, a2);
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
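
The strict-mode stub is simpler because there is no aliasing: after allocation it copies argument_count values, pre-decrementing the source pointer (which skips the receiver) while the destination walks forward through the FixedArray. The copy loop as a standalone sketch, assuming the MIPS layout where the arguments sit just below the parameters pointer:

#include <cstdio>

// Mirrors the copy loop above: the source pointer starts at the parameters
// pointer and is pre-decremented (first step skips the receiver); the
// destination walks forward through the freshly allocated array.
void CopyStrictArguments(const int* params, int* dst, int count) {
  const int* src = params;
  for (int i = 0; i < count; ++i) {
    --src;          // pre-decrement: skips the receiver, then each argument
    dst[i] = *src;
  }
}

int main() {
  int stack[] = {30, 20, 10, 99};  // arg2, arg1, arg0, receiver
  int out[3] = {0, 0, 0};
  CopyStrictArguments(stack + 3, out, 3);
  std::printf("%d %d %d\n", out[0], out[1], out[2]);  // 10 20 30
  return 0;
}
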
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // sp[0] : language mode
- // sp[4] : index of rest parameter
- // sp[8] : number of parameters
- // sp[12] : receiver displacement
- // Check if the calling frame is an arguments adaptor frame.
-
- Label runtime;
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne, a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer.
- __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sw(a1, MemOperand(sp, 2 * kPointerSize));
- __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(a3, a2, Operand(at));
-
- __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sw(a3, MemOperand(sp, 3 * kPointerSize));
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2573,27 +2433,24 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ Branch(&done, eq, t2, Operand(at));
__ lw(feedback_map, FieldMemOperand(t2, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
- __ Branch(FLAG_pretenuring_call_new ? &miss : &check_allocation_site, ne,
- feedback_map, Operand(at));
+ __ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ jmp(&megamorphic);
- if (!FLAG_pretenuring_call_new) {
- __ bind(&check_allocation_site);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the slot either some other function or an
- // AllocationSite.
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&miss, ne, feedback_map, Operand(at));
+ __ bind(&check_allocation_site);
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite.
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&miss, ne, feedback_map, Operand(at));
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
- __ Branch(&megamorphic, ne, a1, Operand(t2));
- __ jmp(&done);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
+ __ Branch(&megamorphic, ne, a1, Operand(t2));
+ __ jmp(&done);
__ bind(&miss);
@@ -2612,23 +2469,20 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// An uninitialized cache is patched with the function.
__ bind(&initialize);
- if (!FLAG_pretenuring_call_new) {
- // Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
- __ Branch(&not_array_function, ne, a1, Operand(t2));
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the
- // slot.
- CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
- __ Branch(&done);
-
- __ bind(&not_array_function);
- }
-
- CreateWeakCellStub create_stub(masm->isolate());
+ // Make sure the function is the Array() function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t2);
+ __ Branch(&not_array_function, ne, a1, Operand(t2));
+
+ // The target function is the Array constructor. Create an AllocationSite
+ // if we don't already have it, and store it in the slot.
+ CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ __ Branch(&done);
+
+ __ bind(&not_array_function);
+ CreateWeakCellStub weak_cell_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
__ bind(&done);
}
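
With the FLAG_pretenuring_call_new branches deleted, GenerateRecordCallTarget is left with one unconditional state machine per feedback slot: an uninitialized slot becomes an AllocationSite for the Array function or a weak cell for any other function, and a later mismatch degrades the slot to megamorphic. A compact sketch of those transitions; the enum names and int identities are illustrative, and the cleared-weak-cell re-initialization path is omitted:

#include <cstdio>

enum class Slot { kUninitialized, kAllocationSite, kWeakCell, kMegamorphic };

// One RecordCallTarget transition. `target` and `cached` are function
// identities (plain ints here); `cached` is what a kWeakCell slot remembers.
Slot RecordCallTarget(Slot state, int target, int cached, int array_function) {
  switch (state) {
    case Slot::kUninitialized:  // first call decides the shape of the slot
      return target == array_function ? Slot::kAllocationSite
                                      : Slot::kWeakCell;
    case Slot::kAllocationSite:
      return target == array_function ? state : Slot::kMegamorphic;
    case Slot::kWeakCell:
      return target == cached ? state : Slot::kMegamorphic;
    case Slot::kMegamorphic:
      return state;  // sticky; no further tracking
  }
  return state;
}

int main() {
  Slot s = RecordCallTarget(Slot::kUninitialized, 1, 0, 7);  // -> kWeakCell
  s = RecordCallTarget(s, 2, /*cached=*/1, 7);               // -> kMegamorphic
  std::printf("%d\n", static_cast<int>(s));                  // 3
  return 0;
}
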
@@ -2647,30 +2501,9 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
}
-static void EmitSlowCase(MacroAssembler* masm,
- int argc,
- Label* non_function) {
- // Check for function proxy.
- __ Branch(non_function, ne, t0, Operand(JS_FUNCTION_PROXY_TYPE));
- __ push(a1); // put proxy as additional argument
- __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
- __ mov(a2, zero_reg);
- __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(non_function);
- __ sw(a1, MemOperand(sp, argc * kPointerSize));
- __ li(a0, Operand(argc)); // Set up the number of arguments.
- __ mov(a2, zero_reg);
- __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+static void EmitSlowCase(MacroAssembler* masm, int argc) {
+ __ li(a0, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -2692,12 +2525,12 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
int argc, bool needs_checks,
bool call_as_method) {
// a1 : the function to call
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
if (needs_checks) {
// Check that the function is really a JavaScript function.
// a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &non_function);
+ __ JumpIfSmi(a1, &slow);
// Go to the slow case if we do not have a function.
__ GetObjectType(a1, t0, t0);
@@ -2732,7 +2565,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
if (needs_checks) {
// Slow-case: Non-function called.
__ bind(&slow);
- EmitSlowCase(masm, argc, &non_function);
+ EmitSlowCase(masm, argc);
}
if (call_as_method) {
@@ -2754,34 +2587,27 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
// t0 : original constructor (for IsSuperConstructorCall)
- Label slow, non_function_call;
+ Label non_function;
// Check that the function is not a smi.
- __ JumpIfSmi(a1, &non_function_call);
+ __ JumpIfSmi(a1, &non_function);
// Check that the function is a JSFunction.
__ GetObjectType(a1, t1, t1);
- __ Branch(&slow, ne, t1, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&non_function, ne, t1, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t1, a2, at);
- if (FLAG_pretenuring_call_new) {
- // Put the AllocationSite from the feedback vector into a2.
- // By adding kPointerSize we encode that we know the AllocationSite
- // entry is at the feedback vector slot given by a3 + 1.
- __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize + kPointerSize));
- } else {
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into a2, or undefined.
- __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
- __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ lw(a2, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ lw(t1, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, t1, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(a2, t1);
}
@@ -2793,60 +2619,28 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ mov(a3, a1);
}
- // Jump to the function-specific construct stub.
- Register jmp_reg = t0;
- __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(jmp_reg, FieldMemOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
+ __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
- // a0: number of arguments
- // a1: called object
- // t1: object type
- Label do_call;
- __ bind(&slow);
- __ Branch(&non_function_call, ne, t1, Operand(JS_FUNCTION_PROXY_TYPE));
- __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing r0).
- __ li(a2, Operand(0, RelocInfo::NONE32));
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
- __ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(vector, FieldMemOperand(vector,
- JSFunction::kSharedFunctionInfoOffset));
- __ lw(vector, FieldMemOperand(vector,
- SharedFunctionInfo::kFeedbackVectorOffset));
+ __ bind(&non_function);
+ __ mov(a3, a1);
+ __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// a1 - function
// a3 - slot id
// a2 - vector
- Label miss;
-
+ // t0 - loaded from vector[slot]
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
- __ Branch(&miss, ne, a1, Operand(at));
+ __ Branch(miss, ne, a1, Operand(at));
__ li(a0, Operand(arg_count()));
- __ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(at, a2, Operand(at));
- __ lw(t0, FieldMemOperand(at, FixedArray::kHeaderSize));
-
- // Verify that t0 contains an AllocationSite
- __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&miss, ne, t1, Operand(at));
// Increment the call count for monomorphic function calls.
__ sll(at, a3, kPointerSizeLog2 - kSmiTagSize);
@@ -2859,18 +2653,6 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ mov(a3, a1);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The slow case, we need this no matter what to complete a call after a miss.
- CallFunctionNoFeedback(masm,
- arg_count(),
- true,
- CallAsMethod());
-
- // Unreachable.
- __ stop("Unexpected code address");
}
@@ -2883,7 +2665,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
Label have_js_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2937,7 +2719,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
__ bind(&slow);
- EmitSlowCase(masm, argc, &non_function);
+ EmitSlowCase(masm, argc);
if (CallAsMethod()) {
__ bind(&wrap);
@@ -2945,11 +2727,20 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
- Label uninitialized, miss;
+ Label uninitialized, miss, not_allocation_site;
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&slow_start, eq, t0, Operand(at));
+ // Verify that t0 contains an AllocationSite
+ __ lw(t1, FieldMemOperand(t0, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&not_allocation_site, ne, t1, Operand(at));
+
+ HandleArrayCase(masm, &miss);
+
+ __ bind(&not_allocation_site);
+
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
@@ -3025,7 +2816,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow_start);
// Check that the function is really a JavaScript function.
// r1: pushed function (to be verified)
- __ JumpIfSmi(a1, &non_function);
+ __ JumpIfSmi(a1, &slow);
// Go to the slow case if we do not have a function.
__ GetObjectType(a1, t0, t0);
@@ -3041,10 +2832,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(a1, a2, a3);
// Call the entry.
- Runtime::FunctionId id = GetICState() == DEFAULT
- ? Runtime::kCallIC_Miss
- : Runtime::kCallIC_Customization_Miss;
- __ CallRuntime(id, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss, 3);
// Move result to a1 and exit the internal frame.
__ mov(a1, v0);
@@ -3507,7 +3295,39 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(a0); // Push argument.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+ __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+ // The ToString stub takes one argument in a0.
+ Label is_number;
+ __ JumpIfSmi(a0, &is_number);
+
+ Label not_string;
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
+ __ Branch(&not_string, ge, a1, Operand(FIRST_NONSTRING_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_string);
+
+ Label not_heap_number;
+ __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ lw(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
+ __ bind(&not_oddball);
+
+ __ push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kToString, 1, 1);
}
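
The new ToStringStub is a straight type dispatch: Smis and heap numbers tail-call NumberToString, strings return themselves, oddballs return their cached to_string, and everything else falls through to Runtime::kToString. The same dispatch order as a C++ sketch; the type tags and stand-in bodies are assumptions (V8 dispatches on instance types):

#include <string>

enum class Type { kSmi, kHeapNumber, kString, kOddball, kOther };

struct Value {
  Type type;
  double number;       // payload for kSmi / kHeapNumber
  std::string string;  // payload for kString, or an Oddball's cached string
};

std::string NumberToString(double n) { return std::to_string(n); }  // stand-in
std::string RuntimeToString(const Value&) { return "<runtime>"; }   // stand-in

// Same dispatch order as the stub: number -> NumberToString, string ->
// identity, oddball -> cached to_string, everything else -> runtime.
std::string ToString(const Value& v) {
  switch (v.type) {
    case Type::kSmi:
    case Type::kHeapNumber: return NumberToString(v.number);
    case Type::kString:     return v.string;
    case Type::kOddball:    return v.string;
    default:                return RuntimeToString(v);
  }
}

int main() {
  Value undef{Type::kOddball, 0, "undefined"};
  return ToString(undef) == "undefined" ? 0 : 1;
}
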
@@ -3617,35 +3437,34 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[4]: left string
- __ lw(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
- __ lw(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
+ // ----------- S t a t e -------------
+ // -- a1 : left
+ // -- a0 : right
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertString(a1);
+ __ AssertString(a0);
Label not_same;
__ Branch(&not_same, ne, a0, Operand(a1));
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
- __ DropAndRet(2);
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a1,
+ a2);
+ __ Ret();
__ bind(&not_same);
// Check that both objects are sequential one-byte strings.
+ Label runtime;
__ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
- // Compare flat ASCII strings natively. Remove arguments from stack first.
- __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
- __ Addu(sp, sp, Operand(2 * kPointerSize));
+ // Compare flat one-byte strings natively.
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
+ a3);
StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
__ bind(&runtime);
+ __ Push(a1, a0);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -3678,6 +3497,30 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
}
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::BOOLEAN, state());
+ Label miss;
+
+ __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ if (op() != Token::EQ_STRICT && is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (!Token::IsEqualityOp(op())) {
+ __ lw(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
+ __ AssertSmi(a1);
+ __ lw(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+ __ AssertSmi(a0);
+ }
+ __ Ret(USE_DELAY_SLOT);
+ __ Subu(v0, a1, a0);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
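
GenerateBooleans relies on booleans being Oddballs that cache their numeric value, so a relational compare reduces to loading each side's to_number Smi and subtracting, exactly the Subu(v0, a1, a0) above. A sketch of that reduction:

#include <cstdio>

// Booleans are Oddballs caching their numeric value: false -> 0, true -> 1.
struct Oddball { int to_number; };

// Relational boolean compare as in GenerateBooleans: load both cached
// values and subtract; the sign matches Subu(v0, a1, a0) above.
int CompareBooleans(const Oddball& left, const Oddball& right) {
  return left.to_number - right.to_number;  // <0 less, 0 equal, >0 greater
}

int main() {
  Oddball true_value{1}, false_value{0};
  std::printf("%d %d\n", CompareBooleans(false_value, true_value),
              CompareBooleans(true_value, true_value));  // -1 0
  return 0;
}
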
+
+
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
DCHECK(state() == CompareICState::SMI);
Label miss;
@@ -3995,8 +3838,20 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Branch(&miss, ne, a2, Operand(t0));
__ Branch(&miss, ne, a3, Operand(t0));
- __ Ret(USE_DELAY_SLOT);
- __ subu(v0, a0, a1);
+ if (Token::IsEqualityOp(op())) {
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, a0, a1);
+ } else if (is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (op() == Token::LT || op() == Token::LTE) {
+ __ li(a2, Operand(Smi::FromInt(GREATER)));
+ } else {
+ __ li(a2, Operand(Smi::FromInt(LESS)));
+ }
+ __ Push(a1, a0, a2);
+ __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -4594,33 +4449,26 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, a2);
+ __ EmitLoadTypeFeedbackVector(a2);
CallICStub stub(isolate(), state());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, a2);
- CallIC_ArrayStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
@@ -4629,11 +4477,10 @@ void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
- Register key, Register vector, Register slot,
- Register feedback, Register receiver_map,
- Register scratch1, Register scratch2,
- bool is_polymorphic, Label* miss) {
+static void HandleArrayCases(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, bool is_polymorphic,
+ Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
@@ -4745,8 +4592,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
__ Branch(&not_array, ne, at, Operand(scratch1));
- HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
- scratch1, t5, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, true, &miss);
__ bind(&not_array);
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
@@ -4807,8 +4653,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, t5, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, true, &miss);
__ bind(&not_array);
// Is it generic?
@@ -4827,8 +4672,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Addu(feedback, vector, Operand(at));
__ lw(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, t5, false, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, t5, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
@@ -4840,14 +4684,14 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4864,11 +4708,52 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1
+ Register key = VectorStoreICDescriptor::NameRegister(); // a2
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // a3
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // t0
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0
+ Register feedback = t1;
+ Register receiver_map = t2;
+ Register scratch1 = t5;
+
+ __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(feedback, vector, Operand(scratch1));
+ __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&not_array, ne, scratch1, Operand(at));
+
+ Register scratch2 = t4;
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
+ &miss);
+
+ __ bind(&not_array);
+ __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
+ __ Branch(&miss, ne, feedback, Operand(at));
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
+ scratch1, scratch2);
- // TODO(mvstanton): Implement.
__ bind(&miss);
StoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ Branch(USE_DELAY_SLOT, &compare_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
}
@@ -4882,12 +4767,132 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label start_polymorphic;
+ Label transition_call;
+
+ Register cached_map = scratch1;
+ Register too_far = scratch2;
+ Register pointer_reg = feedback;
+ __ lw(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+ // +-----+------+------+-----+-----+-----+ ... ----+
+ // | map | len | wm0 | wt0 | h0 | wm1 | hN |
+ // +-----+------+------+-----+-----+-----+ ... ----+
+ // 0 1 2 len-1
+ // ^ ^
+ // | |
+ // pointer_reg too_far
+ // aka feedback scratch2
+ // also need receiver_map
+ // use cached_map (scratch1) to look in the weak map values.
+ __ sll(scratch1, too_far, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(too_far, feedback, Operand(scratch1));
+ __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Addu(pointer_reg, feedback,
+ Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
+
+ __ bind(&next_loop);
+ __ lw(cached_map, MemOperand(pointer_reg));
+ __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
+ // Is it a transitioning store?
+ __ lw(too_far, MemOperand(pointer_reg, kPointerSize));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&transition_call, ne, too_far, Operand(at));
+ __ lw(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+ __ Addu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+
+ __ bind(&transition_call);
+ __ lw(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+ __ JumpIfSmi(too_far, miss);
+
+ __ lw(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+
+ // Load the map into the correct register.
+ DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ mov(feedback, too_far);
+
+ __ Addu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+
+ __ bind(&prepare_next);
+ __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
+ __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
+
+ // We exhausted our array of map handler pairs.
+ __ jmp(miss);
+}
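
The diagram above is the keyed-store feedback layout: repeating triples of a weak map cell, a transition map (or undefined), and a handler. The scan HandlePolymorphicStoreCase performs amounts to the following search, sketched with illustrative structs where nullptr plays the role of undefined:

#include <cstddef>

struct Map {};
struct Handler {};  // stands in for a handler code object

struct StoreEntry {        // one (wm, wt, h) triple from the diagram
  const Map* map;          // weak cell's map value
  const Map* transition;   // nullptr plays the role of undefined
  Handler* handler;
};

// The scan performed by HandlePolymorphicStoreCase: find the receiver's
// map, report any transition map, and hand back the handler to jump to.
Handler* FindStoreHandler(const StoreEntry* entries, size_t count,
                          const Map* receiver_map,
                          const Map** out_transition) {
  for (size_t i = 0; i < count; ++i) {
    if (entries[i].map != receiver_map) continue;  // prepare_next
    *out_transition = entries[i].transition;       // transition_call if set
    return entries[i].handler;
  }
  return nullptr;  // exhausted the array: miss
}

int main() {
  Map receiver_map;
  Handler handler;
  StoreEntry entries[] = {{&receiver_map, nullptr, &handler}};
  const Map* transition = nullptr;
  return FindStoreHandler(entries, 1, &receiver_map, &transition) == &handler
             ? 0 : 1;
}
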
+
+
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1
+ Register key = VectorStoreICDescriptor::NameRegister(); // a2
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // a3
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // t0
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0
+ Register feedback = t1;
+ Register receiver_map = t2;
+ Register scratch1 = t5;
+
+ __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(feedback, vector, Operand(scratch1));
+ __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&not_array, ne, scratch1, Operand(at));
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+
+ Register scratch2 = t4;
+
+ HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
+ &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
+ __ Branch(&try_poly_name, ne, feedback, Operand(at));
+ Handle<Code> megamorphic_stub =
+ KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ Branch(&miss, ne, key, Operand(feedback));
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ sll(scratch1, slot, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(feedback, vector, Operand(scratch1));
+ __ lw(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
+ &miss);
- // TODO(mvstanton): Implement.
__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ Branch(USE_DELAY_SLOT, &compare_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
}
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 6ef8fc6beb..4a1255e1b4 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/mips/codegen-mips.h"
+
#if V8_TARGET_ARCH_MIPS
#include "src/codegen.h"
diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc
index 40d497fd99..ff8a79f1b2 100644
--- a/deps/v8/src/mips/constants-mips.cc
+++ b/deps/v8/src/mips/constants-mips.cc
@@ -219,151 +219,6 @@ bool Instruction::IsTrap() const {
}
-Instruction::Type Instruction::InstructionType() const {
- switch (OpcodeFieldRaw()) {
- case SPECIAL:
- switch (FunctionFieldRaw()) {
- case JR:
- case JALR:
- case BREAK:
- case SLL:
- case SRL:
- case SRA:
- case SLLV:
- case SRLV:
- case SRAV:
- case MFHI:
- case MFLO:
- case MULT:
- case MULTU:
- case DIV:
- case DIVU:
- case ADD:
- case ADDU:
- case SUB:
- case SUBU:
- case AND:
- case OR:
- case XOR:
- case NOR:
- case SLT:
- case SLTU:
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE:
- case MOVZ:
- case MOVN:
- case MOVCI:
- case SELEQZ_S:
- case SELNEZ_S:
- return kRegisterType;
- default:
- return kUnsupported;
- }
- break;
- case SPECIAL2:
- switch (FunctionFieldRaw()) {
- case MUL:
- case CLZ:
- return kRegisterType;
- default:
- return kUnsupported;
- }
- break;
- case SPECIAL3:
- switch (FunctionFieldRaw()) {
- case INS:
- case EXT:
- return kRegisterType;
- case BSHFL: {
- int sa = SaFieldRaw() >> kSaShift;
- switch (sa) {
- case BITSWAP:
- return kRegisterType;
- case WSBH:
- case SEB:
- case SEH:
- return kUnsupported;
- }
- sa >>= kBp2Bits;
- switch (sa) {
- case ALIGN:
- return kRegisterType;
- default:
- return kUnsupported;
- }
- }
- default:
- return kUnsupported;
- }
- break;
- case COP1: // Coprocessor instructions.
- switch (RsFieldRawNoAssert()) {
- case BC1: // Branch on coprocessor condition.
- case BC1EQZ:
- case BC1NEZ:
- return kImmediateType;
- default:
- return kRegisterType;
- }
- break;
- case COP1X:
- return kRegisterType;
- // 16 bits Immediate type instructions. e.g.: addi dest, src, imm16.
- case REGIMM:
- case BEQ:
- case BNE:
- case BLEZ:
- case BGTZ:
- case ADDI:
- case DADDI:
- case ADDIU:
- case SLTI:
- case SLTIU:
- case ANDI:
- case ORI:
- case XORI:
- case LUI:
- case BEQL:
- case BNEL:
- case BLEZL:
- case BGTZL:
- case POP66:
- case POP76:
- case LB:
- case LH:
- case LWL:
- case LW:
- case LBU:
- case LHU:
- case LWR:
- case SB:
- case SH:
- case SWL:
- case SW:
- case SWR:
- case LWC1:
- case LDC1:
- case SWC1:
- case SDC1:
- case PCREL:
- case BC:
- case BALC:
- return kImmediateType;
- // 26 bits immediate type instructions. e.g.: j imm26.
- case J:
- case JAL:
- return kJumpType;
- default:
- return kUnsupported;
- }
- return kUnsupported;
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 37ac2336bf..fcbda80191 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -820,6 +820,7 @@ const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
// A nop instruction. (Encoding of sll 0 0 0).
const Instr nopInstr = 0;
+
class Instruction {
public:
enum {
@@ -858,9 +859,57 @@ class Instruction {
kUnsupported = -1
};
- // Get the encoding type of the instruction.
- Type InstructionType() const;
+ enum TypeChecks { NORMAL, EXTRA };
+
+
+#define OpcodeToBitNumber(opcode) \
+ (1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift))
+
+ static const uint64_t kOpcodeImmediateTypeMask =
+ OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
+ OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
+ OpcodeToBitNumber(BGTZ) | OpcodeToBitNumber(ADDI) |
+ OpcodeToBitNumber(DADDI) | OpcodeToBitNumber(ADDIU) |
+ OpcodeToBitNumber(SLTI) | OpcodeToBitNumber(SLTIU) |
+ OpcodeToBitNumber(ANDI) | OpcodeToBitNumber(ORI) |
+ OpcodeToBitNumber(XORI) | OpcodeToBitNumber(LUI) |
+ OpcodeToBitNumber(BEQL) | OpcodeToBitNumber(BNEL) |
+ OpcodeToBitNumber(BLEZL) | OpcodeToBitNumber(BGTZL) |
+ OpcodeToBitNumber(POP66) | OpcodeToBitNumber(POP76) |
+ OpcodeToBitNumber(LB) | OpcodeToBitNumber(LH) | OpcodeToBitNumber(LWL) |
+ OpcodeToBitNumber(LW) | OpcodeToBitNumber(LBU) | OpcodeToBitNumber(LHU) |
+ OpcodeToBitNumber(LWR) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) |
+ OpcodeToBitNumber(SWL) | OpcodeToBitNumber(SW) | OpcodeToBitNumber(SWR) |
+ OpcodeToBitNumber(LWC1) | OpcodeToBitNumber(LDC1) |
+ OpcodeToBitNumber(SWC1) | OpcodeToBitNumber(SDC1) |
+ OpcodeToBitNumber(PCREL) | OpcodeToBitNumber(BC) |
+ OpcodeToBitNumber(BALC);
+
+#define FunctionFieldToBitNumber(function) (1ULL << function)
+
+ static const uint64_t kFunctionFieldRegisterTypeMask =
+ FunctionFieldToBitNumber(JR) | FunctionFieldToBitNumber(JALR) |
+ FunctionFieldToBitNumber(BREAK) | FunctionFieldToBitNumber(SLL) |
+ FunctionFieldToBitNumber(SRL) | FunctionFieldToBitNumber(SRA) |
+ FunctionFieldToBitNumber(SLLV) | FunctionFieldToBitNumber(SRLV) |
+ FunctionFieldToBitNumber(SRAV) | FunctionFieldToBitNumber(MFHI) |
+ FunctionFieldToBitNumber(MFLO) | FunctionFieldToBitNumber(MULT) |
+ FunctionFieldToBitNumber(MULTU) | FunctionFieldToBitNumber(DIV) |
+ FunctionFieldToBitNumber(DIVU) | FunctionFieldToBitNumber(ADD) |
+ FunctionFieldToBitNumber(ADDU) | FunctionFieldToBitNumber(SUB) |
+ FunctionFieldToBitNumber(SUBU) | FunctionFieldToBitNumber(AND) |
+ FunctionFieldToBitNumber(OR) | FunctionFieldToBitNumber(XOR) |
+ FunctionFieldToBitNumber(NOR) | FunctionFieldToBitNumber(SLT) |
+ FunctionFieldToBitNumber(SLTU) | FunctionFieldToBitNumber(TGE) |
+ FunctionFieldToBitNumber(TGEU) | FunctionFieldToBitNumber(TLT) |
+ FunctionFieldToBitNumber(TLTU) | FunctionFieldToBitNumber(TEQ) |
+ FunctionFieldToBitNumber(TNE) | FunctionFieldToBitNumber(MOVZ) |
+ FunctionFieldToBitNumber(MOVN) | FunctionFieldToBitNumber(MOVCI) |
+ FunctionFieldToBitNumber(SELEQZ_S) | FunctionFieldToBitNumber(SELNEZ_S);
+
+ // Get the encoding type of the instruction.
+ inline Type InstructionType(TypeChecks checks = NORMAL) const;
// Accessors for the different named fields used in the MIPS encoding.
inline Opcode OpcodeValue() const {
@@ -1044,6 +1093,91 @@ const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
+
+Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
+ if (checks == EXTRA) {
+ if (OpcodeToBitNumber(OpcodeFieldRaw()) & kOpcodeImmediateTypeMask) {
+ return kImmediateType;
+ }
+ }
+ switch (OpcodeFieldRaw()) {
+ case SPECIAL:
+ if (checks == EXTRA) {
+ if (FunctionFieldToBitNumber(FunctionFieldRaw()) &
+ kFunctionFieldRegisterTypeMask) {
+ return kRegisterType;
+ } else {
+ return kUnsupported;
+ }
+ } else {
+ return kRegisterType;
+ }
+ break;
+ case SPECIAL2:
+ switch (FunctionFieldRaw()) {
+ case MUL:
+ case CLZ:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ }
+ break;
+ case SPECIAL3:
+ switch (FunctionFieldRaw()) {
+ case INS:
+ case EXT:
+ return kRegisterType;
+ case BSHFL: {
+ int sa = SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case BITSWAP:
+ return kRegisterType;
+ case WSBH:
+ case SEB:
+ case SEH:
+ return kUnsupported;
+ }
+ sa >>= kBp2Bits;
+ switch (sa) {
+ case ALIGN:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ }
+ }
+ default:
+ return kUnsupported;
+ }
+ break;
+ case COP1: // Coprocessor instructions.
+ switch (RsFieldRawNoAssert()) {
+ case BC1: // Branch on coprocessor condition.
+ case BC1EQZ:
+ case BC1NEZ:
+ return kImmediateType;
+ default:
+ return kRegisterType;
+ }
+ break;
+ case COP1X:
+ return kRegisterType;
+
+ // 26-bit immediate-type instructions, e.g. j imm26.
+ case J:
+ case JAL:
+ return kJumpType;
+
+ default:
+ if (checks == NORMAL) {
+ return kImmediateType;
+ } else {
+ return kUnsupported;
+ }
+ }
+}
+
+#undef OpcodeToBitNumber
+#undef FunctionFieldToBitNumber
} } // namespace v8::internal
#endif // #ifndef V8_MIPS_CONSTANTS_H_
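
The classification scheme above is compact because a MIPS opcode occupies the top six bits of the instruction word: opcode >> kOpcodeShift is an index in 0..63, so every opcode maps to one bit of a 64-bit mask and a membership test is a single AND. A minimal standalone sketch of the technique, using made-up encodings rather than the real MIPS ones:

#include <cstdint>
#include <iostream>

namespace {
constexpr int kOpcodeShift = 26;  // the opcode field occupies bits 31..26
// Made-up encodings for illustration; the real ones live in constants-mips.h.
constexpr uint32_t kBEQ = 0x04u << kOpcodeShift;
constexpr uint32_t kADDI = 0x08u << kOpcodeShift;
constexpr uint32_t kSPECIAL = 0x00u << kOpcodeShift;

constexpr uint64_t OpcodeToBit(uint32_t opcode) {
  return 1ULL << (opcode >> kOpcodeShift);  // shifted opcode is always 0..63
}

// One 64-bit constant answers "is this opcode immediate-type?" in O(1).
constexpr uint64_t kImmediateMask = OpcodeToBit(kBEQ) | OpcodeToBit(kADDI);

bool IsImmediateType(uint32_t instr_bits) {
  uint32_t opcode = instr_bits & (0x3Fu << kOpcodeShift);  // OpcodeFieldRaw
  return (OpcodeToBit(opcode) & kImmediateMask) != 0;
}
}  // namespace

int main() {
  std::cout << IsImmediateType(kBEQ) << "\n";      // 1
  std::cout << IsImmediateType(kSPECIAL) << "\n";  // 0
}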
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 6028e90b44..f24ec436f0 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -1606,7 +1606,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%08x ",
instr->InstructionBits());
- switch (instr->InstructionType()) {
+ switch (instr->InstructionType(Instruction::EXTRA)) {
case Instruction::kRegisterType: {
DecodeTypeRegister(instr);
break;
diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc
index 5350239d6d..c962994079 100644
--- a/deps/v8/src/mips/frames-mips.cc
+++ b/deps/v8/src/mips/frames-mips.cc
@@ -8,6 +8,7 @@
#include "src/frames.h"
#include "src/mips/assembler-mips-inl.h"
#include "src/mips/assembler-mips.h"
+#include "src/mips/frames-mips.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 1afc3f2b29..b85b1cbf4d 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -31,6 +31,11 @@ const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return t0; }
const Register VectorStoreICDescriptor::VectorRegister() { return a3; }
+const Register VectorStoreTransitionDescriptor::SlotRegister() { return t0; }
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return a3; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return t1; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return a3; }
@@ -41,14 +46,23 @@ const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
-const Register InstanceofDescriptor::left() { return a0; }
-const Register InstanceofDescriptor::right() { return a1; }
+const Register InstanceOfDescriptor::LeftRegister() { return a1; }
+const Register InstanceOfDescriptor::RightRegister() { return a0; }
+
+
+const Register StringCompareDescriptor::LeftRegister() { return a1; }
+const Register StringCompareDescriptor::RightRegister() { return a0; }
const Register ArgumentsAccessReadDescriptor::index() { return a1; }
const Register ArgumentsAccessReadDescriptor::parameter_count() { return a0; }
+const Register ArgumentsAccessNewDescriptor::function() { return a1; }
+const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; }
+const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; }
+
+
const Register ApiGetterDescriptor::function_address() { return a2; }
@@ -64,10 +78,10 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-void StoreTransitionDescriptor::InitializePlatformSpecific(
+void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- MapRegister()};
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ SlotRegister(), VectorRegister(), MapRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -94,6 +108,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToStringDescriptor::ReceiverRegister() { return a0; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
@@ -181,6 +199,15 @@ void CallConstructDescriptor::InitializePlatformSpecific(
}
+void CallTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: target
+ // a0: number of arguments
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a1, a0};
@@ -363,6 +390,18 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a0, // argument count (including receiver)
+ a2, // address of first argument
+ a1 // the target callable to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
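
Each interface descriptor above pins a call's arguments to fixed machine registers and hands shared code a pointer-plus-count view of that list. A small sketch of the pattern, with Register and the descriptor-data type reduced to stand-ins:

#include <cstddef>
#include <cstdio>

// Stand-ins for the real Register / CallInterfaceDescriptorData types.
struct Register { int code; };
constexpr Register a0{4}, a1{5}, a2{6};

struct DescriptorData {
  const Register* regs = nullptr;
  std::size_t count = 0;
  // Mirrors data->InitializePlatformSpecific(arraysize(registers), registers).
  void InitializePlatformSpecific(std::size_t n, const Register* r) {
    regs = r;
    count = n;
  }
};

int main() {
  // Same shape as PushArgsAndCallDescriptor: count, first-arg address, target.
  static const Register registers[] = {a0, a2, a1};
  DescriptorData data;
  data.InitializePlatformSpecific(sizeof(registers) / sizeof(registers[0]),
                                  registers);
  for (std::size_t i = 0; i < data.count; ++i)
    std::printf("arg %zu -> reg %d\n", i, data.regs[i].code);
  return 0;
}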
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 768531fce3..bf158b4c43 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -28,12 +28,12 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/mips/lithium-codegen-mips.h"
#include "src/mips/lithium-gap-resolver-mips.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
@@ -141,8 +141,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
- !info()->is_native() && info()->scope()->has_this_declaration()) {
+ if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -191,16 +190,27 @@ bool LCodeGen::GeneratePrologue() {
if (info()->saves_caller_doubles()) {
SaveCallerDoubles();
}
+ return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+ Comment(";;; Prologue begin");
// Possibly allocate a local context.
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info()->scope()->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in a1.
- DCHECK(!info()->scope()->is_script_scope());
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+ if (info()->scope()->is_script_scope()) {
+ __ push(a1);
+ __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ CallRuntime(Runtime::kNewScriptContext, 2);
+ deopt_mode = Safepoint::kLazyDeopt;
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -208,7 +218,8 @@ bool LCodeGen::GeneratePrologue() {
__ push(a1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoLazyDeopt);
+ RecordSafepoint(deopt_mode);
+
// Context is returned in both v0 and cp. It replaces the context passed to us.
// It's saved in the stack and kept live in cp.
__ mov(cp, v0);
@@ -241,13 +252,7 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; End allocate local context");
}
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- // We have not executed any compiled code yet, so cp still holds the
- // incoming context.
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
+ Comment(";;; Prologue end");
}
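
The new DoPrologue picks one of three context-allocation paths: a runtime call for script scopes (which also needs a lazy-deopt safepoint), FastNewContextStub when the slot count is small enough, and Runtime::kNewFunctionContext otherwise. A sketch of just that dispatch, with the stub's slot limit assumed:

#include <iostream>
#include <string>

// Stand-in for FastNewContextStub::kMaximumSlots; the real limit may differ.
constexpr int kMaximumSlots = 64;

std::string ChooseContextPath(bool is_script_scope, int slots) {
  if (is_script_scope) return "Runtime::kNewScriptContext (lazy deopt)";
  if (slots <= kMaximumSlots) return "FastNewContextStub (new space)";
  return "Runtime::kNewFunctionContext";
}

int main() {
  std::cout << ChooseContextPath(true, 3) << "\n";
  std::cout << ChooseContextPath(false, 3) << "\n";
  std::cout << ChooseContextPath(false, 1000) << "\n";
}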
@@ -784,7 +789,6 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- DCHECK(info()->IsOptimizing() || info()->IsStub());
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
@@ -1028,11 +1032,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCompare: {
- StringCompareStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -2057,11 +2056,17 @@ void LCodeGen::EmitBranchF(InstrType instr,
}
-template<class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr,
- Condition condition,
- Register src1,
- const Operand& src2) {
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
+ Register src1, const Operand& src2) {
+ int true_block = instr->TrueDestination(chunk_);
+ __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
+ Register src1, const Operand& src2) {
int false_block = instr->FalseDestination(chunk_);
__ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
}
@@ -2378,46 +2383,6 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
}
-Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Register temp2,
- Label* is_not_object,
- Label* is_object) {
- __ JumpIfSmi(input, is_not_object);
-
- __ LoadRoot(temp2, Heap::kNullValueRootIndex);
- __ Branch(is_object, eq, input, Operand(temp2));
-
- // Load map.
- __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
- __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
- __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
-
- // Load instance type and check that it is in object type range.
- __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
- __ Branch(is_not_object,
- lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-
- return le;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = scratch0();
-
- Condition true_cond =
- EmitIsObject(reg, temp1, temp2,
- instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
-
- EmitBranch(instr, true_cond, temp2,
- Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -2489,15 +2454,14 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
-
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ DCHECK(ToRegister(instr->left()).is(a1));
+ DCHECK(ToRegister(instr->right()).is(a0));
- Condition condition = ComputeCompareCondition(op);
+ Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
- EmitBranch(instr, condition, v0, Operand(zero_reg));
+ EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
+ Operand(zero_reg));
}
@@ -2651,142 +2615,38 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- Label true_label, done;
- DCHECK(ToRegister(instr->left()).is(a0)); // Object is in a0.
- DCHECK(ToRegister(instr->right()).is(a1)); // Function is in a1.
- Register result = ToRegister(instr->result());
- DCHECK(result.is(v0));
-
- InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+ DCHECK(ToRegister(instr->result()).is(v0));
+ InstanceOfStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-
- __ Branch(&true_label, eq, result, Operand(zero_reg));
- __ li(result, Operand(factory()->false_value()));
- __ Branch(&done);
- __ bind(&true_label);
- __ li(result, Operand(factory()->true_value()));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
- }
- LInstruction* instr() override { return instr_; }
- Label* map_check() { return &map_check_; }
-
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
-
- DCHECK(object.is(a0));
- DCHECK(result.is(v0));
-
- // A Smi is not instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurrences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = temp;
- __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
-
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- __ bind(deferred->map_check()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch with
- // the cached map.
- Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ li(at, Operand(cell));
- __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
- __ BranchShort(&cache_miss, ne, map, Operand(at));
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch
- // with true or false. The distance from map check has to be constant.
- __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
- __ Branch(&done);
-
- // The inlined call site cache did not match. Check null and string before
- // calling the deferred code.
- __ bind(&cache_miss);
- // Null is not instance of anything.
- __ LoadRoot(temp, Heap::kNullValueRootIndex);
- __ Branch(&false_result, eq, object, Operand(temp));
-
- // String values are not instances of anything.
- Condition cc = __ IsObjectStringType(object, temp, temp);
- __ Branch(&false_result, cc, temp, Operand(zero_reg));
-
- // Go to the deferred code.
- __ Branch(deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
-
- // Here result has either true or false. Deferred code also produces true or
- // false object.
- __ bind(deferred->exit());
- __ bind(&done);
}
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- Register result = ToRegister(instr->result());
- DCHECK(result.is(v0));
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+ LHasInPrototypeChainAndBranch* instr) {
+ Register const object = ToRegister(instr->object());
+ Register const object_map = scratch0();
+ Register const object_prototype = object_map;
+ Register const prototype = ToRegister(instr->prototype());
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(isolate(), flags);
-
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
-
- // Get the temp register reserved by the instruction. This needs to be t0 as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
- Register temp = ToRegister(instr->temp());
- DCHECK(temp.is(t0));
- __ li(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 7;
- int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
- Label before_push_delta;
- __ bind(&before_push_delta);
- {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
- __ StoreToSafepointRegisterSlot(temp, temp);
- }
- CallCodeGeneric(stub.GetCode(),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value into the result register slot and
- // restore all registers.
- __ StoreToSafepointRegisterSlot(result, result);
+ // The {object} must be a spec object. It's sufficient to know that {object}
+ // is not a smi, since all other non-spec objects have {null} prototypes and
+ // will be ruled out below.
+ if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+ __ SmiTst(object, at);
+ EmitFalseBranch(instr, eq, at, Operand(zero_reg));
+ }
+ // Loop through the {object}'s prototype chain looking for the {prototype}.
+ __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ Label loop;
+ __ bind(&loop);
+ __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ EmitFalseBranch(instr, eq, object_prototype, Operand(at));
+ __ Branch(USE_DELAY_SLOT, &loop);
+ __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
}
@@ -3547,11 +3407,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Set a0 to the arguments count if adaptation is not needed. Assumes that a0
- // is available to write to at this point.
- if (dont_adapt_arguments) {
- __ li(a0, Operand(arity));
- }
+ // Always initialize a0 to the number of actual arguments.
+ __ li(a0, Operand(arity));
// Invoke function.
__ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
@@ -3958,9 +3815,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(ToRegister(instr->result()).is(v0));
- if (instr->hydrogen()->pass_argument_count()) {
- __ li(a0, Operand(instr->arity()));
- }
+ __ li(a0, Operand(instr->arity()));
// Change context.
__ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -4327,6 +4182,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
DoubleRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register scratch = scratch0();
+ Register scratch_1 = scratch1();
DoubleRegister double_scratch = double_scratch0();
bool key_is_constant = instr->key()->IsConstantOperand();
int base_offset = instr->base_offset();
@@ -4358,8 +4214,9 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
// Only load canonical NaN if the comparison above set the overflow.
__ bind(&is_nan);
- __ LoadRoot(at, Heap::kNanValueRootIndex);
- __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ LoadRoot(scratch_1, Heap::kNanValueRootIndex);
+ __ ldc1(double_scratch,
+ FieldMemOperand(scratch_1, HeapNumber::kValueOffset));
__ sdc1(double_scratch, MemOperand(scratch, 0));
__ Branch(&done);
}
@@ -5557,7 +5414,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// a0 = regexp literal clone.
// a2 and t0-t2 are used as temporaries.
int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
__ li(t3, instr->hydrogen()->literals());
__ lw(a1, FieldMemOperand(t3, literal_offset));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -5601,26 +5458,6 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
}
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->kind());
- __ li(a2, Operand(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else {
- __ li(a2, Operand(instr->hydrogen()->shared_info()));
- __ li(a1, Operand(pretenure ? factory()->true_value()
- : factory()->false_value()));
- __ Push(cp, a2, a1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(a3));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -5715,28 +5552,26 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = ne;
} else if (String::Equals(type_name, factory->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
- __ GetObjectType(input, scratch, input);
- __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
- *cmp1 = input;
- *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
+ __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ And(scratch, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ *cmp1 = scratch;
+ *cmp2 = Operand(1 << Map::kIsCallable);
final_branch_condition = eq;
} else if (String::Equals(type_name, factory->object_string())) {
__ JumpIfSmi(input, false_label);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- Register map = input;
- __ GetObjectType(input, map, scratch);
- __ Branch(false_label,
- lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ Branch(USE_DELAY_SLOT, false_label,
- gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- // map is still valid, so the BitField can be loaded in the delay slot.
- // Check for undetectable objects => false.
- __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, at, 1 << Map::kIsUndetectable);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ GetObjectType(input, scratch, scratch1());
+ __ Branch(false_label, lt, scratch1(), Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Check for callable or undetectable objects => false.
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ And(at, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
*cmp1 = at;
*cmp2 = Operand(zero_reg);
final_branch_condition = eq;
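
Both typeof checks now test Map bit-field flags rather than instance-type ranges: "function" means callable and not undetectable, while "object" requires neither bit. The two masks in plain C++, with the bit positions assumed:

#include <cstdint>
#include <iostream>

// Assumed bit positions; the real values are defined on Map.
constexpr int kIsCallable = 4;
constexpr int kIsUndetectable = 5;

bool TypeofIsFunction(uint8_t bit_field) {
  // (bits & (callable | undetectable)) == callable  <=>  typeof "function"
  uint8_t masked =
      bit_field & ((1 << kIsCallable) | (1 << kIsUndetectable));
  return masked == (1 << kIsCallable);
}

bool TypeofIsObjectBits(uint8_t bit_field) {
  // Callable or undetectable objects never report typeof "object".
  return (bit_field & ((1 << kIsCallable) | (1 << kIsUndetectable))) == 0;
}

int main() {
  std::cout << TypeofIsFunction(1 << kIsCallable) << "\n";        // 1
  std::cout << TypeofIsObjectBits(1 << kIsUndetectable) << "\n";  // 0
}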
@@ -5793,7 +5628,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 6c5b695c28..858c7f12bc 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -113,9 +113,6 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
-
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register result,
@@ -278,10 +275,11 @@ class LCodeGen: public LCodeGenBase {
Condition condition,
FPURegister src1,
FPURegister src2);
- template<class InstrType>
- void EmitFalseBranch(InstrType instr,
- Condition condition,
- Register src1,
+ template <class InstrType>
+ void EmitTrueBranch(InstrType instr, Condition condition, Register src1,
+ const Operand& src2);
+ template <class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition condition, Register src1,
const Operand& src2);
template<class InstrType>
void EmitFalseBranchF(InstrType instr,
@@ -304,15 +302,6 @@ class LCodeGen: public LCodeGenBase {
Register* cmp1,
Operand* cmp2);
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Register temp2,
- Label* is_not_object,
- Label* is_object);
-
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 6cd3410645..42ecab4b8e 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/mips/lithium-mips.h"
+
#include <sstream>
#if V8_TARGET_ARCH_MIPS
@@ -175,13 +177,6 @@ void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
value()->PrintTo(stream);
@@ -931,28 +926,25 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall()) {
+ if (instr->IsCall() || instr->IsPrologue()) {
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- LInstruction* instruction_needing_environment = NULL;
if (hydrogen_val->HasObservableSideEffects()) {
HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- instruction_needing_environment = instr;
sim->ReplayEnvironment(current_block_->last_environment());
hydrogen_value_for_lazy_bailout = sim;
}
LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
}
}
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+ return new (zone()) LPrologue();
+}
+
+
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -999,22 +991,21 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* left =
+ UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+ LOperand* right =
+ UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result =
- new(zone()) LInstanceOf(context, UseFixed(instr->left(), a0),
- UseFixed(instr->right(), a1));
+ LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, v0), instr);
}
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(
- UseFixed(instr->context(), cp),
- UseFixed(instr->left(), a0),
- FixedTemp(t0));
- return MarkAsCall(DefineFixed(result, v0), instr);
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+ HHasInPrototypeChainAndBranch* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* prototype = UseRegister(instr->prototype());
+ return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
}
@@ -1732,14 +1723,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
}
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()),
- temp);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -2457,13 +2440,6 @@ LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LFunctionLiteral(context), v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 2998219892..ed3332ca65 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -79,19 +79,17 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
+ V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -131,6 +129,7 @@ class LCodeGen;
V(OsrEntry) \
V(Parameter) \
V(Power) \
+ V(Prologue) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -232,8 +231,6 @@ class LInstruction : public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
@@ -389,6 +386,12 @@ class LGoto final : public LTemplateInstruction<0, 0, 0> {
};
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -989,23 +992,6 @@ class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
};
-class LIsObjectAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1170,41 +1156,27 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
+ LOperand* context() const { return inputs_[0]; }
+ LOperand* left() const { return inputs_[1]; }
+ LOperand* right() const { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal final : public LTemplateInstruction<1, 2, 1> {
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
- LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
+ LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+ inputs_[0] = object;
+ inputs_[1] = prototype;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) override {
- lazy_deopt_env_ = env;
- }
+ LOperand* object() const { return inputs_[0]; }
+ LOperand* prototype() const { return inputs_[1]; }
- private:
- LEnvironment* lazy_deopt_env_;
+ DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+ "has-in-prototype-chain-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
};
@@ -2552,19 +2524,6 @@ class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFunctionLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 224bc5c7f4..e4cf09798b 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -1,3 +1,4 @@
+
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -10,8 +11,8 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
+#include "src/mips/macro-assembler-mips.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -1218,21 +1219,6 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
}
-void MacroAssembler::FlushICache(Register address, unsigned instructions) {
- RegList saved_regs = kJSCallerSaved | ra.bit();
- MultiPush(saved_regs);
- AllowExternalCallThatCantCauseGC scope(this);
-
- // Save to a0 in case address == t0.
- Move(a0, address);
- PrepareCallCFunction(2, t0);
-
- li(a1, instructions * kInstrSize);
- CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
- MultiPop(saved_regs);
-}
-
-
void MacroAssembler::Ext(Register rt,
Register rs,
uint16_t pos,
@@ -4080,10 +4066,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
+ li(a0, Operand(actual.immediate()));
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
- li(a0, Operand(actual.immediate()));
const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
if (expected.immediate() == sentinel) {
// Don't worry about adapting arguments for builtins that
@@ -4097,8 +4083,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
} else if (actual.is_immediate()) {
- Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
li(a0, Operand(actual.immediate()));
+ Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
} else {
Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
}
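
The reordering above makes the prologue materialize the actual argument count in a0 on every path before deciding whether the arguments adaptor is needed; the decision compares expected against actual, with a sentinel meaning "don't adapt". A sketch of that decision logic (sentinel value assumed):

#include <iostream>

constexpr int kDontAdaptArgumentsSentinel = -1;  // assumed sentinel value

enum class Invoke { kDirect, kThroughAdaptor };

Invoke ChooseEntry(int expected, int actual) {
  // a0 <- actual happens first in the new code, unconditionally.
  if (expected == actual) return Invoke::kDirect;
  if (expected == kDontAdaptArgumentsSentinel) return Invoke::kDirect;
  return Invoke::kThroughAdaptor;  // counts differ: go through the adaptor
}

int main() {
  std::cout << (ChooseEntry(2, 2) == Invoke::kDirect) << "\n";          // 1
  std::cout << (ChooseEntry(2, 3) == Invoke::kThroughAdaptor) << "\n";  // 1
}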
@@ -4213,24 +4199,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
}
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-}
-
-
void MacroAssembler::IsObjectJSStringType(Register object,
Register scratch,
Label* fail) {
@@ -4270,34 +4238,8 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
}
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function) {
- Label non_instance;
- if (miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- GetObjectType(function, result, scratch);
- Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- lw(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- lw(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- And(scratch, scratch,
- Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
- Branch(miss, ne, scratch, Operand(zero_reg));
-
- // Make sure that the function has an instance prototype.
- lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
- Branch(&non_instance, ne, scratch, Operand(zero_reg));
- }
-
+void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss) {
// Get the prototype or initial map from the function.
lw(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -4316,15 +4258,6 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Get the prototype from the initial map.
lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
- if (miss_on_bound_function) {
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- GetMapConstructor(result, result, scratch, scratch);
- }
-
// All done.
bind(&done);
}
@@ -4626,13 +4559,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(t9, id);
+ GetBuiltinEntry(t9, native_context_index);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(t9));
Call(t9);
@@ -4645,19 +4577,19 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
+ int native_context_index) {
// Load the builtins object into target register.
lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ lw(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
- lw(target, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+ lw(target, ContextOperand(target, native_context_index));
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+void MacroAssembler::GetBuiltinEntry(Register target,
+ int native_context_index) {
DCHECK(!target.is(a1));
- GetBuiltinFunction(a1, id);
+ GetBuiltinFunction(a1, native_context_index);
// Load the code entry point from the builtins object.
lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
}
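
Builtins are now fetched by native-context slot index instead of a Builtins::JavaScript id, so the lookup becomes a single indexed load from the context. Modeling a context as a flat slot array (the slot number here is made up):

#include <iostream>
#include <vector>

struct JSFunction { const char* name; };

// A context is modeled as a flat array of tagged slots.
using Context = std::vector<JSFunction*>;

JSFunction* GetBuiltinFunction(const Context& native_context, int index) {
  return native_context[index];  // ContextOperand(target, index) analogue
}

int main() {
  JSFunction apply{"Function.prototype.apply"};
  Context native_context(8, nullptr);
  const int kApplyIndex = 3;  // assumed slot number, for illustration only
  native_context[kApplyIndex] = &apply;
  std::cout << GetBuiltinFunction(native_context, kApplyIndex)->name << "\n";
}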
@@ -4796,6 +4728,12 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+ lw(dst, GlobalObjectOperand());
+ lw(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
+}
+
+
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
@@ -4886,6 +4824,14 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
+void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+ lw(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ lw(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ lw(vector,
+ FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// Out-of-line constant pool not implemented on mips.
@@ -4995,7 +4941,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
FPURegister reg = FPURegister::from_code(i);
- ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
+ ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
}
}
@@ -5196,13 +5142,10 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
- SmiTst(object, t0);
- Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
- push(object);
- lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
- pop(object);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
}
}
@@ -5210,13 +5153,21 @@ void MacroAssembler::AssertString(Register object) {
void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
- SmiTst(object, t0);
- Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
- push(object);
- lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
- pop(object);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
+ }
+}
+
+
+void MacroAssembler::AssertFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
}
}
@@ -5228,11 +5179,9 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
AssertNotSmi(object);
LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
Branch(&done_checking, eq, object, Operand(scratch));
- push(object);
- lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ lw(t8, FieldMemOperand(object, HeapObject::kMapOffset));
LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
- pop(object);
+ Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
bind(&done_checking);
}
}
@@ -5257,86 +5206,6 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
}
-void MacroAssembler::LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- sra(mask, mask, kSmiTagSize + 1);
- Addu(mask, mask, -1); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- JumpIfSmi(object, &is_smi);
- CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- Addu(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- lw(scratch2, MemOperand(scratch1, kPointerSize));
- lw(scratch1, MemOperand(scratch1, 0));
- Xor(scratch1, scratch1, Operand(scratch2));
- And(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- sll(scratch1, scratch1, kPointerSizeLog2 + 1);
- Addu(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- JumpIfSmi(probe, not_found);
- ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
- ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- BranchF(&load_result_from_cache, NULL, eq, f12, f14);
- Branch(not_found);
-
- bind(&is_smi);
- Register scratch = scratch1;
- sra(scratch, object, 1); // Shift away the tag.
- And(scratch, mask, Operand(scratch));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- sll(scratch, scratch, kPointerSizeLog2 + 1);
- Addu(scratch, number_string_cache, scratch);
-
- // Check if the entry is the smi we are looking for.
- lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- Branch(not_found, ne, object, Operand(probe));
-
- // Get the result from the cache.
- bind(&load_result_from_cache);
- lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
-
- IncrementCounter(isolate()->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
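
For reference, the deleted LookupNumberStringCache hashed smis by their value and heap numbers by xoring the two 32-bit halves of the double, masking the hash into a power-of-two cache. The hashing scheme in isolation (cache size illustrative):

#include <cstdint>
#include <cstring>
#include <iostream>

constexpr uint32_t kCacheEntries = 64;  // must be a power of two
constexpr uint32_t kMask = kCacheEntries - 1;

// The asm version first shifts away the smi tag; this takes the raw value.
uint32_t HashSmi(int32_t value) {
  return static_cast<uint32_t>(value) & kMask;
}

uint32_t HashDouble(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  return (lo ^ hi) & kMask;  // xor of the upper and lower words
}

int main() {
  std::cout << HashSmi(42) << "\n";
  std::cout << HashDouble(42.5) << "\n";
}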
@@ -5556,65 +5425,6 @@ void MacroAssembler::CallCFunctionHelper(Register function,
#undef BRANCH_ARGS_CHECK
-void MacroAssembler::PatchRelocatedValue(Register li_location,
- Register scratch,
- Register new_value) {
- lw(scratch, MemOperand(li_location));
- // At this point scratch is a lui(at, ...) instruction.
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, kTheInstructionToPatchShouldBeALui,
- scratch, Operand(LUI));
- lw(scratch, MemOperand(li_location));
- }
- srl(t9, new_value, kImm16Bits);
- Ins(scratch, t9, 0, kImm16Bits);
- sw(scratch, MemOperand(li_location));
-
- lw(scratch, MemOperand(li_location, kInstrSize));
- // scratch is now ori(at, ...).
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, kTheInstructionToPatchShouldBeAnOri,
- scratch, Operand(ORI));
- lw(scratch, MemOperand(li_location, kInstrSize));
- }
- Ins(scratch, new_value, 0, kImm16Bits);
- sw(scratch, MemOperand(li_location, kInstrSize));
-
- // Update the I-cache so the new lui and ori can be executed.
- FlushICache(li_location, 2);
-}
-
-void MacroAssembler::GetRelocatedValue(Register li_location,
- Register value,
- Register scratch) {
- lw(value, MemOperand(li_location));
- if (emit_debug_code()) {
- And(value, value, kOpcodeMask);
- Check(eq, kTheInstructionShouldBeALui,
- value, Operand(LUI));
- lw(value, MemOperand(li_location));
- }
-
- // value now holds a lui instruction. Extract the immediate.
- sll(value, value, kImm16Bits);
-
- lw(scratch, MemOperand(li_location, kInstrSize));
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, kTheInstructionShouldBeAnOri,
- scratch, Operand(ORI));
- lw(scratch, MemOperand(li_location, kInstrSize));
- }
- // "scratch" now holds an ori instruction. Extract the immediate.
- andi(scratch, scratch, kImm16Mask);
-
- // Merge the results.
- or_(value, value, scratch);
-}
-
-
void MacroAssembler::CheckPageFlag(
Register object,
Register scratch,
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 995c082119..1608c951b6 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -233,6 +233,8 @@ class MacroAssembler: public Assembler {
void Call(Label* target);
+ void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
+
inline void Move(Register dst, Register src) {
if (!dst.is(src)) {
mov(dst, src);
@@ -292,6 +294,24 @@ class MacroAssembler: public Assembler {
void Load(Register dst, const MemOperand& src, Representation r);
void Store(Register src, const MemOperand& dst, Representation r);
+ void PushRoot(Heap::RootListIndex index) {
+ LoadRoot(at, index);
+ Push(at);
+ }
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
+ LoadRoot(at, index);
+ Branch(if_equal, eq, with, Operand(at));
+ }
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, Heap::RootListIndex index,
+ Label* if_not_equal) {
+ LoadRoot(at, index);
+ Branch(if_not_equal, ne, with, Operand(at));
+ }
+
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index);
@@ -750,11 +770,6 @@ class MacroAssembler: public Assembler {
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
- // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
- // from C.
- // Does not handle errors.
- void FlushICache(Register address, unsigned instructions);
-
// MIPS32 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -923,6 +938,9 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst);
+
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
@@ -977,16 +995,6 @@ class MacroAssembler: public Assembler {
InvokeFlag flag,
const CallWrapper& call_wrapper);
-
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- void IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail);
-
void IsObjectJSStringType(Register object,
Register scratch,
Label* fail);
@@ -1041,11 +1049,8 @@ class MacroAssembler: public Assembler {
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other registers may be
// clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function = false);
+ void TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss);
void GetObjectType(Register function,
Register map,
@@ -1327,18 +1332,16 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd = PROTECT);
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+ // Invoke specified builtin JavaScript function.
+ void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
// setup the function in a1.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ void GetBuiltinEntry(Register target, int native_context_index);
// Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void GetBuiltinFunction(Register target, int native_context_index);
struct Unresolved {
int pc;
@@ -1479,6 +1482,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+ void AssertFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1498,18 +1504,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// -------------------------------------------------------------------------
// String utilities.
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- void LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
// Checks if both instance types are sequential ASCII strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialOneByte(
@@ -1593,20 +1587,14 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void StubPrologue();
void Prologue(bool code_pre_aging);
+ // Load the type feedback vector from a JavaScript frame.
+ void EmitLoadTypeFeedbackVector(Register vector);
+
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
- // Patch the relocated value (lui/ori pair).
- void PatchRelocatedValue(Register li_location,
- Register scratch,
- Register new_value);
- // Get the relocated value (loaded data) from the lui/ori pair.
- void GetRelocatedValue(Register li_location,
- Register value,
- Register scratch);
-
// Expects object in a0 and returns map with validated enum cache
// in a0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
@@ -1672,10 +1660,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
InvokeFlag flag,
const CallWrapper& call_wrapper);
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
- Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 6dea3f09a3..4ef61abe3d 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -979,10 +979,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
registers_[pc] = bad_ra;
registers_[ra] = bad_ra;
InitializeCoverage();
- for (int i = 0; i < kNumExceptions; i++) {
- exceptions[i] = 0;
- }
-
last_debugger_input_ = NULL;
}
@@ -1628,7 +1624,8 @@ void Simulator::TraceRegWr(int32_t value) {
// TODO(plind): consider making icount_ printing a flag option.
void Simulator::TraceMemRd(int32_t addr, int32_t value) {
if (::v8::internal::FLAG_trace_sim) {
- SNPrintF(trace_buf_, "%08x <-- [%08x] (%d)", value, addr, icount_);
+ SNPrintF(trace_buf_, "%08x <-- [%08x] (%" PRIu64 ")", value, addr,
+ icount_);
}
}
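This and the PrintWatchpoint fix below follow icount_ apparently widening to a 64-bit counter, so the format strings move from %d to the portable PRIu64 macro. A minimal standalone sketch of the idiom:

    #include <cinttypes>
    #include <cstdio>

    int main() {
      uint64_t icount = 4294967296ULL;  // Would have truncated under "%d".
      // PRIu64 expands to the right conversion for uint64_t ("llu" on most
      // 32-bit hosts), keeping the format string portable across targets.
      std::printf("instr count: %" PRIu64 "\n", icount);
      return 0;
    }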
@@ -2101,7 +2098,8 @@ bool Simulator::IsWatchpoint(uint32_t code) {
void Simulator::PrintWatchpoint(uint32_t code) {
MipsDebugger dbg(this);
++break_count_;
- PrintF("\n---- break %d marker: %3d (instr count: %8d) ----------"
+ PrintF("\n---- break %d marker: %3d (instr count: %" PRIu64
+ ") ----------"
"----------------------------------",
code, break_count_, icount_);
dbg.PrintAllRegs(); // Print registers and continue running.
@@ -2185,362 +2183,26 @@ void Simulator::PrintStopInfo(uint32_t code) {
}
-void Simulator::SignalExceptions() {
- for (int i = 1; i < kNumExceptions; i++) {
- if (exceptions[i] != 0) {
- V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.", i);
- }
- }
+void Simulator::SignalException(Exception e) {
+ V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.",
+ static_cast<int>(e));
}
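The replacement trades the latched exceptions[] array plus a deferred SignalExceptions() sweep for an immediate fatal at the raise site. A simplified sketch of the new shape (standalone names, not the real V8 declarations):

    #include <cstdio>
    #include <cstdlib>

    enum Exception { kIntegerOverflow = 1, kIntegerUnderflow };

    // Aborting where the condition is detected keeps the faulting
    // instruction on the call stack, unlike the old post-decode sweep.
    [[noreturn]] void SignalException(Exception e) {
      std::fprintf(stderr, "Error: Exception %d raised.\n", static_cast<int>(e));
      std::abort();
    }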
-// Handle execution based on instruction types.
-
-void Simulator::ConfigureTypeRegister(Instruction* instr,
- int32_t* alu_out,
- int64_t* i64hilo,
- uint64_t* u64hilo,
- int32_t* next_pc,
- int32_t* return_addr_reg,
- bool* do_interrupt) {
- // Every local variable declared here needs to be const.
- // This is to make sure that changed values are sent back to
- // DecodeTypeRegister correctly.
-
- // Instruction fields.
- const Opcode op = instr->OpcodeFieldRaw();
- const int32_t rs_reg = instr->RsValue();
- const int32_t rs = get_register(rs_reg);
- const uint32_t rs_u = static_cast<uint32_t>(rs);
- const int32_t rt_reg = instr->RtValue();
- const int32_t rt = get_register(rt_reg);
- const uint32_t rt_u = static_cast<uint32_t>(rt);
- const int32_t rd_reg = instr->RdValue();
- const uint32_t sa = instr->SaValue();
- const uint8_t bp = instr->Bp2Value();
-
- const int32_t fs_reg = instr->FsValue();
-
-
- // ---------- Configuration.
- switch (op) {
- case COP1: // Coprocessor instructions.
- switch (instr->RsFieldRaw()) {
- case CFC1:
- // At the moment only FCSR is supported.
- DCHECK(fs_reg == kFCSRRegister);
- *alu_out = FCSR_;
- break;
- case MFC1:
- *alu_out = get_fpu_register_word(fs_reg);
- break;
- case MFHC1:
- *alu_out = get_fpu_register_hi_word(fs_reg);
- break;
- case CTC1:
- case MTC1:
- case MTHC1:
- case S:
- case D:
- case W:
- case L:
- case PS:
- // Do everything in the execution step.
- break;
- default:
- // BC1 BC1EQZ BC1NEZ handled in DecodeTypeImmed, should never come here.
- UNREACHABLE();
- }
- break;
- case COP1X:
- break;
- case SPECIAL:
- switch (instr->FunctionFieldRaw()) {
- case JR:
- case JALR:
- *next_pc = get_register(instr->RsValue());
- *return_addr_reg = instr->RdValue();
- break;
- case SLL:
- *alu_out = rt << sa;
- break;
- case SRL:
- if (rs_reg == 0) {
- // Regular logical right shift of a word by a fixed number of
- // bits instruction. RS field is always equal to 0.
- *alu_out = rt_u >> sa;
- } else {
- // Logical right-rotate of a word by a fixed number of bits. This
- // is special case of SRL instruction, added in MIPS32 Release 2.
- // RS field is equal to 00001.
- *alu_out = base::bits::RotateRight32(rt_u, sa);
- }
- break;
- case SRA:
- *alu_out = rt >> sa;
- break;
- case SLLV:
- *alu_out = rt << rs;
- break;
- case SRLV:
- if (sa == 0) {
- // Regular logical right-shift of a word by a variable number of
- // bits instruction. SA field is always equal to 0.
- *alu_out = rt_u >> rs;
- } else {
- // Logical right-rotate of a word by a variable number of bits.
-          // This is a special case of the SRLV instruction, added in MIPS32
- // Release 2. SA field is equal to 00001.
- *alu_out = base::bits::RotateRight32(rt_u, rs_u);
- }
- break;
- case SRAV:
- *alu_out = rt >> rs;
- break;
- case MFHI: // MFHI == CLZ on R6.
- if (!IsMipsArchVariant(kMips32r6)) {
- DCHECK(instr->SaValue() == 0);
- *alu_out = get_register(HI);
- } else {
- // MIPS spec: If no bits were set in GPR rs, the result written to
- // GPR rd is 32.
- DCHECK(instr->SaValue() == 1);
- *alu_out = base::bits::CountLeadingZeros32(rs_u);
- }
- break;
- case MFLO:
- *alu_out = get_register(LO);
- break;
- case MULT: // MULT == MUL_MUH.
- if (!IsMipsArchVariant(kMips32r6)) {
- *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
- } else {
- switch (instr->SaValue()) {
- case MUL_OP:
- case MUH_OP:
- *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
- break;
- default:
- UNIMPLEMENTED_MIPS();
- break;
- }
- }
- break;
- case MULTU: // MULTU == MUL_MUH_U.
- if (!IsMipsArchVariant(kMips32r6)) {
- *u64hilo = static_cast<uint64_t>(rs_u) *
- static_cast<uint64_t>(rt_u);
- } else {
- switch (instr->SaValue()) {
- case MUL_OP:
- case MUH_OP:
- *u64hilo = static_cast<uint64_t>(rs_u) *
- static_cast<uint64_t>(rt_u);
- break;
- default:
- UNIMPLEMENTED_MIPS();
- break;
- }
- }
- break;
- case ADD:
- if (HaveSameSign(rs, rt)) {
- if (rs > 0) {
- exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - rt);
- } else if (rs < 0) {
- exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt);
- }
- }
- *alu_out = rs + rt;
- break;
- case ADDU:
- *alu_out = rs + rt;
- break;
- case SUB:
- if (!HaveSameSign(rs, rt)) {
- if (rs > 0) {
- exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue + rt);
- } else if (rs < 0) {
- exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt);
- }
- }
- *alu_out = rs - rt;
- break;
- case SUBU:
- *alu_out = rs - rt;
- break;
- case AND:
- *alu_out = rs & rt;
- break;
- case OR:
- *alu_out = rs | rt;
- break;
- case XOR:
- *alu_out = rs ^ rt;
- break;
- case NOR:
- *alu_out = ~(rs | rt);
- break;
- case SLT:
- *alu_out = rs < rt ? 1 : 0;
- break;
- case SLTU:
- *alu_out = rs_u < rt_u ? 1 : 0;
- break;
- // Break and trap instructions.
- case BREAK:
- *do_interrupt = true;
- break;
- case TGE:
- *do_interrupt = rs >= rt;
- break;
- case TGEU:
- *do_interrupt = rs_u >= rt_u;
- break;
- case TLT:
- *do_interrupt = rs < rt;
- break;
- case TLTU:
- *do_interrupt = rs_u < rt_u;
- break;
- case TEQ:
- *do_interrupt = rs == rt;
- break;
- case TNE:
- *do_interrupt = rs != rt;
- break;
- case MOVN:
- case MOVZ:
- case MOVCI:
- // No action taken on decode.
- break;
- case DIV:
- case DIVU:
- // div and divu never raise exceptions.
- case SELEQZ_S:
- case SELNEZ_S:
- break;
- default:
- UNREACHABLE();
- }
- break;
- case SPECIAL2:
- switch (instr->FunctionFieldRaw()) {
- case MUL:
- *alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
- break;
- case CLZ:
- // MIPS32 spec: If no bits were set in GPR rs, the result written to
- // GPR rd is 32.
- *alu_out = base::bits::CountLeadingZeros32(rs_u);
- break;
- default:
- UNREACHABLE();
- }
- break;
- case SPECIAL3:
- switch (instr->FunctionFieldRaw()) {
- case INS: { // Mips32r2 instruction.
- // Interpret rd field as 5-bit msb of insert.
- uint16_t msb = rd_reg;
- // Interpret sa field as 5-bit lsb of insert.
- uint16_t lsb = sa;
- uint16_t size = msb - lsb + 1;
- uint32_t mask = (1 << size) - 1;
- *alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
- break;
- }
- case EXT: { // Mips32r2 instruction.
- // Interpret rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg;
- // Interpret sa field as 5-bit lsb of extract.
- uint16_t lsb = sa;
- uint16_t size = msb + 1;
- uint32_t mask = (1 << size) - 1;
- *alu_out = (rs_u & (mask << lsb)) >> lsb;
- break;
- }
- case BSHFL: {
- int sa = instr->SaFieldRaw() >> kSaShift;
- switch (sa) {
- case BITSWAP: {
- uint32_t input = static_cast<uint32_t>(rt);
- uint32_t output = 0;
- uint8_t i_byte, o_byte;
-
- // Reverse the bit in byte for each individual byte
- for (int i = 0; i < 4; i++) {
- output = output >> 8;
- i_byte = input & 0xff;
-
- // Fast way to reverse bits in byte
- // Devised by Sean Anderson, July 13, 2001
- o_byte =
- static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
- (i_byte * 0x8020LU & 0x88440LU)) *
- 0x10101LU >>
- 16);
-
- output = output | (static_cast<uint32_t>(o_byte << 24));
- input = input >> 8;
- }
-
- *alu_out = static_cast<int32_t>(output);
- break;
- }
- case SEB:
- case SEH:
- case WSBH:
- UNREACHABLE();
- break;
- default: {
- sa >>= kBp2Bits;
- switch (sa) {
- case ALIGN: {
- if (bp == 0) {
- *alu_out = static_cast<int32_t>(rt);
- } else {
- uint32_t rt_hi = rt << (8 * bp);
- uint32_t rs_lo = rs >> (8 * (4 - bp));
- *alu_out = static_cast<int32_t>(rt_hi | rs_lo);
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- }
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
- const int32_t& fr_reg,
- const int32_t& fs_reg,
- const int32_t& ft_reg,
- const int32_t& fd_reg) {
+void Simulator::DecodeTypeRegisterDRsType() {
double ft, fs, fd;
uint32_t cc, fcsr_cc;
int64_t i64;
- fs = get_fpu_register_double(fs_reg);
- ft = (instr->FunctionFieldRaw() != MOVF) ? get_fpu_register_double(ft_reg)
- : 0.0;
- fd = get_fpu_register_double(fd_reg);
+ fs = get_fpu_register_double(fs_reg());
+ ft = (get_instr()->FunctionFieldRaw() != MOVF)
+ ? get_fpu_register_double(ft_reg())
+ : 0.0;
+ fd = get_fpu_register_double(fd_reg());
int64_t ft_int = bit_cast<int64_t>(ft);
int64_t fd_int = bit_cast<int64_t>(fd);
- cc = instr->FCccValue();
+ cc = get_instr()->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
- switch (instr->FunctionFieldRaw()) {
+ switch (get_instr()->FunctionFieldRaw()) {
case RINT: {
DCHECK(IsMipsArchVariant(kMips32r6));
double result, temp, temp_result;
@@ -2572,7 +2234,7 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
result = lower;
break;
}
- set_fpu_register_double(fd_reg, result);
+ set_fpu_register_double(fd_reg(), result);
if (result != fs) {
set_fcsr_bit(kFCSRInexactFlagBit, true);
}
@@ -2580,69 +2242,67 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
}
case SEL:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg, (fd_int & 0x1) == 0 ? fs : ft);
+ set_fpu_register_double(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
break;
case SELEQZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg, (ft_int & 0x1) == 0 ? fs : 0.0);
+ set_fpu_register_double(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0);
break;
case SELNEZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_double(fd_reg, (ft_int & 0x1) != 0 ? fs : 0.0);
+ set_fpu_register_double(fd_reg(), (ft_int & 0x1) != 0 ? fs : 0.0);
break;
case MOVZ_C: {
DCHECK(IsMipsArchVariant(kMips32r2));
- int32_t rt_reg = instr->RtValue();
- int32_t rt = get_register(rt_reg);
- if (rt == 0) {
- set_fpu_register_double(fd_reg, fs);
+ if (rt() == 0) {
+ set_fpu_register_double(fd_reg(), fs);
}
break;
}
case MOVN_C: {
DCHECK(IsMipsArchVariant(kMips32r2));
- int32_t rt_reg = instr->RtValue();
+ int32_t rt_reg = get_instr()->RtValue();
int32_t rt = get_register(rt_reg);
if (rt != 0) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
}
break;
}
case MOVF: {
// Same function field for MOVT.D and MOVF.D
- uint32_t ft_cc = (ft_reg >> 2) & 0x7;
+ uint32_t ft_cc = (ft_reg() >> 2) & 0x7;
ft_cc = get_fcsr_condition_bit(ft_cc);
- if (instr->Bit(16)) { // Read Tf bit.
+ if (get_instr()->Bit(16)) { // Read Tf bit.
// MOVT.D
- if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg, fs);
+ if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
} else {
// MOVF.D
- if (!test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg, fs);
+ if (!test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
}
break;
}
case MIN:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_double(fs_reg);
+ fs = get_fpu_register_double(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg, ft);
+ set_fpu_register_double(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else {
- set_fpu_register_double(fd_reg, (fs >= ft) ? ft : fs);
+ set_fpu_register_double(fd_reg(), (fs >= ft) ? ft : fs);
}
break;
case MINA:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_double(fs_reg);
+ fs = get_fpu_register_double(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg, ft);
+ set_fpu_register_double(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else {
double result;
if (fabs(fs) > fabs(ft)) {
@@ -2652,18 +2312,18 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
} else {
result = (fs > ft ? fs : ft);
}
- set_fpu_register_double(fd_reg, result);
+ set_fpu_register_double(fd_reg(), result);
}
break;
case MAXA:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_double(fs_reg);
+ fs = get_fpu_register_double(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg, ft);
+ set_fpu_register_double(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else {
double result;
if (fabs(fs) < fabs(ft)) {
@@ -2673,57 +2333,57 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
} else {
result = (fs > ft ? fs : ft);
}
- set_fpu_register_double(fd_reg, result);
+ set_fpu_register_double(fd_reg(), result);
}
break;
case MAX:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_double(fs_reg);
+ fs = get_fpu_register_double(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg, ft);
+ set_fpu_register_double(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else {
- set_fpu_register_double(fd_reg, (fs <= ft) ? ft : fs);
+ set_fpu_register_double(fd_reg(), (fs <= ft) ? ft : fs);
}
break;
case ADD_D:
- set_fpu_register_double(fd_reg, fs + ft);
+ set_fpu_register_double(fd_reg(), fs + ft);
break;
case SUB_D:
- set_fpu_register_double(fd_reg, fs - ft);
+ set_fpu_register_double(fd_reg(), fs - ft);
break;
case MUL_D:
- set_fpu_register_double(fd_reg, fs * ft);
+ set_fpu_register_double(fd_reg(), fs * ft);
break;
case DIV_D:
- set_fpu_register_double(fd_reg, fs / ft);
+ set_fpu_register_double(fd_reg(), fs / ft);
break;
case ABS_D:
- set_fpu_register_double(fd_reg, fabs(fs));
+ set_fpu_register_double(fd_reg(), fabs(fs));
break;
case MOV_D:
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
break;
case NEG_D:
- set_fpu_register_double(fd_reg, -fs);
+ set_fpu_register_double(fd_reg(), -fs);
break;
case SQRT_D:
- set_fpu_register_double(fd_reg, fast_sqrt(fs));
+ set_fpu_register_double(fd_reg(), fast_sqrt(fs));
break;
case RSQRT_D: {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
double result = 1.0 / fast_sqrt(fs);
- set_fpu_register_double(fd_reg, result);
+ set_fpu_register_double(fd_reg(), result);
break;
}
case RECIP_D: {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
double result = 1.0 / fs;
- set_fpu_register_double(fd_reg, result);
+ set_fpu_register_double(fd_reg(), result);
break;
}
case C_UN_D:
@@ -2751,9 +2411,9 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
double rounded;
int32_t result;
round_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
} break;
case ROUND_W_D: // Round double to word (round half to even).
@@ -2765,49 +2425,49 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
// round to the even one.
result--;
}
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
} break;
case TRUNC_W_D: // Truncate double to word (round towards 0).
{
double rounded = trunc(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
} break;
case FLOOR_W_D: // Round double to word towards negative infinity.
{
double rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
} break;
case CEIL_W_D: // Round double to word towards positive infinity.
{
double rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
} break;
case CVT_S_D: // Convert double to float (single).
- set_fpu_register_float(fd_reg, static_cast<float>(fs));
+ set_fpu_register_float(fd_reg(), static_cast<float>(fs));
break;
case CVT_L_D: { // Mips32r2: Truncate double to 64-bit long-word.
if (IsFp64Mode()) {
int64_t result;
double rounded;
round64_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register(fd_reg, result);
+ set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
} else {
UNSUPPORTED();
@@ -2820,9 +2480,9 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
double rounded = trunc(fs);
i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg, i64);
+ set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
} else {
UNSUPPORTED();
@@ -2840,9 +2500,9 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
}
int64_t i64 = static_cast<int64_t>(result);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg, i64);
+ set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
} else {
UNSUPPORTED();
@@ -2854,9 +2514,9 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
double rounded = std::floor(fs);
int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg, i64);
+ set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
} else {
UNSUPPORTED();
@@ -2868,9 +2528,9 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
double rounded = std::ceil(fs);
int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg, i64);
+ set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
} else {
UNSUPPORTED();
@@ -2938,7 +2598,7 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
DCHECK(result != 0);
dResult = bit_cast<double>(result);
- set_fpu_register_double(fd_reg, dResult);
+ set_fpu_register_double(fd_reg(), dResult);
break;
}
@@ -2952,92 +2612,90 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
}
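ROUND_W_D in this function implements round-to-nearest with ties to even via std::floor(fs + 0.5) plus an odd/exact-tie correction. The same logic as a standalone sketch with worked values:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Mirrors the simulator's ROUND_W_D rounding, minus the FCSR error path.
    int32_t RoundHalfToEven(double fs) {
      double rounded = std::floor(fs + 0.5);
      int32_t result = static_cast<int32_t>(rounded);
      if ((result & 1) != 0 && result - fs == 0.5) {
        result--;  // Exact tie with an odd result: step back to the even value.
      }
      return result;
    }

    int main() {
      // 2.5 -> 2, 3.5 -> 4, -2.5 -> -2: ties land on the even integer.
      std::printf("%d %d %d\n", RoundHalfToEven(2.5), RoundHalfToEven(3.5),
                  RoundHalfToEven(-2.5));
      return 0;
    }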
-void Simulator::DecodeTypeRegisterWRsType(Instruction* instr, int32_t& alu_out,
- const int32_t& fd_reg,
- const int32_t& fs_reg,
- const int32_t& ft_reg) {
- float fs = get_fpu_register_float(fs_reg);
- float ft = get_fpu_register_float(ft_reg);
- switch (instr->FunctionFieldRaw()) {
+void Simulator::DecodeTypeRegisterWRsType() {
+ float fs = get_fpu_register_float(fs_reg());
+ float ft = get_fpu_register_float(ft_reg());
+ int32_t alu_out = 0x12345678;
+ switch (get_instr()->FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
- alu_out = get_fpu_register_signed_word(fs_reg);
- set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
+ alu_out = get_fpu_register_signed_word(fs_reg());
+ set_fpu_register_float(fd_reg(), static_cast<float>(alu_out));
break;
case CVT_D_W: // Convert word to double.
- alu_out = get_fpu_register_signed_word(fs_reg);
- set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
+ alu_out = get_fpu_register_signed_word(fs_reg());
+ set_fpu_register_double(fd_reg(), static_cast<double>(alu_out));
break;
case CMP_AF:
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_EQ:
if (fs == ft) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_UEQ:
if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_LT:
if (fs < ft) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_ULT:
if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_LE:
if (fs <= ft) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_ULE:
if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_OR:
if (!std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_UNE:
if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_NE:
if (fs != ft) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
default:
@@ -3046,20 +2704,17 @@ void Simulator::DecodeTypeRegisterWRsType(Instruction* instr, int32_t& alu_out,
}
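The W-format cases above are the r6 CMP.cond.S family: each predicate writes an all-ones (-1) or all-zeros mask into fd instead of setting an FCSR condition bit, and the U-prefixed variants also accept unordered (NaN) inputs. One predicate extracted as a sketch:

    #include <cmath>
    #include <cstdint>

    // CMP.UEQ.S as implemented above: true when equal or either input is NaN.
    int32_t CmpUeqS(float fs, float ft) {
      return (fs == ft || std::isnan(fs) || std::isnan(ft)) ? -1 : 0;
    }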
-void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
- const int32_t& ft_reg,
- const int32_t& fs_reg,
- const int32_t& fd_reg) {
+void Simulator::DecodeTypeRegisterSRsType() {
float fs, ft, fd;
- fs = get_fpu_register_float(fs_reg);
- ft = get_fpu_register_float(ft_reg);
- fd = get_fpu_register_float(fd_reg);
+ fs = get_fpu_register_float(fs_reg());
+ ft = get_fpu_register_float(ft_reg());
+ fd = get_fpu_register_float(fd_reg());
int32_t ft_int = bit_cast<int32_t>(ft);
int32_t fd_int = bit_cast<int32_t>(fd);
uint32_t cc, fcsr_cc;
- cc = instr->FCccValue();
+ cc = get_instr()->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
- switch (instr->FunctionFieldRaw()) {
+ switch (get_instr()->FunctionFieldRaw()) {
case RINT: {
DCHECK(IsMipsArchVariant(kMips32r6));
float result, temp_result;
@@ -3092,46 +2747,46 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
result = lower;
break;
}
- set_fpu_register_float(fd_reg, result);
+ set_fpu_register_float(fd_reg(), result);
if (result != fs) {
set_fcsr_bit(kFCSRInexactFlagBit, true);
}
break;
}
case ADD_S:
- set_fpu_register_float(fd_reg, fs + ft);
+ set_fpu_register_float(fd_reg(), fs + ft);
break;
case SUB_S:
- set_fpu_register_float(fd_reg, fs - ft);
+ set_fpu_register_float(fd_reg(), fs - ft);
break;
case MUL_S:
- set_fpu_register_float(fd_reg, fs * ft);
+ set_fpu_register_float(fd_reg(), fs * ft);
break;
case DIV_S:
- set_fpu_register_float(fd_reg, fs / ft);
+ set_fpu_register_float(fd_reg(), fs / ft);
break;
case ABS_S:
- set_fpu_register_float(fd_reg, fabs(fs));
+ set_fpu_register_float(fd_reg(), fabs(fs));
break;
case MOV_S:
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
break;
case NEG_S:
- set_fpu_register_float(fd_reg, -fs);
+ set_fpu_register_float(fd_reg(), -fs);
break;
case SQRT_S:
- set_fpu_register_float(fd_reg, fast_sqrt(fs));
+ set_fpu_register_float(fd_reg(), fast_sqrt(fs));
break;
case RSQRT_S: {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
float result = 1.0 / fast_sqrt(fs);
- set_fpu_register_float(fd_reg, result);
+ set_fpu_register_float(fd_reg(), result);
break;
}
case RECIP_S: {
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
float result = 1.0 / fs;
- set_fpu_register_float(fd_reg, result);
+ set_fpu_register_float(fd_reg(), result);
break;
}
case C_F_D:
@@ -3159,15 +2814,15 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
break;
case CVT_D_S:
- set_fpu_register_double(fd_reg, static_cast<double>(fs));
+ set_fpu_register_double(fd_reg(), static_cast<double>(fs));
break;
case SEL:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(fd_reg, (fd_int & 0x1) == 0 ? fs : ft);
+ set_fpu_register_float(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
break;
case CLASS_S: { // Mips32r6 instruction
// Convert float input to uint32_t for easier bit manipulation
- float fs = get_fpu_register_float(fs_reg);
+ float fs = get_fpu_register_float(fs_reg());
uint32_t classed = bit_cast<uint32_t>(fs);
// Extracting sign, exponent and mantissa from the input float
@@ -3227,58 +2882,56 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
DCHECK(result != 0);
fResult = bit_cast<float>(result);
- set_fpu_register_float(fd_reg, fResult);
+ set_fpu_register_float(fd_reg(), fResult);
break;
}
case SELEQZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(
- fd_reg, (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg) : 0.0);
+ set_fpu_register_float(fd_reg(), (ft_int & 0x1) == 0
+ ? get_fpu_register_float(fs_reg())
+ : 0.0);
break;
case SELNEZ_C:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_fpu_register_float(
- fd_reg, (ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg) : 0.0);
+ set_fpu_register_float(fd_reg(), (ft_int & 0x1) != 0
+ ? get_fpu_register_float(fs_reg())
+ : 0.0);
break;
case MOVZ_C: {
DCHECK(IsMipsArchVariant(kMips32r2));
- int32_t rt_reg = instr->RtValue();
- int32_t rt = get_register(rt_reg);
- if (rt == 0) {
- set_fpu_register_float(fd_reg, fs);
+ if (rt() == 0) {
+ set_fpu_register_float(fd_reg(), fs);
}
break;
}
case MOVN_C: {
DCHECK(IsMipsArchVariant(kMips32r2));
- int32_t rt_reg = instr->RtValue();
- int32_t rt = get_register(rt_reg);
- if (rt != 0) {
- set_fpu_register_float(fd_reg, fs);
+ if (rt() != 0) {
+ set_fpu_register_float(fd_reg(), fs);
}
break;
}
case MOVF: {
      // Same function field for MOVT.S and MOVF.S
- uint32_t ft_cc = (ft_reg >> 2) & 0x7;
+ uint32_t ft_cc = (ft_reg() >> 2) & 0x7;
ft_cc = get_fcsr_condition_bit(ft_cc);
- if (instr->Bit(16)) { // Read Tf bit.
+ if (get_instr()->Bit(16)) { // Read Tf bit.
      // MOVT.S
- if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg, fs);
+ if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
} else {
      // MOVF.S
- if (!test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg, fs);
+ if (!test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
}
break;
}
case TRUNC_W_S: { // Truncate single to word (round towards 0).
float rounded = trunc(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
} break;
case TRUNC_L_S: { // Mips32r2 instruction.
@@ -3286,9 +2939,9 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
float rounded = trunc(fs);
int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg, i64);
+ set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
} else {
UNSUPPORTED();
@@ -3299,9 +2952,9 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
{
float rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
} break;
case FLOOR_L_S: { // Mips32r2 instruction.
@@ -3309,9 +2962,9 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
float rounded = std::floor(fs);
int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg, i64);
+ set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
} else {
UNSUPPORTED();
@@ -3326,9 +2979,9 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
// round to the even one.
result--;
}
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
break;
}
@@ -3343,9 +2996,9 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
}
int64_t i64 = static_cast<int64_t>(result);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg, i64);
+ set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
} else {
UNSUPPORTED();
@@ -3356,9 +3009,9 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
{
float rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
} break;
case CEIL_L_S: { // Mips32r2 instruction.
@@ -3366,9 +3019,9 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
float rounded = std::ceil(fs);
int64_t i64 = static_cast<int64_t>(rounded);
if (IsFp64Mode()) {
- set_fpu_register(fd_reg, i64);
+ set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
} else {
UNSUPPORTED();
@@ -3377,39 +3030,39 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
}
case MIN:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_float(fs_reg);
+ fs = get_fpu_register_float(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg, ft);
+ set_fpu_register_float(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else {
- set_fpu_register_float(fd_reg, (fs >= ft) ? ft : fs);
+ set_fpu_register_float(fd_reg(), (fs >= ft) ? ft : fs);
}
break;
case MAX:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_float(fs_reg);
+ fs = get_fpu_register_float(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg, ft);
+ set_fpu_register_float(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else {
- set_fpu_register_float(fd_reg, (fs <= ft) ? ft : fs);
+ set_fpu_register_float(fd_reg(), (fs <= ft) ? ft : fs);
}
break;
case MINA:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_float(fs_reg);
+ fs = get_fpu_register_float(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg, ft);
+ set_fpu_register_float(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else {
float result;
if (fabs(fs) > fabs(ft)) {
@@ -3419,18 +3072,18 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
} else {
result = (fs > ft ? fs : ft);
}
- set_fpu_register_float(fd_reg, result);
+ set_fpu_register_float(fd_reg(), result);
}
break;
case MAXA:
DCHECK(IsMipsArchVariant(kMips32r6));
- fs = get_fpu_register_float(fs_reg);
+ fs = get_fpu_register_float(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg, ft);
+ set_fpu_register_float(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else {
float result;
if (fabs(fs) < fabs(ft)) {
@@ -3440,7 +3093,7 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
} else {
result = (fs > ft ? fs : ft);
}
- set_fpu_register_float(fd_reg, result);
+ set_fpu_register_float(fd_reg(), result);
}
break;
case CVT_L_S: {
@@ -3448,9 +3101,9 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
int64_t result;
float rounded;
round64_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register(fd_reg, result);
+ set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
} else {
UNSUPPORTED();
@@ -3461,9 +3114,9 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
float rounded;
int32_t result;
round_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
break;
}
@@ -3475,105 +3128,102 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
}
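The r6 MIN/MAX/MINA/MAXA cases in this function share one NaN convention: a NaN operand loses to a number, and when both inputs are NaN the fs operand is returned. Extracted as a standalone helper:

    #include <cmath>

    // MIN.S as implemented above; MINA/MAXA apply the same NaN rules to a
    // magnitude comparison.
    float MinS(float fs, float ft) {
      if (std::isnan(fs) && std::isnan(ft)) return fs;
      if (std::isnan(fs)) return ft;
      if (std::isnan(ft)) return fs;
      return (fs >= ft) ? ft : fs;
    }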
-void Simulator::DecodeTypeRegisterLRsType(Instruction* instr,
- const int32_t& ft_reg,
- const int32_t& fs_reg,
- const int32_t& fd_reg) {
- double fs = get_fpu_register_double(fs_reg);
- double ft = get_fpu_register_double(ft_reg);
- switch (instr->FunctionFieldRaw()) {
+void Simulator::DecodeTypeRegisterLRsType() {
+ double fs = get_fpu_register_double(fs_reg());
+ double ft = get_fpu_register_double(ft_reg());
+ switch (get_instr()->FunctionFieldRaw()) {
case CVT_D_L: // Mips32r2 instruction.
      // Watch the signs here: we combine two 32-bit values
      // into a signed 64-bit value.
int64_t i64;
if (IsFp64Mode()) {
- i64 = get_fpu_register(fs_reg);
+ i64 = get_fpu_register(fs_reg());
} else {
- i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg));
- i64 |= static_cast<int64_t>(get_fpu_register_word(fs_reg + 1)) << 32;
+ i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg()));
+ i64 |= static_cast<int64_t>(get_fpu_register_word(fs_reg() + 1)) << 32;
}
- set_fpu_register_double(fd_reg, static_cast<double>(i64));
+ set_fpu_register_double(fd_reg(), static_cast<double>(i64));
break;
case CVT_S_L:
if (IsFp64Mode()) {
- i64 = get_fpu_register(fs_reg);
+ i64 = get_fpu_register(fs_reg());
} else {
- i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg));
- i64 |= static_cast<int64_t>(get_fpu_register_word(fs_reg + 1)) << 32;
+ i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg()));
+ i64 |= static_cast<int64_t>(get_fpu_register_word(fs_reg() + 1)) << 32;
}
- set_fpu_register_float(fd_reg, static_cast<float>(i64));
+ set_fpu_register_float(fd_reg(), static_cast<float>(i64));
break;
    case CMP_AF:  // Mips32r6 CMP.D instructions.
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_EQ:
if (fs == ft) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_UEQ:
if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_LT:
if (fs < ft) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_ULT:
if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_LE:
if (fs <= ft) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_ULE:
if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_OR:
if (!std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_UNE:
if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_NE:
if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
default:
@@ -3582,67 +3232,62 @@ void Simulator::DecodeTypeRegisterLRsType(Instruction* instr,
}
-void Simulator::DecodeTypeRegisterCOP1(
- Instruction* instr, const int32_t& rs_reg, const int32_t& rs,
- const uint32_t& rs_u, const int32_t& rt_reg, const int32_t& rt,
- const uint32_t& rt_u, const int32_t& rd_reg, const int32_t& fr_reg,
- const int32_t& fs_reg, const int32_t& ft_reg, const int32_t& fd_reg,
- int64_t& i64hilo, uint64_t& u64hilo, int32_t& alu_out, bool& do_interrupt,
- int32_t& current_pc, int32_t& next_pc, int32_t& return_addr_reg) {
- switch (instr->RsFieldRaw()) {
+void Simulator::DecodeTypeRegisterCOP1() {
+ switch (get_instr()->RsFieldRaw()) {
case CFC1:
- set_register(rt_reg, alu_out);
+ // At the moment only FCSR is supported.
+ DCHECK(fs_reg() == kFCSRRegister);
+ set_register(rt_reg(), FCSR_);
break;
case MFC1:
- set_register(rt_reg, alu_out);
+ set_register(rt_reg(), get_fpu_register_word(fs_reg()));
break;
case MFHC1:
- set_register(rt_reg, alu_out);
+ set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
break;
case CTC1:
// At the moment only FCSR is supported.
- DCHECK(fs_reg == kFCSRRegister);
- FCSR_ = registers_[rt_reg];
+ DCHECK(fs_reg() == kFCSRRegister);
+ FCSR_ = registers_[rt_reg()];
break;
case MTC1:
// Hardware writes upper 32-bits to zero on mtc1.
- set_fpu_register_hi_word(fs_reg, 0);
- set_fpu_register_word(fs_reg, registers_[rt_reg]);
+ set_fpu_register_hi_word(fs_reg(), 0);
+ set_fpu_register_word(fs_reg(), registers_[rt_reg()]);
break;
case MTHC1:
- set_fpu_register_hi_word(fs_reg, registers_[rt_reg]);
+ set_fpu_register_hi_word(fs_reg(), registers_[rt_reg()]);
break;
case S: {
- DecodeTypeRegisterSRsType(instr, ft_reg, fs_reg, fd_reg);
+ DecodeTypeRegisterSRsType();
break;
}
case D:
- DecodeTypeRegisterDRsType(instr, fr_reg, fs_reg, ft_reg, fd_reg);
+ DecodeTypeRegisterDRsType();
break;
case W:
- DecodeTypeRegisterWRsType(instr, alu_out, fd_reg, fs_reg, ft_reg);
+ DecodeTypeRegisterWRsType();
break;
case L:
- DecodeTypeRegisterLRsType(instr, ft_reg, fs_reg, fd_reg);
+ DecodeTypeRegisterLRsType();
break;
+ case PS:
+ // Not implemented.
+ UNREACHABLE();
default:
UNREACHABLE();
}
}
-void Simulator::DecodeTypeRegisterCOP1X(Instruction* instr,
- const int32_t& fr_reg,
- const int32_t& fs_reg,
- const int32_t& ft_reg,
- const int32_t& fd_reg) {
- switch (instr->FunctionFieldRaw()) {
+void Simulator::DecodeTypeRegisterCOP1X() {
+ switch (get_instr()->FunctionFieldRaw()) {
case MADD_D:
double fr, ft, fs;
- fr = get_fpu_register_double(fr_reg);
- fs = get_fpu_register_double(fs_reg);
- ft = get_fpu_register_double(ft_reg);
- set_fpu_register_double(fd_reg, fs * ft + fr);
+ fr = get_fpu_register_double(fr_reg());
+ fs = get_fpu_register_double(fs_reg());
+ ft = get_fpu_register_double(ft_reg());
+ set_fpu_register_double(fd_reg(), fs * ft + fr);
break;
default:
UNREACHABLE();
@@ -3650,216 +3295,411 @@ void Simulator::DecodeTypeRegisterCOP1X(Instruction* instr,
}
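COP1X carries the one three-operand FP op the simulator models, MADD.D, computing fs * ft + fr. Pre-r6 MADD.fmt is unfused (the product is rounded to double before the add), so a plain C++ expression only matches when the compiler does not contract it into a fused multiply-add:

    // Sketch; build with FP contraction disabled (e.g. -ffp-contract=off) so
    // the intermediate product is rounded, as the unfused MADD.D requires.
    double MaddD(double fr, double fs, double ft) { return fs * ft + fr; }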
-void Simulator::DecodeTypeRegisterSPECIAL(
- Instruction* instr, const int32_t& rs_reg, const int32_t& rs,
- const uint32_t& rs_u, const int32_t& rt_reg, const int32_t& rt,
- const uint32_t& rt_u, const int32_t& rd_reg, const int32_t& fr_reg,
- const int32_t& fs_reg, const int32_t& ft_reg, const int32_t& fd_reg,
- int64_t& i64hilo, uint64_t& u64hilo, int32_t& alu_out, bool& do_interrupt,
- int32_t& current_pc, int32_t& next_pc, int32_t& return_addr_reg) {
- switch (instr->FunctionFieldRaw()) {
+void Simulator::DecodeTypeRegisterSPECIAL() {
+ int64_t alu_out = 0x12345678;
+ int64_t i64hilo = 0;
+ uint64_t u64hilo = 0;
+ bool do_interrupt = false;
+
+ switch (get_instr()->FunctionFieldRaw()) {
case SELEQZ_S:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_register(rd_reg, rt == 0 ? rs : 0);
+ set_register(rd_reg(), rt() == 0 ? rs() : 0);
break;
case SELNEZ_S:
DCHECK(IsMipsArchVariant(kMips32r6));
- set_register(rd_reg, rt != 0 ? rs : 0);
- break;
- case JR: {
- Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
- current_pc+Instruction::kInstrSize);
- BranchDelayInstructionDecode(branch_delay_instr);
- set_pc(next_pc);
- pc_modified_ = true;
- break;
+ set_register(rd_reg(), rt() != 0 ? rs() : 0);
+ break;
+ case JR: {
+ int32_t next_pc = rs();
+ int32_t current_pc = get_pc();
+ Instruction* branch_delay_instr =
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ BranchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ case JALR: {
+ int32_t next_pc = rs();
+ int32_t return_addr_reg = rd_reg();
+ int32_t current_pc = get_pc();
+ Instruction* branch_delay_instr =
+ reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+ BranchDelayInstructionDecode(branch_delay_instr);
+ set_register(return_addr_reg, current_pc + 2 * Instruction::kInstrSize);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ case SLL:
+ alu_out = rt() << sa();
+ SetResult(rd_reg(), static_cast<int32_t>(alu_out));
+ break;
+ case SRL:
+ if (rs_reg() == 0) {
+ // Regular logical right shift of a word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ alu_out = rt_u() >> sa();
+ } else {
+ // Logical right-rotate of a word by a fixed number of bits. This
+        // is a special case of the SRL instruction, added in MIPS32 Release 2.
+ // RS field is equal to 00001.
+ alu_out = base::bits::RotateRight32(rt_u(), sa());
+ }
+ SetResult(rd_reg(), static_cast<int32_t>(alu_out));
+ break;
+ case SRA:
+ alu_out = rt() >> sa();
+ SetResult(rd_reg(), static_cast<int32_t>(alu_out));
+ break;
+ case SLLV:
+ alu_out = rt() << rs();
+ SetResult(rd_reg(), static_cast<int32_t>(alu_out));
+ break;
+ case SRLV:
+ if (sa() == 0) {
+ // Regular logical right-shift of a word by a variable number of
+ // bits instruction. SA field is always equal to 0.
+ alu_out = rt_u() >> rs();
+ } else {
+ // Logical right-rotate of a word by a variable number of bits.
+        // This is a special case of the SRLV instruction, added in MIPS32
+ // Release 2. SA field is equal to 00001.
+ alu_out = base::bits::RotateRight32(rt_u(), rs_u());
+ }
+ SetResult(rd_reg(), static_cast<int32_t>(alu_out));
+ break;
+ case SRAV:
+ alu_out = rt() >> rs();
+ SetResult(rd_reg(), static_cast<int32_t>(alu_out));
+ break;
+ case MFHI: // MFHI == CLZ on R6.
+ if (!IsMipsArchVariant(kMips32r6)) {
+ DCHECK(sa() == 0);
+ alu_out = get_register(HI);
+ } else {
+ // MIPS spec: If no bits were set in GPR rs, the result written to
+ // GPR rd is 32.
+ DCHECK(sa() == 1);
+ alu_out = base::bits::CountLeadingZeros32(rs_u());
+ }
+ SetResult(rd_reg(), static_cast<int32_t>(alu_out));
+ break;
+ case MFLO:
+ alu_out = get_register(LO);
+ SetResult(rd_reg(), static_cast<int32_t>(alu_out));
+ break;
+ // Instructions using HI and LO registers.
+ case MULT:
+ i64hilo = static_cast<int64_t>(rs()) * static_cast<int64_t>(rt());
+ if (!IsMipsArchVariant(kMips32r6)) {
+ set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+ } else {
+ switch (sa()) {
+ case MUL_OP:
+ set_register(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
+ break;
+ case MUH_OP:
+ set_register(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
}
- case JALR: {
- Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
- current_pc+Instruction::kInstrSize);
- BranchDelayInstructionDecode(branch_delay_instr);
- set_register(return_addr_reg,
- current_pc + 2 * Instruction::kInstrSize);
- set_pc(next_pc);
- pc_modified_ = true;
- break;
+ }
+ break;
+ case MULTU:
+ u64hilo = static_cast<uint64_t>(rs_u()) * static_cast<uint64_t>(rt_u());
+ if (!IsMipsArchVariant(kMips32r6)) {
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+ } else {
+ switch (sa()) {
+ case MUL_OP:
+ set_register(rd_reg(), static_cast<int32_t>(u64hilo & 0xffffffff));
+ break;
+ case MUH_OP:
+ set_register(rd_reg(), static_cast<int32_t>(u64hilo >> 32));
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
}
- // Instructions using HI and LO registers.
- case MULT:
- if (!IsMipsArchVariant(kMips32r6)) {
- set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(i64hilo >> 32));
- } else {
- switch (instr->SaValue()) {
- case MUL_OP:
- set_register(rd_reg,
- static_cast<int32_t>(i64hilo & 0xffffffff));
- break;
- case MUH_OP:
- set_register(rd_reg, static_cast<int32_t>(i64hilo >> 32));
- break;
- default:
- UNIMPLEMENTED_MIPS();
- break;
- }
- }
- break;
- case MULTU:
- if (!IsMipsArchVariant(kMips32r6)) {
- set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
- set_register(HI, static_cast<int32_t>(u64hilo >> 32));
- } else {
- switch (instr->SaValue()) {
- case MUL_OP:
- set_register(rd_reg,
- static_cast<int32_t>(u64hilo & 0xffffffff));
- break;
- case MUH_OP:
- set_register(rd_reg, static_cast<int32_t>(u64hilo >> 32));
- break;
- default:
- UNIMPLEMENTED_MIPS();
- break;
+ }
+ break;
+ case DIV:
+ if (IsMipsArchVariant(kMips32r6)) {
+ switch (get_instr()->SaValue()) {
+ case DIV_OP:
+ if (rs() == INT_MIN && rt() == -1) {
+ set_register(rd_reg(), INT_MIN);
+ } else if (rt() != 0) {
+ set_register(rd_reg(), rs() / rt());
}
- }
- break;
- case DIV:
- if (IsMipsArchVariant(kMips32r6)) {
- switch (instr->SaValue()) {
- case DIV_OP:
- if (rs == INT_MIN && rt == -1) {
- set_register(rd_reg, INT_MIN);
- } else if (rt != 0) {
- set_register(rd_reg, rs / rt);
- }
- break;
- case MOD_OP:
- if (rs == INT_MIN && rt == -1) {
- set_register(rd_reg, 0);
- } else if (rt != 0) {
- set_register(rd_reg, rs % rt);
- }
- break;
- default:
- UNIMPLEMENTED_MIPS();
- break;
+ break;
+ case MOD_OP:
+ if (rs() == INT_MIN && rt() == -1) {
+ set_register(rd_reg(), 0);
+ } else if (rt() != 0) {
+ set_register(rd_reg(), rs() % rt());
}
- } else {
- // Divide by zero and overflow was not checked in the
- // configuration step - div and divu do not raise exceptions. On
- // division by 0 the result will be UNPREDICTABLE. On overflow
- // (INT_MIN/-1), return INT_MIN which is what the hardware does.
- if (rs == INT_MIN && rt == -1) {
- set_register(LO, INT_MIN);
- set_register(HI, 0);
- } else if (rt != 0) {
- set_register(LO, rs / rt);
- set_register(HI, rs % rt);
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ } else {
+ // Divide by zero and overflow was not checked in the
+ // configuration step - div and divu do not raise exceptions. On
+ // division by 0 the result will be UNPREDICTABLE. On overflow
+ // (INT_MIN/-1), return INT_MIN which is what the hardware does.
+ if (rs() == INT_MIN && rt() == -1) {
+ set_register(LO, INT_MIN);
+ set_register(HI, 0);
+ } else if (rt() != 0) {
+ set_register(LO, rs() / rt());
+ set_register(HI, rs() % rt());
+ }
+ }
+ break;
+ case DIVU:
+ if (IsMipsArchVariant(kMips32r6)) {
+ switch (get_instr()->SaValue()) {
+ case DIV_OP:
+ if (rt_u() != 0) {
+ set_register(rd_reg(), rs_u() / rt_u());
}
- }
- break;
- case DIVU:
- if (IsMipsArchVariant(kMips32r6)) {
- switch (instr->SaValue()) {
- case DIV_OP:
- if (rt_u != 0) {
- set_register(rd_reg, rs_u / rt_u);
- }
- break;
- case MOD_OP:
- if (rt_u != 0) {
- set_register(rd_reg, rs_u % rt_u);
- }
- break;
- default:
- UNIMPLEMENTED_MIPS();
- break;
- }
- } else {
- if (rt_u != 0) {
- set_register(LO, rs_u / rt_u);
- set_register(HI, rs_u % rt_u);
+ break;
+ case MOD_OP:
+ if (rt_u() != 0) {
+ set_register(rd_reg(), rs_u() % rt_u());
}
+ break;
+ default:
+ UNIMPLEMENTED_MIPS();
+ break;
+ }
+ } else {
+ if (rt_u() != 0) {
+ set_register(LO, rs_u() / rt_u());
+ set_register(HI, rs_u() % rt_u());
+ }
+ }
+ break;
+ case ADD:
+ if (HaveSameSign(rs(), rt())) {
+ if (rs() > 0) {
+          if (rs() > (Registers::kMaxValue - rt())) {
+ SignalException(kIntegerOverflow);
}
- break;
- // Break and trap instructions.
- case BREAK:
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE:
- if (do_interrupt) {
- SoftwareInterrupt(instr);
+ } else if (rs() < 0) {
+          if (rs() < (Registers::kMinValue - rt())) {
+ SignalException(kIntegerUnderflow);
}
- break;
- // Conditional moves.
- case MOVN:
- if (rt) {
- set_register(rd_reg, rs);
- TraceRegWr(rs);
+ }
+ }
+ SetResult(rd_reg(), rs() + rt());
+ break;
+ case ADDU:
+ SetResult(rd_reg(), rs() + rt());
+ break;
+ case SUB:
+ if (!HaveSameSign(rs(), rt())) {
+ if (rs() > 0) {
+          if (rs() > (Registers::kMaxValue + rt())) {
+ SignalException(kIntegerOverflow);
}
- break;
- case MOVCI: {
- uint32_t cc = instr->FBccValue();
- uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
- if (instr->Bit(16)) { // Read Tf bit.
- if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
- } else {
- if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+ } else if (rs() < 0) {
+          if (rs() < (Registers::kMinValue + rt())) {
+ SignalException(kIntegerUnderflow);
}
- break;
}
- case MOVZ:
- if (!rt) {
- set_register(rd_reg, rs);
- TraceRegWr(rs);
- }
- break;
- default: // For other special opcodes we do the default operation.
- set_register(rd_reg, alu_out);
- TraceRegWr(alu_out);
}
+ SetResult(rd_reg(), rs() - rt());
+ break;
+ case SUBU:
+ SetResult(rd_reg(), rs() - rt());
+ break;
+ case AND:
+ SetResult(rd_reg(), rs() & rt());
+ break;
+ case OR:
+ SetResult(rd_reg(), rs() | rt());
+ break;
+ case XOR:
+ SetResult(rd_reg(), rs() ^ rt());
+ break;
+ case NOR:
+ SetResult(rd_reg(), ~(rs() | rt()));
+ break;
+ case SLT:
+ SetResult(rd_reg(), rs() < rt() ? 1 : 0);
+ break;
+ case SLTU:
+ SetResult(rd_reg(), rs_u() < rt_u() ? 1 : 0);
+ break;
+ // Break and trap instructions.
+ case BREAK:
+ do_interrupt = true;
+ break;
+ case TGE:
+ do_interrupt = rs() >= rt();
+ break;
+ case TGEU:
+ do_interrupt = rs_u() >= rt_u();
+ break;
+ case TLT:
+ do_interrupt = rs() < rt();
+ break;
+ case TLTU:
+ do_interrupt = rs_u() < rt_u();
+ break;
+ case TEQ:
+ do_interrupt = rs() == rt();
+ break;
+ case TNE:
+ do_interrupt = rs() != rt();
+ break;
+ // Conditional moves.
+ case MOVN:
+ if (rt()) {
+ set_register(rd_reg(), rs());
+ TraceRegWr(rs());
+ }
+ break;
+ case MOVCI: {
+ uint32_t cc = get_instr()->FBccValue();
+ uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
+ if (get_instr()->Bit(16)) { // Read Tf bit.
+ if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
+ } else {
+ if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
+ }
+ break;
+ }
+ case MOVZ:
+ if (!rt()) {
+ set_register(rd_reg(), rs());
+ TraceRegWr(rs());
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ if (do_interrupt) {
+ SoftwareInterrupt(get_instr());
+ }
}
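ADD and SUB above trap instead of wrapping: with same-sign operands (opposite signs for SUB), a result outside the 32-bit range raises kIntegerOverflow or kIntegerUnderflow. The trap predicate restated standalone, without relying on signed wraparound:

    #include <climits>
    #include <cstdint>

    // For rs + rt on 32-bit two's complement: with both operands positive the
    // sum overflows iff rs > INT_MAX - rt; with both negative it underflows
    // iff rs < INT_MIN - rt. Mixed signs can never leave the range.
    bool AddOverflows(int32_t rs, int32_t rt) {
      if (rs > 0 && rt > 0) return rs > INT_MAX - rt;
      if (rs < 0 && rt < 0) return rs < INT_MIN - rt;
      return false;
    }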
-void Simulator::DecodeTypeRegisterSPECIAL2(Instruction* instr,
- const int32_t& rd_reg,
- int32_t& alu_out) {
- switch (instr->FunctionFieldRaw()) {
+void Simulator::DecodeTypeRegisterSPECIAL2() {
+ int32_t alu_out;
+ switch (get_instr()->FunctionFieldRaw()) {
case MUL:
- set_register(rd_reg, alu_out);
- TraceRegWr(alu_out);
+ // Only the lower 32 bits are kept.
+ alu_out = rs_u() * rt_u();
// HI and LO are UNPREDICTABLE after the operation.
set_register(LO, Unpredictable);
set_register(HI, Unpredictable);
break;
- default: // For other special2 opcodes we do the default operation.
- set_register(rd_reg, alu_out);
- TraceRegWr(alu_out);
+ case CLZ:
+ // MIPS32 spec: If no bits were set in GPR rs, the result written to
+ // GPR rd is 32.
+ alu_out = base::bits::CountLeadingZeros32(rs_u());
+ break;
+ default:
+ alu_out = 0x12345678;
+ UNREACHABLE();
}
+ SetResult(rd_reg(), alu_out);
}
-void Simulator::DecodeTypeRegisterSPECIAL3(Instruction* instr,
- const int32_t& rt_reg,
- const int32_t& rd_reg,
- int32_t& alu_out) {
- switch (instr->FunctionFieldRaw()) {
- case INS:
+void Simulator::DecodeTypeRegisterSPECIAL3() {
+ int32_t alu_out;
+ switch (get_instr()->FunctionFieldRaw()) {
+ case INS: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa();
+ uint16_t size = msb - lsb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb);
// Ins instr leaves result in Rt, rather than Rd.
- set_register(rt_reg, alu_out);
- TraceRegWr(alu_out);
+ SetResult(rt_reg(), alu_out);
break;
- case EXT:
- set_register(rt_reg, alu_out);
- TraceRegWr(alu_out);
+ }
+ case EXT: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa();
+ uint16_t size = msb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rs_u() & (mask << lsb)) >> lsb;
+ SetResult(rt_reg(), alu_out);
break;
- case BSHFL:
- set_register(rd_reg, alu_out);
- TraceRegWr(alu_out);
+ }
+ case BSHFL: {
+ int sa = get_instr()->SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case BITSWAP: {
+ uint32_t input = static_cast<uint32_t>(rt());
+ uint32_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bits in each individual byte.
+ for (int i = 0; i < 4; i++) {
+ output = output >> 8;
+ i_byte = input & 0xff;
+
+ // Fast way to reverse the bits in a byte,
+ // devised by Sean Anderson, July 13, 2001.
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint32_t>(o_byte << 24));
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int32_t>(output);
+ break;
+ }
+ case SEB:
+ case SEH:
+ case WSBH:
+ alu_out = 0x12345678;
+ UNREACHABLE();
+ break;
+ default: {
+ const uint8_t bp = get_instr()->Bp2Value();
+ sa >>= kBp2Bits;
+ switch (sa) {
+ case ALIGN: {
+ if (bp == 0) {
+ alu_out = static_cast<int32_t>(rt());
+ } else {
+ uint32_t rt_hi = rt() << (8 * bp);
+ uint32_t rs_lo = rs() >> (8 * (4 - bp));
+ alu_out = static_cast<int32_t>(rt_hi | rs_lo);
+ }
+ break;
+ }
+ default:
+ alu_out = 0x12345678;
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ SetResult(rd_reg(), alu_out);
break;
+ }
default:
UNREACHABLE();
}
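
The BITSWAP case above uses the multiply-and-mask byte reversal from Sean Anderson's Bit Twiddling Hacks: the two multiplies fan the eight bits out across a wider field, the masks keep one copy of each bit in its mirrored position, and the final multiply-and-shift gathers them back into a byte. A self-contained check of that identity against a naive loop (plain C++, not simulator code):

    #include <cassert>
    #include <cstdint>

    // The 7-operation trick used by BITSWAP above.
    static uint8_t reverse_fast(uint8_t b) {
      return static_cast<uint8_t>(
          ((b * 0x0802LU & 0x22110LU) | (b * 0x8020LU & 0x88440LU)) *
              0x10101LU >> 16);
    }

    // Reference implementation: shift the bits out one at a time.
    static uint8_t reverse_naive(uint8_t b) {
      uint8_t r = 0;
      for (int i = 0; i < 8; i++) {
        r = static_cast<uint8_t>((r << 1) | ((b >> i) & 1));
      }
      return r;
    }

    int main() {
      for (int v = 0; v < 256; v++) {
        assert(reverse_fast(static_cast<uint8_t>(v)) ==
               reverse_naive(static_cast<uint8_t>(v)));
      }
      return 0;
    }
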
@@ -3867,134 +3707,101 @@ void Simulator::DecodeTypeRegisterSPECIAL3(Instruction* instr,
void Simulator::DecodeTypeRegister(Instruction* instr) {
- // Instruction fields.
const Opcode op = instr->OpcodeFieldRaw();
- const int32_t rs_reg = instr->RsValue();
- const int32_t rs = get_register(rs_reg);
- const uint32_t rs_u = static_cast<uint32_t>(rs);
- const int32_t rt_reg = instr->RtValue();
- const int32_t rt = get_register(rt_reg);
- const uint32_t rt_u = static_cast<uint32_t>(rt);
- const int32_t rd_reg = instr->RdValue();
-
- const int32_t fr_reg = instr->FrValue();
- const int32_t fs_reg = instr->FsValue();
- const int32_t ft_reg = instr->FtValue();
- const int32_t fd_reg = instr->FdValue();
- int64_t i64hilo = 0;
- uint64_t u64hilo = 0;
-
- // ALU output.
- // It should not be used as is. Instructions using it should always
- // initialize it first.
- int32_t alu_out = 0x12345678;
-
- // For break and trap instructions.
- bool do_interrupt = false;
-
- // For jr and jalr.
- // Get current pc.
- int32_t current_pc = get_pc();
- // Next pc
- int32_t next_pc = 0;
- int32_t return_addr_reg = 31;
// Set up the variables if needed before executing the instruction.
- ConfigureTypeRegister(instr, &alu_out, &i64hilo, &u64hilo, &next_pc,
- &return_addr_reg, &do_interrupt);
-
- // ---------- Raise exceptions triggered.
- SignalExceptions();
+ set_instr(instr);
// ---------- Execution.
switch (op) {
case COP1:
- DecodeTypeRegisterCOP1(instr, rs_reg, rs, rs_u, rt_reg, rt, rt_u, rd_reg,
- fr_reg, fs_reg, ft_reg, fd_reg, i64hilo, u64hilo,
- alu_out, do_interrupt, current_pc, next_pc,
- return_addr_reg);
+ DecodeTypeRegisterCOP1();
break;
case COP1X:
- DecodeTypeRegisterCOP1X(instr, fr_reg, fs_reg, ft_reg, fd_reg);
+ DecodeTypeRegisterCOP1X();
break;
case SPECIAL:
- DecodeTypeRegisterSPECIAL(instr, rs_reg, rs, rs_u, rt_reg, rt, rt_u,
- rd_reg, fr_reg, fs_reg, ft_reg, fd_reg, i64hilo,
- u64hilo, alu_out, do_interrupt, current_pc,
- next_pc, return_addr_reg);
+ DecodeTypeRegisterSPECIAL();
break;
case SPECIAL2:
- DecodeTypeRegisterSPECIAL2(instr, rd_reg, alu_out);
+ DecodeTypeRegisterSPECIAL2();
break;
case SPECIAL3:
- DecodeTypeRegisterSPECIAL3(instr, rt_reg, rd_reg, alu_out);
+ DecodeTypeRegisterSPECIAL3();
break;
- // Unimplemented opcodes raised an error in the configuration step before,
- // so we can use the default here to set the destination register in common
- // cases.
default:
- set_register(rd_reg, alu_out);
+ UNREACHABLE();
}
}
+// Branch instructions common part.
+#define BranchAndLinkHelper(do_branch) \
+ execute_branch_delay_instruction = true; \
+ if (do_branch) { \
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; \
+ set_register(31, current_pc + 2 * Instruction::kInstrSize); \
+ } else { \
+ next_pc = current_pc + 2 * Instruction::kInstrSize; \
+ }
+
+#define BranchHelper(do_branch) \
+ execute_branch_delay_instruction = true; \
+ if (do_branch) { \
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; \
+ } else { \
+ next_pc = current_pc + 2 * Instruction::kInstrSize; \
+ }
+
+
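Both helpers above encode the MIPS delay-slot convention: a taken branch lands at the sign-extended, word-scaled offset measured from the delay-slot instruction, a not-taken branch skips over the slot, and the link register receives the address just past the slot. A standalone sketch of that arithmetic, assuming the simulator's 4-byte kInstrSize:

    #include <cstdint>

    constexpr int32_t kInstrSize = 4;

    // Next PC for a conditional branch at current_pc with a signed 16-bit
    // offset. The delay-slot instruction at current_pc + kInstrSize always
    // executes before control transfers.
    int32_t branch_target(int32_t current_pc, int16_t imm16, bool taken) {
      return taken ? current_pc + (imm16 << 2) + kInstrSize  // from the slot
                   : current_pc + 2 * kInstrSize;            // skip the slot
    }

    // Branch-and-link forms additionally write the return address past the
    // delay slot into register 31 ($ra).
    int32_t link_value(int32_t current_pc) {
      return current_pc + 2 * kInstrSize;
    }
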
// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Instruction fields.
- Opcode op = instr->OpcodeFieldRaw();
+ Opcode op = instr->OpcodeFieldRaw();
int32_t rs_reg = instr->RsValue();
- int32_t rs = get_register(instr->RsValue());
- uint32_t rs_u = static_cast<uint32_t>(rs);
- int32_t rt_reg = instr->RtValue(); // Destination register.
- int32_t rt = get_register(rt_reg);
- int16_t imm16 = instr->Imm16Value();
- int32_t imm19 = instr->Imm19Value();
+ int32_t rs = get_register(instr->RsValue());
+ uint32_t rs_u = static_cast<uint32_t>(rs);
+ int32_t rt_reg = instr->RtValue(); // Destination register.
+ int32_t rt = get_register(rt_reg);
+ int16_t imm16 = instr->Imm16Value();
int32_t imm21 = instr->Imm21Value();
int32_t imm26 = instr->Imm26Value();
- int32_t ft_reg = instr->FtValue(); // Destination register.
- int64_t ft;
+ int32_t ft_reg = instr->FtValue(); // Destination register.
+ int64_t ft;
// Zero extended immediate.
- uint32_t oe_imm16 = 0xffff & imm16;
+ uint32_t oe_imm16 = 0xffff & imm16;
// Sign extended immediate.
int32_t se_imm16 = imm16;
- int32_t se_imm19 = imm19 | ((imm19 & 0x40000) ? 0xfff80000 : 0);
int32_t se_imm26 = imm26 | ((imm26 & 0x2000000) ? 0xfc000000 : 0);
-
// Get current pc.
int32_t current_pc = get_pc();
// Next pc.
int32_t next_pc = bad_ra;
- // pc increment
- int16_t pc_increment;
// Used for conditional branch instructions.
- bool do_branch = false;
bool execute_branch_delay_instruction = false;
// Used for arithmetic instructions.
int32_t alu_out = 0;
- // Floating point.
- double fp_out = 0.0;
- uint32_t cc, cc_value, fcsr_cc;
// Used for memory instructions.
int32_t addr = 0x0;
- // Value to be written in memory.
- uint32_t mem_value = 0x0;
// ---------- Configuration (and execution for REGIMM).
switch (op) {
// ------------- COP1. Coprocessor instructions.
case COP1:
switch (instr->RsFieldRaw()) {
- case BC1: // Branch on coprocessor condition.
- cc = instr->FBccValue();
- fcsr_cc = get_fcsr_condition_bit(cc);
- cc_value = test_fcsr_bit(fcsr_cc);
- do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
+ case BC1: { // Branch on coprocessor condition.
+ // Floating point.
+ uint32_t cc = instr->FBccValue();
+ uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
+ uint32_t cc_value = test_fcsr_bit(fcsr_cc);
+ bool do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
execute_branch_delay_instruction = true;
// Set next_pc.
if (do_branch) {
@@ -4003,12 +3810,12 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
next_pc = current_pc + kBranchReturnOffset;
}
break;
+ }
case BC1EQZ:
ft = get_fpu_register(ft_reg);
- do_branch = (ft & 0x1) ? false : true;
execute_branch_delay_instruction = true;
// Set next_pc.
- if (do_branch) {
+ if (!(ft & 0x1)) {
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
} else {
next_pc = current_pc + kBranchReturnOffset;
@@ -4016,10 +3823,9 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
case BC1NEZ:
ft = get_fpu_register(ft_reg);
- do_branch = (ft & 0x1) ? true : false;
execute_branch_delay_instruction = true;
// Set next_pc.
- if (do_branch) {
+ if (ft & 0x1) {
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
} else {
next_pc = current_pc + kBranchReturnOffset;
@@ -4033,54 +3839,35 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
- do_branch = (rs < 0);
- break;
- case BLTZAL:
- do_branch = rs < 0;
+ BranchHelper(rs < 0);
break;
case BGEZ:
- do_branch = rs >= 0;
+ BranchHelper(rs >= 0);
+ break;
+ case BLTZAL:
+ BranchAndLinkHelper(rs < 0);
break;
case BGEZAL:
- do_branch = rs >= 0;
+ BranchAndLinkHelper(rs >= 0);
break;
default:
UNREACHABLE();
}
- switch (instr->RtFieldRaw()) {
- case BLTZ:
- case BLTZAL:
- case BGEZ:
- case BGEZAL:
- // Branch instructions common part.
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + kBranchReturnOffset);
- }
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
- default:
- break;
- }
- break; // case REGIMM.
+ break; // case REGIMM.
// ------------- Branch instructions.
// When comparing to zero, the encoding of rt field is always 0, so we don't
// need to replace rt with zero.
case BEQ:
- do_branch = (rs == rt);
+ BranchHelper(rs == rt);
break;
case BNE:
- do_branch = rs != rt;
+ BranchHelper(rs != rt);
break;
case BLEZ:
- do_branch = rs <= 0;
+ BranchHelper(rs <= 0);
break;
case BGTZ:
- do_branch = rs > 0;
+ BranchHelper(rs > 0);
break;
case POP66: {
if (rs_reg) { // BEQZC
@@ -4113,43 +3900,44 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case ADDI:
if (HaveSameSign(rs, se_imm16)) {
if (rs > 0) {
- exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - se_imm16);
+ if (rs > (Registers::kMaxValue - se_imm16)) {
+ SignalException(kIntegerOverflow);
+ }
} else if (rs < 0) {
- exceptions[kIntegerUnderflow] =
- rs < (Registers::kMinValue - se_imm16);
+ if (rs < (Registers::kMinValue - se_imm16)) {
+ SignalException(kIntegerUnderflow);
+ }
}
}
- alu_out = rs + se_imm16;
+ SetResult(rt_reg, rs + se_imm16);
break;
case ADDIU:
- alu_out = rs + se_imm16;
+ SetResult(rt_reg, rs + se_imm16);
break;
case SLTI:
- alu_out = (rs < se_imm16) ? 1 : 0;
+ SetResult(rt_reg, rs < se_imm16 ? 1 : 0);
break;
case SLTIU:
- alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
+ SetResult(rt_reg, rs_u < static_cast<uint32_t>(se_imm16) ? 1 : 0);
break;
case ANDI:
- alu_out = rs & oe_imm16;
+ SetResult(rt_reg, rs & oe_imm16);
break;
case ORI:
- alu_out = rs | oe_imm16;
+ SetResult(rt_reg, rs | oe_imm16);
break;
case XORI:
- alu_out = rs ^ oe_imm16;
+ SetResult(rt_reg, rs ^ oe_imm16);
break;
case LUI:
- alu_out = (oe_imm16 << 16);
+ SetResult(rt_reg, oe_imm16 << 16);
break;
// ------------- Memory instructions.
case LB:
- addr = rs + se_imm16;
- alu_out = ReadB(addr);
+ set_register(rt_reg, ReadB(rs + se_imm16));
break;
case LH:
- addr = rs + se_imm16;
- alu_out = ReadH(addr, instr);
+ set_register(rt_reg, ReadH(rs + se_imm16, instr));
break;
case LWL: {
// al_offset is offset of the effective address within an aligned word.
@@ -4160,19 +3948,17 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
alu_out = ReadW(addr, instr);
alu_out <<= byte_shift * 8;
alu_out |= rt & mask;
+ set_register(rt_reg, alu_out);
break;
}
case LW:
- addr = rs + se_imm16;
- alu_out = ReadW(addr, instr);
+ set_register(rt_reg, ReadW(rs + se_imm16, instr));
break;
case LBU:
- addr = rs + se_imm16;
- alu_out = ReadBU(addr);
+ set_register(rt_reg, ReadBU(rs + se_imm16));
break;
case LHU:
- addr = rs + se_imm16;
- alu_out = ReadHU(addr, instr);
+ set_register(rt_reg, ReadHU(rs + se_imm16, instr));
break;
case LWR: {
// al_offset is offset of the effective address within an aligned word.
@@ -4183,58 +3969,64 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
alu_out = ReadW(addr, instr);
alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
alu_out |= rt & mask;
+ set_register(rt_reg, alu_out);
break;
}
case SB:
- addr = rs + se_imm16;
+ WriteB(rs + se_imm16, static_cast<int8_t>(rt));
break;
case SH:
- addr = rs + se_imm16;
+ WriteH(rs + se_imm16, static_cast<uint16_t>(rt), instr);
break;
case SWL: {
uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
uint8_t byte_shift = kPointerAlignmentMask - al_offset;
uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
addr = rs + se_imm16 - al_offset;
- mem_value = ReadW(addr, instr) & mask;
+ // Value to be written in memory.
+ uint32_t mem_value = ReadW(addr, instr) & mask;
mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
+ WriteW(addr, mem_value, instr);
break;
}
case SW:
- addr = rs + se_imm16;
+ WriteW(rs + se_imm16, rt, instr);
break;
case SWR: {
uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
uint32_t mask = (1 << al_offset * 8) - 1;
addr = rs + se_imm16 - al_offset;
- mem_value = ReadW(addr, instr);
+ uint32_t mem_value = ReadW(addr, instr);
mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ WriteW(addr, mem_value, instr);
break;
}
case LWC1:
- addr = rs + se_imm16;
- alu_out = ReadW(addr, instr);
+ set_fpu_register_hi_word(ft_reg, 0);
+ set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr));
break;
case LDC1:
- addr = rs + se_imm16;
- fp_out = ReadD(addr, instr);
+ set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr));
break;
case SWC1:
+ WriteW(rs + se_imm16, get_fpu_register_word(ft_reg), instr);
+ break;
case SDC1:
- addr = rs + se_imm16;
+ WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr);
break;
// ------------- JIALC and BNEZC instructions.
- case POP76:
+ case POP76: {
// Next pc.
next_pc = rt + se_imm16;
// The instruction after the jump is NOT executed.
- pc_increment = Instruction::kInstrSize;
+ int16_t pc_increment = Instruction::kInstrSize;
if (instr->IsLinkingInstruction()) {
set_register(31, current_pc + pc_increment);
}
set_pc(next_pc);
pc_modified_ = true;
break;
+ }
// ------------- PC-Relative instructions.
case PCREL: {
// rt field: checking 5-bits.
@@ -4248,115 +4040,37 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
alu_out = current_pc + (se_imm16 << 16);
break;
default: {
+ int32_t imm19 = instr->Imm19Value();
// rt field: checking the most significant 2-bits.
rt = (imm21 >> kImm19Bits);
switch (rt) {
case LWPC: {
- int32_t offset = imm19;
// Set sign.
- offset <<= (kOpcodeBits + kRsBits + 2);
- offset >>= (kOpcodeBits + kRsBits + 2);
- addr = current_pc + (offset << 2);
+ imm19 <<= (kOpcodeBits + kRsBits + 2);
+ imm19 >>= (kOpcodeBits + kRsBits + 2);
+ addr = current_pc + (imm19 << 2);
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
alu_out = *ptr;
break;
}
- case ADDIUPC:
+ case ADDIUPC: {
+ int32_t se_imm19 = imm19 | ((imm19 & 0x40000) ? 0xfff80000 : 0);
alu_out = current_pc + (se_imm19 << 2);
break;
+ }
default:
UNREACHABLE();
break;
}
}
}
+ set_register(rs_reg, alu_out);
break;
}
default:
UNREACHABLE();
}
- // ---------- Raise exceptions triggered.
- SignalExceptions();
-
- // ---------- Execution.
- switch (op) {
- // ------------- Branch instructions.
- case BEQ:
- case BNE:
- case BLEZ:
- case BGTZ:
- // Branch instructions common part.
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2* Instruction::kInstrSize);
- }
- } else {
- next_pc = current_pc + 2 * Instruction::kInstrSize;
- }
- break;
- // ------------- Arithmetic instructions.
- case ADDI:
- case ADDIU:
- case SLTI:
- case SLTIU:
- case ANDI:
- case ORI:
- case XORI:
- case LUI:
- set_register(rt_reg, alu_out);
- TraceRegWr(alu_out);
- break;
- // ------------- Memory instructions.
- case LB:
- case LH:
- case LWL:
- case LW:
- case LBU:
- case LHU:
- case LWR:
- set_register(rt_reg, alu_out);
- break;
- case SB:
- WriteB(addr, static_cast<int8_t>(rt));
- break;
- case SH:
- WriteH(addr, static_cast<uint16_t>(rt), instr);
- break;
- case SWL:
- WriteW(addr, mem_value, instr);
- break;
- case SW:
- WriteW(addr, rt, instr);
- break;
- case SWR:
- WriteW(addr, mem_value, instr);
- break;
- case LWC1:
- set_fpu_register_hi_word(ft_reg, 0);
- set_fpu_register_word(ft_reg, alu_out);
- break;
- case LDC1:
- set_fpu_register_double(ft_reg, fp_out);
- break;
- case SWC1:
- addr = rs + se_imm16;
- WriteW(addr, get_fpu_register_word(ft_reg), instr);
- break;
- case SDC1:
- addr = rs + se_imm16;
- WriteD(addr, get_fpu_register_double(ft_reg), instr);
- break;
- case PCREL:
- set_register(rs_reg, alu_out);
- default:
- break;
- }
-
-
if (execute_branch_delay_instruction) {
// Execute branch delay slot
// We don't check for end_sim_pc. First it should not be met as the current
@@ -4372,6 +4086,9 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
}
}
+#undef BranchHelper
+#undef BranchAndLinkHelper
+
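
The ADDI case above tests for signed overflow without performing the add: operands of different signs can never overflow, and same-sign operands overflow exactly when rs exceeds the headroom the immediate leaves before kMaxValue (or below kMinValue). The same predicate as a standalone helper (plain C++, not simulator code):

    #include <cstdint>
    #include <limits>

    // True when a + b would wrap past the int32_t range, using the same
    // same-sign-plus-headroom test as the ADDI case above.
    bool add_overflows(int32_t a, int32_t b) {
      if ((a ^ b) < 0) return false;  // Different signs: never overflows.
      if (a > 0) return a > std::numeric_limits<int32_t>::max() - b;
      if (a < 0) return a < std::numeric_limits<int32_t>::min() - b;
      return false;  // a == 0: never overflows.
    }
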
// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
void Simulator::DecodeTypeJump(Instruction* instr) {
@@ -4413,7 +4130,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
}
- switch (instr->InstructionType()) {
+ switch (instr->InstructionType(Instruction::TypeChecks::EXTRA)) {
case Instruction::kRegisterType:
DecodeTypeRegister(instr);
break;
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 6de5163dda..bd30172d5b 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -293,56 +293,51 @@ class Simulator {
// Executing is handled based on the instruction type.
void DecodeTypeRegister(Instruction* instr);
- // Called from DecodeTypeRegisterCOP1
- void DecodeTypeRegisterDRsType(Instruction* instr, const int32_t& fr_reg,
- const int32_t& fs_reg, const int32_t& ft_reg,
- const int32_t& fd_reg);
- void DecodeTypeRegisterWRsType(Instruction* instr, int32_t& alu_out,
- const int32_t& fd_reg, const int32_t& fs_reg,
- const int32_t& ft_reg);
- void DecodeTypeRegisterSRsType(Instruction* instr, const int32_t& ft_reg,
- const int32_t& fs_reg, const int32_t& fd_reg);
- void DecodeTypeRegisterLRsType(Instruction* instr, const int32_t& ft_reg,
- const int32_t& fs_reg, const int32_t& fd_reg);
-
- // Functions called from DeocodeTypeRegister
- void DecodeTypeRegisterCOP1(
- Instruction* instr, const int32_t& rs_reg, const int32_t& rs,
- const uint32_t& rs_u, const int32_t& rt_reg, const int32_t& rt,
- const uint32_t& rt_u, const int32_t& rd_reg, const int32_t& fr_reg,
- const int32_t& fs_reg, const int32_t& ft_reg, const int32_t& fd_reg,
- int64_t& i64hilo, uint64_t& u64hilo, int32_t& alu_out, bool& do_interrupt,
- int32_t& current_pc, int32_t& next_pc, int32_t& return_addr_reg);
-
-
- void DecodeTypeRegisterCOP1X(Instruction* instr, const int32_t& fr_reg,
- const int32_t& fs_reg, const int32_t& ft_reg,
- const int32_t& fd_reg);
-
-
- void DecodeTypeRegisterSPECIAL(
- Instruction* instr, const int32_t& rs_reg, const int32_t& rs,
- const uint32_t& rs_u, const int32_t& rt_reg, const int32_t& rt,
- const uint32_t& rt_u, const int32_t& rd_reg, const int32_t& fr_reg,
- const int32_t& fs_reg, const int32_t& ft_reg, const int32_t& fd_reg,
- int64_t& i64hilo, uint64_t& u64hilo, int32_t& alu_out, bool& do_interrupt,
- int32_t& current_pc, int32_t& next_pc, int32_t& return_addr_reg);
-
-
- void DecodeTypeRegisterSPECIAL2(Instruction* instr, const int32_t& rd_reg,
- int32_t& alu_out);
-
- void DecodeTypeRegisterSPECIAL3(Instruction* instr, const int32_t& rt_reg,
- const int32_t& rd_reg, int32_t& alu_out);
-
- // Helper function for DecodeTypeRegister.
- void ConfigureTypeRegister(Instruction* instr,
- int32_t* alu_out,
- int64_t* i64hilo,
- uint64_t* u64hilo,
- int32_t* next_pc,
- int32_t* return_addr_reg,
- bool* do_interrupt);
+ // Functions called from DecodeTypeRegister.
+ void DecodeTypeRegisterCOP1();
+
+ void DecodeTypeRegisterCOP1X();
+
+ void DecodeTypeRegisterSPECIAL();
+
+ void DecodeTypeRegisterSPECIAL2();
+
+ void DecodeTypeRegisterSPECIAL3();
+
+ // Called from DecodeTypeRegisterCOP1.
+ void DecodeTypeRegisterSRsType();
+
+ void DecodeTypeRegisterDRsType();
+
+ void DecodeTypeRegisterWRsType();
+
+ void DecodeTypeRegisterLRsType();
+
+ Instruction* currentInstr_;
+ inline Instruction* get_instr() const { return currentInstr_; }
+ inline void set_instr(Instruction* instr) { currentInstr_ = instr; }
+
+ inline int32_t rs_reg() const { return currentInstr_->RsValue(); }
+ inline int32_t rs() const { return get_register(rs_reg()); }
+ inline uint32_t rs_u() const {
+ return static_cast<uint32_t>(get_register(rs_reg()));
+ }
+ inline int32_t rt_reg() const { return currentInstr_->RtValue(); }
+ inline int32_t rt() const { return get_register(rt_reg()); }
+ inline uint32_t rt_u() const {
+ return static_cast<uint32_t>(get_register(rt_reg()));
+ }
+ inline int32_t rd_reg() const { return currentInstr_->RdValue(); }
+ inline int32_t fr_reg() const { return currentInstr_->FrValue(); }
+ inline int32_t fs_reg() const { return currentInstr_->FsValue(); }
+ inline int32_t ft_reg() const { return currentInstr_->FtValue(); }
+ inline int32_t fd_reg() const { return currentInstr_->FdValue(); }
+ inline int32_t sa() const { return currentInstr_->SaValue(); }
+
+ inline void SetResult(int32_t rd_reg, int32_t alu_out) {
+ set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
+ }
void DecodeTypeImmediate(Instruction* instr);
void DecodeTypeJump(Instruction* instr);
@@ -394,10 +389,9 @@ class Simulator {
kDivideByZero,
kNumExceptions
};
- int16_t exceptions[kNumExceptions];
// Exceptions.
- void SignalExceptions();
+ void SignalException(Exception e);
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
@@ -424,7 +418,7 @@ class Simulator {
static const size_t stack_size_ = 1 * 1024*1024;
char* stack_;
bool pc_modified_;
- int icount_;
+ uint64_t icount_;
int break_count_;
// Debugger input.
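
The header change above replaces the long reference-parameter lists with a stored currentInstr_ plus tiny accessors, so each Decode* helper can pull register fields on demand. A toy illustration of the pattern (illustrative class, not the V8 simulator):

    #include <cstdint>

    class MiniDecoder {
     public:
      void Decode(uint32_t instr) {
        instr_ = instr;  // set_instr(): stash once per instruction.
        DecodeArith();   // Helpers now take no arguments.
      }

     private:
      // MIPS R-type field accessors: rs[25:21], rt[20:16], rd[15:11].
      int rs_reg() const { return (instr_ >> 21) & 0x1f; }
      int rt_reg() const { return (instr_ >> 16) & 0x1f; }
      int rd_reg() const { return (instr_ >> 11) & 0x1f; }

      void DecodeArith() {
        // Read rs_reg()/rt_reg()/rd_reg() here instead of receiving them
        // through a dozen parameters.
      }

      uint32_t instr_ = 0;
    };
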
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 98dd71122a..cb5e164ff9 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -32,6 +32,8 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
+#include "src/mips64/assembler-mips64.h"
+
#if V8_TARGET_ARCH_MIPS64
#include "src/base/cpu.h"
@@ -2376,6 +2378,7 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
+ DCHECK(!src.rm().is(at));
if (is_int16(src.offset_)) {
GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
@@ -2396,6 +2399,7 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
+ DCHECK(!src.rm().is(at));
if (is_int16(src.offset_)) {
GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
} else { // Offset > 16 bits, use multiple instructions to load.
@@ -2630,7 +2634,7 @@ void Assembler::mov_d(FPURegister fd, FPURegister fs) {
void Assembler::mov_s(FPURegister fd, FPURegister fs) {
- GenInstrRegister(COP1, S, f0, fs, fd, MOV_D);
+ GenInstrRegister(COP1, S, f0, fs, fd, MOV_S);
}
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 0164072333..be57f29806 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -79,6 +79,16 @@ struct Register {
static const int kSizeInBytes = 8;
static const int kCpRegister = 23; // cp (s7) is the 23rd register.
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ static const int kMantissaOffset = 0;
+ static const int kExponentOffset = 4;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ static const int kMantissaOffset = 4;
+ static const int kExponentOffset = 0;
+#else
+#error Unknown endianness
+#endif
+
inline static int NumAllocatableRegisters();
static int ToAllocationIndex(Register reg) {
@@ -327,7 +337,8 @@ const FPURegister f31 = { 31 };
#define kLithiumScratchDouble f30
#define kDoubleRegZero f28
// Used on mips64r6 for compare operations.
-#define kDoubleCompareReg f31
+// We use the last non-callee-saved odd register for the N64 ABI.
+#define kDoubleCompareReg f23
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
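
The kMantissaOffset/kExponentOffset constants added above pick out the two 32-bit halves of an IEEE-754 double in memory: on little-endian targets the low word (offset 0) holds the low mantissa bits and the high word (offset 4) holds the sign, exponent and high mantissa bits; big-endian swaps them. A minimal sketch of such a half-word load (plain C++, not V8 code):

    #include <cstdint>
    #include <cstring>

    // Load one 32-bit half of a double, byte_offset being kMantissaOffset
    // or kExponentOffset for the target endianness.
    uint32_t load_double_word(const double* d, int byte_offset) {
      uint32_t w;
      std::memcpy(&w, reinterpret_cast<const char*>(d) + byte_offset,
                  sizeof(w));
      return w;
    }
    // On little-endian MIPS64, load_double_word(&x, 4) returns the word
    // containing the sign bit and the 11-bit biased exponent.
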
diff --git a/deps/v8/src/mips64/builtins-mips64.cc b/deps/v8/src/mips64/builtins-mips64.cc
index 5754117140..a736019da1 100644
--- a/deps/v8/src/mips64/builtins-mips64.cc
+++ b/deps/v8/src/mips64/builtins-mips64.cc
@@ -24,12 +24,19 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// -- a0 : number of arguments excluding receiver
// -- a1 : called function (only guaranteed when
// -- extra_args requires it)
- // -- cp : context
// -- sp[0] : last argument
// -- ...
// -- sp[8 * (argc - 1)] : first argument
// -- sp[8 * argc] : receiver
// -----------------------------------
+ __ AssertFunction(a1);
+
+ // Make sure we operate in the context of the called function (for example
+ // ConstructStubs implemented in C++ will be run in the context of the caller
+ // instead of the callee, due to the way that [[Construct]] is defined for
+ // ordinary functions).
+ // TODO(bmeurer): Can we make this more robust?
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
@@ -140,7 +147,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -148,121 +156,134 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
// -- sp[argc * 8] : receiver
// -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
- Register function = a1;
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
- __ Assert(eq, kUnexpectedStringFunction, function, Operand(a2));
+ // 1. Load the first argument into a0 and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+ __ Dsubu(a0, a0, Operand(1));
+ __ dsll(a0, a0, kPointerSizeLog2);
+ __ Daddu(sp, a0, sp);
+ __ ld(a0, MemOperand(sp));
+ __ Drop(2);
}
- // Load the first arguments in a0 and get rid of the rest.
- Label no_arguments;
- __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
- // First args = sp[(argc - 1) * 8].
- __ Dsubu(a0, a0, Operand(1));
- __ dsll(a0, a0, kPointerSizeLog2);
- __ Daddu(sp, a0, sp);
- __ ld(a0, MemOperand(sp));
- // sp now point to args[0], drop args[0] + receiver.
- __ Drop(2);
-
- Register argument = a2;
- Label not_cached, argument_is_string;
- __ LookupNumberStringCache(a0, // Input.
- argument, // Result.
- a3, // Scratch.
- a4, // Scratch.
- a5, // Scratch.
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, a4);
- __ bind(&argument_is_string);
+ // 2a. At least one argument, return a0 if it's a string, otherwise
+ // dispatch to appropriate conversion.
+ Label to_string, symbol_descriptive_string;
+ {
+ __ JumpIfSmi(a0, &to_string);
+ __ GetObjectType(a0, a1, a1);
+ STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+ __ Subu(a1, a1, Operand(FIRST_NONSTRING_TYPE));
+ __ Branch(&symbol_descriptive_string, eq, a1, Operand(zero_reg));
+ __ Branch(&to_string, gt, a1, Operand(zero_reg));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ }
- // ----------- S t a t e -------------
- // -- a2 : argument converted to string
- // -- a1 : constructor function
- // -- ra : return address
- // -----------------------------------
+ // 2b. No arguments, return the empty string (and pop the receiver).
+ __ bind(&no_arguments);
+ {
+ __ LoadRoot(v0, Heap::kempty_stringRootIndex);
+ __ DropAndRet(1);
+ }
- Label gc_required;
- __ Allocate(JSValue::kSize,
- v0, // Result.
- a3, // Scratch.
- a4, // Scratch.
- &gc_required,
- TAG_OBJECT);
-
- // Initialising the String Object.
- Register map = a3;
- __ LoadGlobalFunctionInitialMap(function, map, a4);
- if (FLAG_debug_code) {
- __ lbu(a4, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ Assert(eq, kUnexpectedStringWrapperInstanceSize,
- a4, Operand(JSValue::kSize >> kPointerSizeLog2));
- __ lbu(a4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper,
- a4, Operand(zero_reg));
+ // 3a. Convert a0 to a string.
+ __ bind(&to_string);
+ {
+ ToStringStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
- __ sd(map, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ // 3b. Convert symbol in a0 to a string.
+ __ bind(&symbol_descriptive_string);
+ {
+ __ Push(a0);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ }
+}
- __ sd(argument, FieldMemOperand(v0, JSValue::kValueOffset));
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+ // -- sp[argc * 8] : receiver
+ // -----------------------------------
- __ Ret();
+ // 1. Load the first argument into a0 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
+ __ Dsubu(a0, a0, Operand(1));
+ __ dsll(a0, a0, kPointerSizeLog2);
+ __ Daddu(sp, a0, sp);
+ __ ld(a0, MemOperand(sp));
+ __ Drop(2);
+ __ jmp(&done);
+ __ bind(&no_arguments);
+ __ LoadRoot(a0, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ bind(&done);
+ }
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- __ JumpIfSmi(a0, &convert_argument);
-
- // Is it a String?
- __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ And(a4, a3, Operand(kIsNotStringMask));
- __ Branch(&convert_argument, ne, a4, Operand(zero_reg));
- __ mov(argument, a0);
- __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, a4);
- __ Branch(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into a2.
- __ bind(&convert_argument);
- __ push(function); // Preserve the function.
- __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, a4);
+ // 2. Make sure a0 is a string.
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(a0);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ Label convert, done_convert;
+ __ JumpIfSmi(a0, &convert);
+ __ GetObjectType(a0, a2, a2);
+ __ And(t0, a2, Operand(kIsNotStringMask));
+ __ Branch(&done_convert, eq, t0, Operand(zero_reg));
+ __ bind(&convert);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ToStringStub stub(masm->isolate());
+ __ Push(a1);
+ __ CallStub(&stub);
+ __ Move(a0, v0);
+ __ Pop(a1);
+ }
+ __ bind(&done_convert);
}
- __ pop(function);
- __ mov(argument, v0);
- __ Branch(&argument_is_string);
- // Load the empty string into a2, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kempty_stringRootIndex);
- __ Drop(1);
- __ Branch(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, a4);
+ // 3. Allocate a JSValue wrapper for the string.
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ // ----------- S t a t e -------------
+ // -- a0 : the first argument
+ // -- a1 : constructor function
+ // -- ra : return address
+ // -----------------------------------
+
+ Label allocate, done_allocate;
+ __ Allocate(JSValue::kSize, v0, a2, a3, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Initialize the JSValue in v0.
+ __ LoadGlobalFunctionInitialMap(a1, a2, a3);
+ __ sd(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fallback to the runtime to allocate in new space.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Move(a2, Smi::FromInt(JSValue::kSize));
+ __ Push(a0, a1, a2);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ Pop(a0, a1);
+ }
+ __ jmp(&done_allocate);
}
- __ Ret();
}
@@ -312,8 +333,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_memento) {
+ bool is_api_function) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -323,9 +343,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- sp[...]: constructor arguments
// -----------------------------------
- // Should never create mementos for api functions.
- DCHECK(!is_api_function || !create_memento);
-
Isolate* isolate = masm->isolate();
// Enter a construct frame.
@@ -393,9 +410,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a2: initial map
Label rt_call_reload_new_target;
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
- if (create_memento) {
- __ Daddu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize));
- }
__ Allocate(a3, t0, t1, t2, &rt_call_reload_new_target, SIZE_IN_WORDS);
@@ -403,7 +417,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map and properties and elements are set to empty fixed array.
// a1: constructor function
// a2: initial map
- // a3: object size (including memento if create_memento)
+ // a3: object size
// t0: JSObject (not tagged)
__ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
__ mov(t1, t0);
@@ -418,7 +432,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with appropriate filler.
// a1: constructor function
// a2: initial map
- // a3: object size (in words, including memento if create_memento)
+ // a3: object size (in words)
// t0: JSObject (not tagged)
// t1: First in-object property of JSObject (not tagged)
// a6: slack tracking counter (non-API function case)
@@ -458,29 +472,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&no_inobject_slack_tracking);
}
- if (create_memento) {
- __ Dsubu(a0, a3, Operand(AllocationMemento::kSize / kPointerSize));
- __ dsll(a0, a0, kPointerSizeLog2);
- __ Daddu(a0, t0, Operand(a0)); // End of object.
- __ InitializeFieldsWithFiller(t1, a0, t3);
-
- // Fill in memento fields.
- // t1: points to the allocated but uninitialized memento.
- __ LoadRoot(t3, Heap::kAllocationMementoMapRootIndex);
- DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
- __ sd(t3, MemOperand(t1));
- __ Daddu(t1, t1, kPointerSize);
- // Load the AllocationSite.
- __ ld(t3, MemOperand(sp, 3 * kPointerSize));
- __ AssertUndefinedOrAllocationSite(t3, a0);
- DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
- __ sd(t3, MemOperand(t1));
- __ Daddu(t1, t1, kPointerSize);
- } else {
- __ dsll(at, a3, kPointerSizeLog2);
- __ Daddu(a0, t0, Operand(at)); // End of object.
- __ InitializeFieldsWithFiller(t1, a0, t3);
- }
+ __ dsll(at, a3, kPointerSizeLog2);
+ __ Daddu(a0, t0, Operand(at)); // End of object.
+ __ InitializeFieldsWithFiller(t1, a0, t3);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on.
@@ -499,45 +493,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// a1: constructor function
// a3: original constructor
__ bind(&rt_call);
- if (create_memento) {
- // Get the cell or allocation site.
- __ ld(a2, MemOperand(sp, 3 * kPointerSize));
- __ push(a2); // argument 1: allocation site
- }
__ Push(a1, a3); // arguments 2-3 / 1-2
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
+ __ CallRuntime(Runtime::kNewObject, 2);
__ mov(t0, v0);
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- Label count_incremented;
- if (create_memento) {
- __ jmp(&count_incremented);
- }
-
// Receiver for constructor call allocated.
// t0: JSObject
__ bind(&allocated);
- if (create_memento) {
- __ ld(a2, MemOperand(sp, 3 * kPointerSize));
- __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
- __ Branch(&count_incremented, eq, a2, Operand(t1));
- // a2 is an AllocationSite. We are creating a memento from it, so we
- // need to increment the memento create count.
- __ ld(a3, FieldMemOperand(a2,
- AllocationSite::kPretenureCreateCountOffset));
- __ Daddu(a3, a3, Operand(Smi::FromInt(1)));
- __ sd(a3, FieldMemOperand(a2,
- AllocationSite::kPretenureCreateCountOffset));
- __ bind(&count_incremented);
- }
-
// Restore the parameters.
__ Pop(a3); // new.target
__ Pop(a1);
@@ -635,12 +599,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true);
}
@@ -736,8 +700,7 @@ enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
// Clobbers a2; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm,
- const int calleeOffset, Register argc,
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
IsTagged argc_is_tagged) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -757,12 +720,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ Branch(&okay, gt, a2, Operand(a7)); // Signed comparison.
// Out of stack space.
- __ ld(a1, MemOperand(fp, calleeOffset));
- if (argc_is_tagged == kArgcIsUntaggedInt) {
- __ SmiTag(argc);
- }
- __ Push(a1, argc);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&okay);
}
@@ -773,7 +731,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Called from JSEntryStub::GenerateBody
// ----------- S t a t e -------------
- // -- a0: code entry
+ // -- a0: new.target
// -- a1: function
// -- a2: receiver_pointer
// -- a3: argc
@@ -787,19 +745,21 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Set up the context from the function argument.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // Setup the context (we need to use the caller context from the isolate).
+ ExternalReference context_address(Isolate::kContextAddress,
+ masm->isolate());
+ __ li(cp, Operand(context_address));
+ __ ld(cp, MemOperand(cp));
// Push the function and the receiver onto the stack.
__ Push(a1, a2);
// Check if we have enough stack space to push all arguments.
- // The function is the first thing that was pushed above after entering
- // the internal frame.
- const int kFunctionOffset =
- InternalFrameConstants::kCodeOffset - kPointerSize;
// Clobbers a2.
- Generate_CheckStackOverflow(masm, kFunctionOffset, a3, kArgcIsUntaggedInt);
+ Generate_CheckStackOverflow(masm, a3, kArgcIsUntaggedInt);
+
+ // Remember new.target.
+ __ mov(a5, a0);
// Copy arguments to the stack in a loop.
// a3: argc
@@ -818,6 +778,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&entry);
__ Branch(&loop, ne, s0, Operand(a6));
+ // Setup new.target and argc.
+ __ mov(a0, a3);
+ __ mov(a3, a5);
+
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
@@ -829,17 +793,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// s6 holds the root address. Do not clobber.
// s7 is cp. Do not init.
- // Invoke the code and pass argc as a0.
- __ mov(a0, a3);
- if (is_construct) {
- // No type feedback cell is available
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
- }
+ // Invoke the code.
+ Handle<Code> builtin = is_construct
+ ? masm->isolate()->builtins()->Construct()
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
// Leave internal frame.
}
@@ -909,7 +867,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Dsubu(a5, sp, Operand(a4));
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
__ Branch(&ok, hs, a5, Operand(a2));
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -993,8 +951,11 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- // Drop receiver + arguments.
- __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+
+ // Drop receiver + arguments and return.
+ __ lw(at, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kParameterSizeOffset));
+ __ Daddu(sp, sp, at);
__ Jump(ra);
}
@@ -1246,128 +1207,31 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
}
+// static
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// a0: actual number of arguments
- { Label done;
+ {
+ Label done;
__ Branch(&done, ne, a0, Operand(zero_reg));
- __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
- __ push(a6);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
__ Daddu(a0, a0, Operand(1));
__ bind(&done);
}
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
+ // 2. Get the function to call (passed as receiver) from the stack.
// a0: actual number of arguments
- Label slow, non_function;
__ dsll(at, a0, kPointerSizeLog2);
__ daddu(at, sp, at);
__ ld(a1, MemOperand(at));
- __ JumpIfSmi(a1, &non_function);
- __ GetObjectType(a1, a2, a2);
- __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- // 3a. Patch the first argument if necessary when calling a function.
- // a0: actual number of arguments
- // a1: function
- Label shift_arguments;
- __ li(a4, Operand(0, RelocInfo::NONE32)); // Indicate regular JS_FUNCTION.
- { Label convert_to_object, use_global_proxy, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kStrictModeByteOffset));
- __ And(a7, a3, Operand(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ Branch(&shift_arguments, ne, a7, Operand(zero_reg));
-
- // Do not transform the receiver for native (Compilerhints already in a3).
- __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
- __ And(a7, a3, Operand(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ Branch(&shift_arguments, ne, a7, Operand(zero_reg));
- // Compute the receiver in sloppy mode.
- // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(a2, sp, at);
- __ ld(a2, MemOperand(a2, -kPointerSize));
- // a0: actual number of arguments
- // a1: function
- // a2: first argument
- __ JumpIfSmi(a2, &convert_to_object, a6);
-
- __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
- __ Branch(&use_global_proxy, eq, a2, Operand(a3));
- __ LoadRoot(a3, Heap::kNullValueRootIndex);
- __ Branch(&use_global_proxy, eq, a2, Operand(a3));
-
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ GetObjectType(a2, a3, a3);
- __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- __ bind(&convert_to_object);
- // Enter an internal frame in order to preserve argument count.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(a0);
- __ Push(a0);
- __ mov(a0, a2);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(a2, v0);
-
- __ pop(a0);
- __ SmiUntag(a0);
- // Leave internal frame.
- }
- // Restore the function to a1, and the flag to a4.
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(at, sp, at);
- __ ld(a1, MemOperand(at));
- __ Branch(USE_DELAY_SLOT, &patch_receiver);
- __ li(a4, Operand(0, RelocInfo::NONE32));
-
- __ bind(&use_global_proxy);
- __ ld(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
-
- __ bind(&patch_receiver);
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(a3, sp, at);
- __ sd(a2, MemOperand(a3, -kPointerSize));
-
- __ Branch(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ li(a4, Operand(1, RelocInfo::NONE32)); // Indicate function proxy.
- __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
-
- __ bind(&non_function);
- __ li(a4, Operand(2, RelocInfo::NONE32)); // Indicate non-function.
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- // a0: actual number of arguments
- // a1: function
- // a4: call type (0: JS function, 1: function proxy, 2: non-function)
- __ dsll(at, a0, kPointerSizeLog2);
- __ daddu(a2, sp, at);
- __ sd(a1, MemOperand(a2, -kPointerSize));
-
- // 4. Shift arguments and return address one slot down on the stack
+ // 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
// a0: actual number of arguments
// a1: function
- // a4: call type (0: JS function, 1: function proxy, 2: non-function)
- __ bind(&shift_arguments);
- { Label loop;
+ {
+ Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
__ dsll(at, a0, kPointerSizeLog2);
__ daddu(a2, sp, at);
@@ -1383,47 +1247,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ Pop();
}
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- // a0: actual number of arguments
- // a1: function
- // a4: call type (0: JS function, 1: function proxy, 2: non-function)
- { Label function, non_proxy;
- __ Branch(&function, eq, a4, Operand(zero_reg));
- // Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ mov(a2, zero_reg);
- __ Branch(&non_proxy, ne, a4, Operand(1));
-
- __ push(a1); // Re-add proxy object as additional argument.
- __ Daddu(a0, a0, Operand(1));
- __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- // a0: actual number of arguments
- // a1: function
- __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- // The argument count is stored as int32_t on 64-bit platforms.
- // TODO(plind): Smi on 32-bit platforms.
- __ lw(a2,
- FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- // Check formal and actual parameter counts.
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
-
- __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
- ParameterCount expected(0);
- __ InvokeCode(a3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -1491,107 +1316,36 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ Push(a1);
__ ld(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
- __ push(a0);
- __ ld(a0, MemOperand(fp, kArgumentsOffset)); // Get the args array.
- __ push(a0);
+ __ ld(a1, MemOperand(fp, kArgumentsOffset)); // Get the args array.
+ __ Push(a0, a1);
// Returns (in v0) number of arguments to copy to stack as Smi.
if (targetIsArgument) {
- __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
}
// Returns the result in v0.
- Generate_CheckStackOverflow(masm, kFunctionOffset, v0, kArgcIsSmiTagged);
+ Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ mov(a1, zero_reg);
- __ Push(v0, a1); // Limit and initial index.
-
- // Get the receiver.
- __ ld(a0, MemOperand(fp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ ld(a1, MemOperand(fp, kFunctionOffset));
- __ GetObjectType(a1, a2, a2);
- __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- // Change context eagerly to get the right global object if necessary.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in a1.
- __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_proxy;
- __ lbu(a7, FieldMemOperand(a2, SharedFunctionInfo::kStrictModeByteOffset));
- __ And(a7, a7, Operand(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ Branch(&push_receiver, ne, a7, Operand(zero_reg));
-
- // Do not transform the receiver for native (Compilerhints already in a2).
- __ lbu(a7, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
- __ And(a7, a7, Operand(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ Branch(&push_receiver, ne, a7, Operand(zero_reg));
-
- // Compute the receiver in sloppy mode.
- __ JumpIfSmi(a0, &call_to_object);
- __ LoadRoot(a1, Heap::kNullValueRootIndex);
- __ Branch(&use_global_proxy, eq, a0, Operand(a1));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ Branch(&use_global_proxy, eq, a0, Operand(a2));
-
- // Check if the receiver is already a JavaScript object.
- // a0: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ GetObjectType(a0, a1, a1);
- __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
- // Convert the receiver to a regular object.
- // a0: receiver
- __ bind(&call_to_object);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(a0, v0); // Put object in a0 to match other paths to push_receiver.
- __ Branch(&push_receiver);
-
- __ bind(&use_global_proxy);
- __ ld(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ld(a0, FieldMemOperand(a0, GlobalObject::kGlobalProxyOffset));
-
- // Push the receiver.
- // a0: receiver
- __ bind(&push_receiver);
- __ push(a0);
+ __ ld(a2, MemOperand(fp, kReceiverOffset));
+ __ Push(v0, a1, a2); // limit, initial index and receiver.
// Copy all arguments from the array to the stack.
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
- // Call the function.
- Label call_proxy;
- ParameterCount actual(a0);
+ // Call the callable.
+ // TODO(bmeurer): This should be a tail call according to ES6.
__ ld(a1, MemOperand(fp, kFunctionOffset));
- __ GetObjectType(a1, a2, a2);
- __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+ __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
- frame_scope.GenerateLeaveFrame();
- __ Ret(USE_DELAY_SLOT);
- __ Daddu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
-
- // Call the function proxy.
- __ bind(&call_proxy);
- __ push(a1); // Add function proxy as last argument.
- __ Daddu(a0, a0, Operand(1));
- __ li(a2, Operand(0, RelocInfo::NONE32));
- __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
// Tear down the internal frame and remove function, receiver and args.
}
@@ -1634,10 +1388,11 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
__ ld(a0, MemOperand(fp, kNewTargetOffset)); // get the new.target
__ push(a0);
// Returns argument count in v0.
- __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
// Returns result in v0.
- Generate_CheckStackOverflow(masm, kFunctionOffset, v0, kArgcIsSmiTagged);
+ Generate_CheckStackOverflow(masm, v0, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
@@ -1732,6 +1487,254 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+
+ Label convert, convert_global_proxy, convert_to_object, done_convert;
+ __ AssertFunction(a1);
+ // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
+ // slot is "classConstructor".
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+ SharedFunctionInfo::kStrictModeByteOffset);
+ __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
+ __ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+ (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ Branch(&done_convert, ne, at, Operand(zero_reg));
+ {
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(at, sp, at);
+ __ ld(a3, MemOperand(at));
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -- a2 : the shared function info.
+ // -- a3 : the receiver
+ // -- cp : the function context.
+ // -----------------------------------
+
+ Label convert_receiver;
+ __ JumpIfSmi(a3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ GetObjectType(a3, a4, a4);
+ __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
+ __ JumpIfRoot(a3, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
+ __ JumpIfNotRoot(a3, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ }
+ __ Branch(&convert_receiver);
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ Push(a0, a1);
+ __ mov(a0, a3);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(a3, v0);
+ __ Pop(a0, a1);
+ __ SmiUntag(a0);
+ }
+ __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(at, sp, at);
+ __ sd(a3, MemOperand(at));
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -- a2 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ __ lw(a2,
+ FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ ParameterCount actual(a0);
+ ParameterCount expected(a2);
+ __ InvokeCode(a3, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+}
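
The new Generate_CallFunction above hand-codes the receiver coercion from ES6 9.2.1 [[Call]]: sloppy-mode, non-native functions patch undefined or null receivers to the global proxy and wrap other primitives via ToObject. A minimal standalone C++ sketch of that decision tree (illustration only; Value, GlobalProxy and ToObject are hypothetical stand-ins, not V8 types):

#include <cassert>

// Hypothetical value model; not V8's real object representation.
struct Value {
  enum Kind { kUndefined, kNull, kSmi, kString, kReceiver } kind;
};

Value GlobalProxy() { return Value{Value::kReceiver}; }    // LoadGlobalProxy
Value ToObject(Value) { return Value{Value::kReceiver}; }  // ToObjectStub

Value CoerceReceiver(Value receiver, bool is_sloppy, bool is_native) {
  if (!is_sloppy || is_native) return receiver;            // done_convert
  if (receiver.kind == Value::kReceiver) return receiver;  // already a JSReceiver
  if (receiver.kind == Value::kUndefined || receiver.kind == Value::kNull)
    return GlobalProxy();                                  // convert_global_proxy
  return ToObject(receiver);                               // convert_to_object
}

int main() {
  assert(CoerceReceiver(Value{Value::kNull}, true, false).kind == Value::kReceiver);
  assert(CoerceReceiver(Value{Value::kSmi}, false, false).kind == Value::kSmi);
}
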
+
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ Label non_callable, non_function, non_smi;
+ __ JumpIfSmi(a1, &non_callable);
+ __ bind(&non_smi);
+ __ GetObjectType(a1, t1, t2);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
+ eq, t2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&non_function, ne, t2, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ // 1. Call to function proxy.
+ // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
+ __ ld(a1, FieldMemOperand(a1, JSFunctionProxy::kCallTrapOffset));
+ __ AssertNotSmi(a1);
+ __ Branch(&non_smi);
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not we raise an exception).
+ __ bind(&non_function);
+ // Check if target has a [[Call]] internal method.
+ __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(1 << Map::kIsCallable));
+ __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+ // Overwrite the original receiver with the (original) target.
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(at, sp, at);
+ __ sd(a1, MemOperand(at));
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
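
Generate_Call is the new single entry point for calling an arbitrary object, and the dispatch order matters: a JSFunction takes the fast path, a function proxy re-dispatches on its call trap (hence the Branch back to non_smi), any other object with the [[Call]] map bit set goes through the call-as-function delegate, and everything else throws. The same control flow as plain C++ (illustrative types, not V8's):

#include <stdexcept>

enum class Type { kSmi, kJSFunction, kJSFunctionProxy, kOther };

struct Target {
  Type type;
  bool callable;                // the Map::kIsCallable bit
  Target* call_trap = nullptr;  // JSFunctionProxy::kCallTrapOffset
};

void CallFunction(Target&) { /* CallFunction builtin fast path */ }
void CallAsFunctionDelegate(Target&) { /* exotic callable objects */ }

void Call(Target& initial) {
  Target* target = &initial;
  for (;;) {                                       // bind(&non_smi)
    if (target->type == Type::kJSFunction) return CallFunction(*target);
    if (target->type == Type::kJSFunctionProxy) {  // 1. function proxy
      target = target->call_trap;                  // re-dispatch on the trap
      continue;
    }
    if (target->type != Type::kSmi && target->callable)
      return CallAsFunctionDelegate(*target);      // 2. exotic callable
    throw std::runtime_error("called non-callable");  // 3. not callable
  }
}

int main() {
  Target fn{Type::kJSFunction, true};
  Target proxy{Type::kJSFunctionProxy, true, &fn};
  Call(proxy);  // dispatches through the call trap to the function
}
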
+
+
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (checked to be a JSFunction)
+ // -- a3 : the original constructor (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(a1);
+ __ AssertFunction(a3);
+
+ // The calling convention for function-specific ConstructStubs requires
+ // a2 to contain either an AllocationSite or undefined.
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
+ __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+}
+
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (checked to be a JSFunctionProxy)
+ // -- a3 : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
+ __ ld(a1, FieldMemOperand(a1, JSFunctionProxy::kConstructTrapOffset));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (can be any Object)
+ // -- a3 : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // Check if target has a [[Construct]] internal method.
+ Label non_constructor;
+ __ JumpIfSmi(a1, &non_constructor);
+ __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ lbu(t2, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t2, t2, Operand(1 << Map::kIsCallable));
+ __ Branch(&non_constructor, eq, t2, Operand(zero_reg));
+
+ // Dispatch based on instance type.
+ __ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
+ __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+ eq, t2, Operand(JS_FUNCTION_PROXY_TYPE));
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ dsll(at, a0, kPointerSizeLog2);
+ __ daddu(at, sp, at);
+ __ sd(a1, MemOperand(at));
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
+
+
+// static
+void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Find the address of the last argument.
+ __ Daddu(a3, a0, Operand(1)); // Add one for receiver.
+ __ dsll(a3, a3, kPointerSizeLog2);
+ __ Dsubu(a3, a2, Operand(a3));
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ __ ld(a4, MemOperand(a2));
+ __ Daddu(a2, a2, Operand(-kPointerSize));
+ __ push(a4);
+ __ bind(&loop_check);
+ __ Branch(&loop_header, gt, a2, Operand(a3));
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
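
The copy loop in Generate_PushArgsAndCall walks downward from the first-argument address and pushes a0 + 1 slots (arguments plus receiver) onto the stack. The same mechanics in portable C++ (arrays stand in for registers and the machine stack; illustration only):

#include <cassert>

// first is the address the builtin receives in a2; sp is the stack top and
// grows toward lower indices, like the machine sp.
void PushArgs(const long* first, long argc, long* sp) {
  for (long i = 0; i < argc + 1; i++) {  // argc + 1: include the receiver
    *--sp = first[-i];                   // ld from a2, push, step a2 down
  }
  // ...the builtin then tail-calls the Call builtin with everything pushed.
}

int main() {
  long values[4] = {111, 1, 2, 3};       // receiver, then three arguments
  long stack[8];
  PushArgs(&values[3], 3, stack + 8);
  assert(stack[7] == 3 && stack[4] == 111);  // copied top-down from values[3]
}
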
+
+
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
// ----------- S t a t e -------------
@@ -1759,26 +1762,27 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- // Calculate copy start address into a0 and copy end address into a2.
+ // Calculate copy start address into a0 and copy end address into a4.
__ SmiScale(a0, a0, kPointerSizeLog2);
__ Daddu(a0, fp, a0);
// Adjust for return address and receiver.
__ Daddu(a0, a0, Operand(2 * kPointerSize));
// Compute copy end address.
- __ dsll(a2, a2, kPointerSizeLog2);
- __ dsubu(a2, a0, a2);
+ __ dsll(a4, a2, kPointerSizeLog2);
+ __ dsubu(a4, a0, a4);
// Copy the arguments (including the receiver) to the new stack frame.
// a0: copy start address
// a1: function
- // a2: copy end address
+ // a2: expected number of arguments
// a3: code entry to call
+ // a4: copy end address
Label copy;
__ bind(&copy);
- __ ld(a4, MemOperand(a0));
- __ push(a4);
- __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
+ __ ld(a5, MemOperand(a0));
+ __ push(a5);
+ __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a4));
__ daddiu(a0, a0, -kPointerSize); // In delay slot.
__ jmp(&invoke);
@@ -1809,7 +1813,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
- // Calculate copy start address into a0 and copy end address is fp.
+ // Calculate copy start address into a0 and copy end address into a7.
// a0: actual number of arguments as a smi
// a1: function
// a2: expected number of arguments
@@ -1839,23 +1843,25 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a1: function
// a2: expected number of arguments
// a3: code entry to call
- __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
__ dsll(a6, a2, kPointerSizeLog2);
- __ Dsubu(a2, fp, Operand(a6));
+ __ Dsubu(a4, fp, Operand(a6));
// Adjust for frame.
- __ Dsubu(a2, a2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
- 2 * kPointerSize));
+ __ Dsubu(a4, a4, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ 2 * kPointerSize));
Label fill;
__ bind(&fill);
__ Dsubu(sp, sp, kPointerSize);
- __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2));
- __ sd(a4, MemOperand(sp));
+ __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a4));
+ __ sd(a5, MemOperand(sp));
}
// Call the entry point.
__ bind(&invoke);
-
+ __ mov(a0, a2);
+ // a0 : expected number of arguments
+ // a1 : function (passed through to callee)
__ Call(a3);
// Store offset of return address for deoptimizer.
@@ -1876,7 +1882,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ break_(0xCC);
}
}
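
The adaptor's contract, visible across the hunks above: when the actual and expected argument counts disagree, it builds a frame in which the callee sees exactly its expected count, filling missing slots with undefined. A compact model of that contract (illustration only, ignoring frame layout, the receiver, and the strong-mode error path):

#include <algorithm>
#include <cassert>
#include <vector>

struct Value { bool undefined; int payload; };

std::vector<Value> AdaptArguments(const std::vector<Value>& actual,
                                  size_t expected) {
  size_t copied = std::min(actual.size(), expected);
  std::vector<Value> frame(actual.begin(), actual.begin() + copied);
  while (frame.size() < expected)
    frame.push_back(Value{true, 0});  // the "fill" loop: store undefined
  return frame;
}

int main() {
  auto frame = AdaptArguments({{false, 1}, {false, 2}}, 4);
  assert(frame.size() == 4 && frame[2].undefined && frame[3].undefined);
}
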
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 191b9607f8..d0c05ad0cc 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -11,6 +11,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
+#include "src/mips64/code-stubs-mips64.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
@@ -181,8 +182,10 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Register input_high = scratch2;
Register input_low = scratch3;
- __ lw(input_low, MemOperand(input_reg, double_offset));
- __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
+ __ lw(input_low,
+ MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+ __ lw(input_high,
+ MemOperand(input_reg, double_offset + Register::kExponentOffset));
Label normal_exponent, restore_sign;
// Extract the biased exponent in result.
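
The DoubleToIStub change above is an endianness repair: the stub previously hardcoded the mantissa word at offset 0 and the exponent word at offset 4, which only holds on little-endian targets; Register::kMantissaOffset and Register::kExponentOffset pick the correct halves on big-endian MIPS as well. A quick standalone demonstration of the layout issue:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double d = -2.5;
  uint32_t words[2];
  std::memcpy(words, &d, sizeof d);
  // On a little-endian host the sign/exponent half is words[1] (0xc0040000);
  // on a big-endian host it is words[0]. kExponentOffset abstracts this away.
  std::printf("words[0]=%08x words[1]=%08x\n", (unsigned)words[0],
              (unsigned)words[1]);
}
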
@@ -721,29 +724,25 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// a1 (rhs) second.
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- if (cc == eq && strict()) {
- __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
+ if (cc == eq) {
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
+ 1);
} else {
- Builtins::JavaScript native;
- if (cc == eq) {
- native = Builtins::EQUALS;
+ int ncr; // NaN compare result.
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result.
- if (cc == lt || cc == le) {
- ncr = GREATER;
- } else {
- DCHECK(cc == gt || cc == ge); // Remaining cases.
- ncr = LESS;
- }
- __ li(a0, Operand(Smi::FromInt(ncr)));
- __ push(a0);
+ DCHECK(cc == gt || cc == ge); // Remaining cases.
+ ncr = LESS;
}
+ __ li(a0, Operand(Smi::FromInt(ncr)));
+ __ push(a0);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ __ TailCallRuntime(
+ is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
+ 1);
}
__ bind(&miss);
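
The ncr ("NaN compare result") value pushed above encodes IEEE semantics: every relational comparison involving NaN must evaluate to false, so the runtime compare helper is told to report GREATER at a less-than site and LESS at a greater-than site. In miniature (illustrative helper, not Runtime::kCompare itself):

#include <cassert>
#include <cmath>

enum { LESS = -1, EQUAL = 0, GREATER = 1 };

int Compare(double lhs, double rhs, int ncr) {
  if (std::isnan(lhs) || std::isnan(rhs)) return ncr;  // NaN: report ncr
  return lhs < rhs ? LESS : (lhs > rhs ? GREATER : EQUAL);
}

int main() {
  double nan = std::nan("");
  // A "<" site pushes GREATER, so "result < 0" is false for NaN operands:
  assert(!(Compare(nan, 1.0, GREATER) < 0));
  // A ">" site pushes LESS, so "result > 0" is false as well:
  assert(!(Compare(nan, 1.0, LESS) > 0));
}
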
@@ -1413,202 +1412,105 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
}
-// Uses registers a0 to a4.
-// Expected input (depending on whether args are in registers or on the stack):
-// * object: a0 or at sp + 1 * kPointerSize.
-// * function: a1 or at sp.
-//
-// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register a4.
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub:
- const Register object = a0; // Object (lhs).
- Register map = a3; // Map of the object.
- const Register function = a1; // Function (rhs).
- const Register prototype = a4; // Prototype of the function.
- const Register inline_site = t1;
- const Register scratch = a2;
-
- const int32_t kDeltaToLoadBoolResult = 7 * Assembler::kInstrSize;
-
- Label slow, loop, is_instance, is_not_instance, not_js_object;
-
- if (!HasArgsInRegisters()) {
- __ ld(object, MemOperand(sp, 1 * kPointerSize));
- __ ld(function, MemOperand(sp, 0));
- }
-
- // Check that the left hand is a JS object and load map.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
- Label miss;
- __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
- __ Branch(&miss, ne, function, Operand(at));
- __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
- __ Branch(&miss, ne, map, Operand(at));
- __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+ Register const object = a1; // Object (lhs).
+ Register const function = a0; // Function (rhs).
+ Register const object_map = a2; // Map of {object}.
+ Register const function_map = a3; // Map of {function}.
+ Register const function_prototype = a4; // Prototype of {function}.
+ Register const scratch = a5;
+
+ DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+ // Check if {object} is a smi.
+ Label object_is_smi;
+ __ JumpIfSmi(object, &object_is_smi);
+
+ // Lookup the {function} and the {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Label fast_case, slow_case;
+ __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
+ __ Branch(&fast_case, ne, function, Operand(at));
+ __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
+ __ Branch(&fast_case, ne, object_map, Operand(at));
+ __ Ret(USE_DELAY_SLOT);
+ __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
+
+ // If {object} is a smi we can safely return false if {function} is a JS
+ // function, otherwise we have to miss to the runtime and throw an exception.
+ __ bind(&object_is_smi);
+ __ JumpIfSmi(function, &slow_case);
+ __ GetObjectType(function, function_map, scratch);
+ __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex); // In delay slot.
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+ // Fast-case: The {function} must be a valid JSFunction.
+ __ bind(&fast_case);
+ __ JumpIfSmi(function, &slow_case);
+ __ GetObjectType(function, function_map, scratch);
+ __ Branch(&slow_case, ne, scratch, Operand(JS_FUNCTION_TYPE));
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- } else {
- DCHECK(HasArgsInRegisters());
- // Patch the (relocated) inlined map check.
-
- // The offset was stored in a4 safepoint slot.
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
- __ LoadFromSafepointRegisterSlot(scratch, a4);
- __ Dsubu(inline_site, ra, scratch);
- // Get the map location in scratch and patch it.
- __ GetRelocatedValue(inline_site, scratch, v1); // v1 used as scratch.
- __ sd(map, FieldMemOperand(scratch, Cell::kValueOffset));
-
- __ mov(t0, map);
- // |scratch| points at the beginning of the cell. Calculate the
- // field containing the map.
- __ Daddu(function, scratch, Operand(Cell::kValueOffset - 1));
- __ RecordWriteField(scratch, Cell::kValueOffset, t0, function,
- kRAHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- }
-
- // Register mapping: a3 is object map and a4 is function prototype.
- // Get prototype of object into a2.
- __ ld(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+ // Ensure that {function} has an instance prototype.
+ __ lbu(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ And(at, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ __ Branch(&slow_case, ne, at, Operand(zero_reg));
- // We don't need map any more. Use it as a scratch register.
- Register scratch2 = map;
- map = no_reg;
+ // Ensure that {function} is not bound.
+ Register const shared_info = scratch;
+ __ ld(shared_info,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(scratch,
+ FieldMemOperand(shared_info, SharedFunctionInfo::kBoundByteOffset));
+ __ And(at, scratch, Operand(1 << SharedFunctionInfo::kBoundBitWithinByte));
+ __ Branch(&slow_case, ne, at, Operand(zero_reg));
- // Loop through the prototype chain looking for the function prototype.
- __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
+ // Get the "prototype" (or initial map) of the {function}.
+ __ ld(function_prototype,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ AssertNotSmi(function_prototype);
+
+ // Resolve the prototype if the {function} has an initial map. Afterwards the
+ // {function_prototype} will be either the JSReceiver prototype object or the
+ // hole value, which means that no instances of the {function} were created so
+ // far and hence we should return false.
+ Label function_prototype_valid;
+ __ GetObjectType(function_prototype, scratch, scratch);
+ __ Branch(&function_prototype_valid, ne, scratch, Operand(MAP_TYPE));
+ __ ld(function_prototype,
+ FieldMemOperand(function_prototype, Map::kPrototypeOffset));
+ __ bind(&function_prototype_valid);
+ __ AssertNotSmi(function_prototype);
+
+ // Update the global instanceof cache with the current {object} map and
+ // {function}. The cached answer will be set when it is known below.
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+
+ // Loop through the prototype chain looking for the {function} prototype.
+ // Assume true, and change to false if not found.
+ Register const object_prototype = object_map;
+ Register const null = scratch;
+ Label done, loop;
+ __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+ __ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ Branch(&is_instance, eq, scratch, Operand(prototype));
- __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
- __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ ld(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
- __ Branch(&loop);
-
- __ bind(&is_instance);
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- if (!HasCallSiteInlineCheck()) {
- __ mov(v0, zero_reg);
- __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- }
- } else {
- // Patch the call site to return true.
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ PatchRelocatedValue(inline_site, scratch, v0);
-
- if (!ReturnTrueFalseObject()) {
- __ mov(v0, zero_reg);
- }
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ li(v0, Operand(Smi::FromInt(1)));
- __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- }
- } else {
- // Patch the call site to return false.
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
- // Get the boolean result location in scratch and patch it.
- __ PatchRelocatedValue(inline_site, scratch, v0);
-
- if (!ReturnTrueFalseObject()) {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- }
-
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow);
- __ GetObjectType(function, scratch2, scratch);
- __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- // Null is not instance of anything.
- __ Branch(&object_not_null, ne, object,
- Operand(isolate()->factory()->null_value()));
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- } else {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- } else {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch, &slow);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- } else {
- __ li(v0, Operand(Smi::FromInt(1)));
- }
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+ __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Branch(&done, eq, object_prototype, Operand(function_prototype));
+ __ Branch(USE_DELAY_SLOT, &loop, ne, object_prototype, Operand(null));
+ __ ld(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ __ Ret(USE_DELAY_SLOT);
+ __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex); // In delay slot.
- // Slow-case. Tail call builtin.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- if (HasArgsInRegisters()) {
- __ Push(a0, a1);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(a0, a1);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- __ mov(a0, v0);
- __ LoadRoot(v0, Heap::kTrueValueRootIndex);
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
- __ LoadRoot(v0, Heap::kFalseValueRootIndex);
- __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
- }
+ // Slow-case: Call the runtime function.
+ __ bind(&slow_case);
+ __ Push(object, function);
+ __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
}
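
The rewritten InstanceOfStub drops call-site patching entirely; the fast path is now a straight prototype-chain walk against the function's resolved prototype, with the global instanceof cache in front. The core loop, modeled in a few lines of C++ (illustrative structs, not V8's heap layout):

#include <cassert>

struct Map;
struct Object { const Map* map; };        // HeapObject::kMapOffset
struct Map { const Object* prototype; };  // Map::kPrototypeOffset

bool OrdinaryHasInstance(const Object* object,
                         const Object* function_prototype) {
  // The loop above: load map->prototype, compare, follow until null.
  for (const Object* p = object->map->prototype; p != nullptr;
       p = p->map->prototype) {
    if (p == function_prototype) return true;  // cached answer = true
  }
  return false;  // reached null: not an instance
}

int main() {
  Map proto_map{nullptr};
  Object proto{&proto_map};
  Map obj_map{&proto};
  Object obj{&obj_map};
  assert(OrdinaryHasInstance(&obj, &proto));
  assert(!OrdinaryHasInstance(&proto, &obj));
}
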
@@ -1684,73 +1586,70 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+
+ DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime,
- ne,
- a2,
+ __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
+ __ Branch(&runtime, ne, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// Patch the arguments.length and the parameters pointer in the current frame.
- __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sd(a2, MemOperand(sp, 0 * kPointerSize));
+ __ ld(a2, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiScale(a7, a2, kPointerSizeLog2);
- __ Daddu(a3, a3, Operand(a7));
- __ daddiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
- __ sd(a3, MemOperand(sp, 1 * kPointerSize));
+ __ Daddu(a4, a4, Operand(a7));
+ __ daddiu(a3, a4, StandardFrameConstants::kCallerSPOffset);
__ bind(&runtime);
+ __ Push(a1, a3, a2);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // Stack layout:
- // sp[0] : number of parameters (tagged)
- // sp[4] : address of receiver argument
- // sp[8] : function
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
// Registers used over whole function:
- // a6 : allocated object (tagged)
- // t1 : mapped parameter count (tagged)
+ // a5 : arguments count (tagged)
+ // a6 : mapped parameter count (tagged)
- __ ld(a1, MemOperand(sp, 0 * kPointerSize));
- // a1 = parameter count (tagged)
+ DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame,
- eq,
- a2,
+ Label adaptor_frame, try_allocate, runtime;
+ __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
+ __ Branch(&adaptor_frame, eq, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
// No adaptor, parameter count = argument count.
- __ mov(a2, a1);
- __ Branch(&try_allocate);
+ __ mov(a5, a2);
+ __ Branch(USE_DELAY_SLOT, &try_allocate);
+ __ mov(a6, a2); // In delay slot.
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
- __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiScale(t2, a2, kPointerSizeLog2);
- __ Daddu(a3, a3, Operand(t2));
- __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sd(a3, MemOperand(sp, 1 * kPointerSize));
-
- // a1 = parameter count (tagged)
- // a2 = argument count (tagged)
- // Compute the mapped parameter count = min(a1, a2) in a1.
- Label skip_min;
- __ Branch(&skip_min, lt, a1, Operand(a2));
- __ mov(a1, a2);
- __ bind(&skip_min);
+ __ ld(a5, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiScale(t2, a5, kPointerSizeLog2);
+ __ Daddu(a4, a4, Operand(t2));
+ __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // a5 = argument count (tagged)
+ // a6 = parameter count (tagged)
+ // Compute the mapped parameter count = min(a6, a5) in a6.
+ __ mov(a6, a2);
+ __ Branch(&try_allocate, le, a6, Operand(a5));
+ __ mov(a6, a5);
__ bind(&try_allocate);
@@ -1761,14 +1660,14 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// If there are no mapped parameters, we do not need the parameter_map.
Label param_map_size;
DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
- __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a1 == 0.
- __ SmiScale(t1, a1, kPointerSizeLog2);
+ __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a6, Operand(zero_reg));
+ __ mov(t1, zero_reg); // In delay slot: param map size = 0 when a6 == 0.
+ __ SmiScale(t1, a6, kPointerSizeLog2);
__ daddiu(t1, t1, kParameterMapHeaderSize);
__ bind(&param_map_size);
// 2. Backing store.
- __ SmiScale(t2, a2, kPointerSizeLog2);
+ __ SmiScale(t2, a5, kPointerSizeLog2);
__ Daddu(t1, t1, Operand(t2));
__ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
@@ -1776,7 +1675,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(t1, v0, a3, a4, &runtime, TAG_OBJECT);
+ __ Allocate(t1, v0, a4, t1, &runtime, TAG_OBJECT);
// v0 = address of new object(s) (tagged)
// a2 = argument count (smi-tagged)
@@ -1789,36 +1688,36 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
Label skip2_ne, skip2_eq;
- __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
+ __ Branch(&skip2_ne, ne, a6, Operand(zero_reg));
__ ld(a4, MemOperand(a4, kNormalOffset));
__ bind(&skip2_ne);
- __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
+ __ Branch(&skip2_eq, eq, a6, Operand(zero_reg));
__ ld(a4, MemOperand(a4, kAliasedOffset));
__ bind(&skip2_eq);
// v0 = address of new object (tagged)
- // a1 = mapped parameter count (tagged)
// a2 = argument count (smi-tagged)
// a4 = address of arguments map (tagged)
+ // a6 = mapped parameter count (tagged)
__ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ LoadRoot(t1, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(t1, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(t1, FieldMemOperand(v0, JSObject::kElementsOffset));
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ ld(a3, MemOperand(sp, 2 * kPointerSize));
- __ AssertNotSmi(a3);
+ __ AssertNotSmi(a1);
const int kCalleeOffset = JSObject::kHeaderSize +
Heap::kArgumentsCalleeIndex * kPointerSize;
- __ sd(a3, FieldMemOperand(v0, kCalleeOffset));
+ __ sd(a1, FieldMemOperand(v0, kCalleeOffset));
// Use the length (smi tagged) and set that as an in-object property too.
+ __ AssertSmi(a5);
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
const int kLengthOffset = JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize;
- __ sd(a2, FieldMemOperand(v0, kLengthOffset));
+ __ sd(a5, FieldMemOperand(v0, kLengthOffset));
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, a4 will point there, otherwise
@@ -1827,29 +1726,29 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
// v0 = address of new object (tagged)
- // a1 = mapped parameter count (tagged)
// a2 = argument count (tagged)
// a4 = address of parameter map or backing store (tagged)
+ // a6 = mapped parameter count (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
Label skip3;
- __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
- // Move backing store address to a3, because it is
+ __ Branch(&skip3, ne, a6, Operand(Smi::FromInt(0)));
+ // Move backing store address to a1, because it is
// expected there when filling in the unmapped arguments.
- __ mov(a3, a4);
+ __ mov(a1, a4);
__ bind(&skip3);
- __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
+ __ Branch(&skip_parameter_map, eq, a6, Operand(Smi::FromInt(0)));
- __ LoadRoot(a6, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ sd(a6, FieldMemOperand(a4, FixedArray::kMapOffset));
- __ Daddu(a6, a1, Operand(Smi::FromInt(2)));
- __ sd(a6, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ LoadRoot(a5, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
+ __ Daddu(a5, a6, Operand(Smi::FromInt(2)));
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
__ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
- __ SmiScale(t2, a1, kPointerSizeLog2);
- __ Daddu(a6, a4, Operand(t2));
- __ Daddu(a6, a6, Operand(kParameterMapHeaderSize));
- __ sd(a6, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Daddu(a5, a4, Operand(t2));
+ __ Daddu(a5, a5, Operand(kParameterMapHeaderSize));
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_parameter_count slots. They index the context,
@@ -1860,71 +1759,71 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
// We loop from right to left.
Label parameters_loop, parameters_test;
- __ mov(a6, a1);
- __ ld(t1, MemOperand(sp, 0 * kPointerSize));
- __ Daddu(t1, t1, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ Dsubu(t1, t1, Operand(a1));
+ __ mov(a5, a6);
+ __ Daddu(t1, a2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ Dsubu(t1, t1, Operand(a6));
__ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
- __ SmiScale(t2, a6, kPointerSizeLog2);
- __ Daddu(a3, a4, Operand(t2));
- __ Daddu(a3, a3, Operand(kParameterMapHeaderSize));
+ __ SmiScale(t2, a5, kPointerSizeLog2);
+ __ Daddu(a1, a4, Operand(t2));
+ __ Daddu(a1, a1, Operand(kParameterMapHeaderSize));
- // a6 = loop variable (tagged)
- // a1 = mapping index (tagged)
- // a3 = address of backing store (tagged)
+ // a1 = address of backing store (tagged)
// a4 = address of parameter map (tagged)
- // a5 = temporary scratch (a.o., for address calculation)
+ // a0 = temporary scratch (e.g., for address calculation)
+ // t1 = loop variable (tagged)
// a7 = the hole value
__ jmp(&parameters_test);
__ bind(&parameters_loop);
-
- __ Dsubu(a6, a6, Operand(Smi::FromInt(1)));
- __ SmiScale(a5, a6, kPointerSizeLog2);
- __ Daddu(a5, a5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ Daddu(t2, a4, a5);
+ __ Dsubu(a5, a5, Operand(Smi::FromInt(1)));
+ __ SmiScale(a0, a5, kPointerSizeLog2);
+ __ Daddu(a0, a0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ Daddu(t2, a4, a0);
__ sd(t1, MemOperand(t2));
- __ Dsubu(a5, a5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ Daddu(t2, a3, a5);
+ __ Dsubu(a0, a0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ Daddu(t2, a1, a0);
__ sd(a7, MemOperand(t2));
__ Daddu(t1, t1, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
- __ Branch(&parameters_loop, ne, a6, Operand(Smi::FromInt(0)));
+ __ Branch(&parameters_loop, ne, a5, Operand(Smi::FromInt(0)));
+
+ // Restore a5 = argument count (tagged).
+ __ ld(a5, FieldMemOperand(v0, kLengthOffset));
__ bind(&skip_parameter_map);
- // a2 = argument count (tagged)
- // a3 = address of backing store (tagged)
- // a5 = scratch
+ // v0 = address of new object (tagged)
+ // a1 = address of backing store (tagged)
+ // a5 = argument count (tagged)
+ // a6 = mapped parameter count (tagged)
+ // t1 = scratch
// Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
- __ sd(a5, FieldMemOperand(a3, FixedArray::kMapOffset));
- __ sd(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+ __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+ __ sd(t1, FieldMemOperand(a1, FixedArray::kMapOffset));
+ __ sd(a5, FieldMemOperand(a1, FixedArray::kLengthOffset));
Label arguments_loop, arguments_test;
- __ mov(t1, a1);
- __ ld(a4, MemOperand(sp, 1 * kPointerSize));
- __ SmiScale(t2, t1, kPointerSizeLog2);
- __ Dsubu(a4, a4, Operand(t2));
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Dsubu(a3, a3, Operand(t2));
__ jmp(&arguments_test);
__ bind(&arguments_loop);
- __ Dsubu(a4, a4, Operand(kPointerSize));
- __ ld(a6, MemOperand(a4, 0));
- __ SmiScale(t2, t1, kPointerSizeLog2);
- __ Daddu(a5, a3, Operand(t2));
- __ sd(a6, FieldMemOperand(a5, FixedArray::kHeaderSize));
- __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
+ __ Dsubu(a3, a3, Operand(kPointerSize));
+ __ ld(a4, MemOperand(a3, 0));
+ __ SmiScale(t2, a6, kPointerSizeLog2);
+ __ Daddu(t1, a1, Operand(t2));
+ __ sd(a4, FieldMemOperand(t1, FixedArray::kHeaderSize));
+ __ Daddu(a6, a6, Operand(Smi::FromInt(1)));
__ bind(&arguments_test);
- __ Branch(&arguments_loop, lt, t1, Operand(a2));
+ __ Branch(&arguments_loop, lt, a6, Operand(a5));
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
+ // Return.
+ __ Ret();
// Do the runtime call to allocate the arguments object.
- // a2 = argument count (tagged)
+ // a5 = argument count (tagged)
__ bind(&runtime);
- __ sd(a2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ Push(a1, a3, a5);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
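
GenerateNewSloppyFast now builds the aliased ("mapped") arguments object from registers instead of reading its inputs off the stack. What the parameter map buys, in a simplified model (illustration only): the first min(parameter_count, argument_count) slots alias the real parameter slots, and the rest live in the plain backing store.

#include <cassert>
#include <vector>

struct SloppyArguments {
  std::vector<double*> mapped;   // parameter map: aliases into the frame
  std::vector<double> unmapped;  // plain backing store for the rest

  double get(size_t i) const {
    return i < mapped.size() ? *mapped[i] : unmapped[i - mapped.size()];
  }
  void set(size_t i, double v) {
    if (i < mapped.size()) *mapped[i] = v;
    else unmapped[i - mapped.size()] = v;
  }
};

int main() {
  double param0 = 1;
  SloppyArguments args{{&param0}, {2, 3}};  // one mapped slot, two extras
  args.set(0, 42);
  assert(param0 == 42);      // writes through arguments hit the parameter
  assert(args.get(2) == 3);  // extras come from the backing store
}
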
@@ -1953,46 +1852,40 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
+ // a1 : function
+ // a2 : number of parameters (tagged)
+ // a3 : parameters pointer
+
+ DCHECK(a1.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(a2.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(a3.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
+
// Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&adaptor_frame,
- eq,
- a3,
+ Label try_allocate, runtime;
+ __ ld(a4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ld(a0, MemOperand(a4, StandardFrameConstants::kContextOffset));
+ __ Branch(&try_allocate, ne, a0,
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- // Get the length from the frame.
- __ ld(a1, MemOperand(sp, 0));
- __ Branch(&try_allocate);
-
// Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sd(a1, MemOperand(sp, 0));
- __ SmiScale(at, a1, kPointerSizeLog2);
-
- __ Daddu(a3, a2, Operand(at));
-
- __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sd(a3, MemOperand(sp, 1 * kPointerSize));
+ __ ld(a2, MemOperand(a4, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiScale(at, a2, kPointerSizeLog2);
+ __ Daddu(a4, a4, Operand(at));
+ __ Daddu(a3, a4, Operand(StandardFrameConstants::kCallerSPOffset));
// Try the new space allocation. Start out with computing the size
// of the arguments object and the elements array in words.
Label add_arguments_object;
__ bind(&try_allocate);
- __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
- __ SmiUntag(a1);
+ __ SmiUntag(t1, a2);
+ __ Branch(&add_arguments_object, eq, a2, Operand(zero_reg));
- __ Daddu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ Daddu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
+ __ Daddu(t1, t1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
- __ Allocate(a1, v0, a2, a3, &runtime,
+ __ Allocate(t1, v0, a4, a5, &runtime,
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
@@ -2002,88 +1895,55 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
Context::STRICT_ARGUMENTS_MAP_INDEX)));
__ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
- __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ LoadRoot(a5, Heap::kEmptyFixedArrayRootIndex);
+ __ sd(a5, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ sd(a5, FieldMemOperand(v0, JSObject::kElementsOffset));
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ ld(a1, MemOperand(sp, 0 * kPointerSize));
- __ AssertSmi(a1);
- __ sd(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ __ AssertSmi(a2);
+ __ sd(a2,
+ FieldMemOperand(v0, JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize));
Label done;
- __ Branch(&done, eq, a1, Operand(zero_reg));
-
- // Get the parameters pointer from the stack.
- __ ld(a2, MemOperand(sp, 1 * kPointerSize));
+ __ Branch(&done, eq, a2, Operand(zero_reg));
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize));
__ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
- __ sd(a3, FieldMemOperand(a4, FixedArray::kMapOffset));
- __ sd(a1, FieldMemOperand(a4, FixedArray::kLengthOffset));
- // Untag the length for the loop.
- __ SmiUntag(a1);
-
+ __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
+ __ sd(a5, FieldMemOperand(a4, FixedArray::kMapOffset));
+ __ sd(a2, FieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ SmiUntag(a2);
// Copy the fixed array slots.
Label loop;
// Set up a4 to point to the first array slot.
__ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
- // Pre-decrement a2 with kPointerSize on each iteration.
+ // Pre-decrement a3 with kPointerSize on each iteration.
// Pre-decrement in order to skip receiver.
- __ Daddu(a2, a2, Operand(-kPointerSize));
- __ ld(a3, MemOperand(a2));
+ __ Daddu(a3, a3, Operand(-kPointerSize));
+ __ ld(a5, MemOperand(a3));
// Post-increment a4 with kPointerSize on each iteration.
- __ sd(a3, MemOperand(a4));
+ __ sd(a5, MemOperand(a4));
__ Daddu(a4, a4, Operand(kPointerSize));
- __ Dsubu(a1, a1, Operand(1));
- __ Branch(&loop, ne, a1, Operand(zero_reg));
+ __ Dsubu(a2, a2, Operand(1));
+ __ Branch(&loop, ne, a2, Operand(zero_reg));
- // Return and remove the on-stack parameters.
+ // Return.
__ bind(&done);
- __ DropAndRet(3);
+ __ Ret();
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
+ __ Push(a1, a3, a2);
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // sp[0] : language mode
- // sp[4] : index of rest parameter
- // sp[8] : number of parameters
- // sp[12] : receiver displacement
- // Check if the calling frame is an arguments adaptor frame.
-
- Label runtime;
- __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
- __ Branch(&runtime, ne, a3,
- Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-
- // Patch the arguments.length and the parameters pointer.
- __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ sd(a1, MemOperand(sp, 2 * kPointerSize));
- __ SmiScale(at, a1, kPointerSizeLog2);
-
- __ Daddu(a3, a2, Operand(at));
-
- __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
- __ sd(a3, MemOperand(sp, 3 * kPointerSize));
-
- // Do the runtime call to allocate the arguments object.
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2608,27 +2468,24 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ Branch(&done, eq, a5, Operand(at));
__ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
- __ Branch(FLAG_pretenuring_call_new ? &miss : &check_allocation_site, ne,
- feedback_map, Operand(at));
+ __ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ jmp(&megamorphic);
- if (!FLAG_pretenuring_call_new) {
- __ bind(&check_allocation_site);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the slot either some other function or an
- // AllocationSite.
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&miss, ne, feedback_map, Operand(at));
+ __ bind(&check_allocation_site);
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite.
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&miss, ne, feedback_map, Operand(at));
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
- __ Branch(&megamorphic, ne, a1, Operand(a5));
- __ jmp(&done);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
+ __ Branch(&megamorphic, ne, a1, Operand(a5));
+ __ jmp(&done);
__ bind(&miss);
@@ -2647,23 +2504,21 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// An uninitialized cache is patched with the function.
__ bind(&initialize);
- if (!FLAG_pretenuring_call_new) {
- // Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
- __ Branch(&not_array_function, ne, a1, Operand(a5));
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the
- // slot.
- CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
- __ Branch(&done);
+ // Make sure the function is the Array() function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a5);
+ __ Branch(&not_array_function, ne, a1, Operand(a5));
+
+ // The target function is the Array constructor.
+ // Create an AllocationSite if we don't already have it and store it in the
+ // slot.
+ CreateAllocationSiteStub create_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ __ Branch(&done);
- __ bind(&not_array_function);
- }
+ __ bind(&not_array_function);
- CreateWeakCellStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ CreateWeakCellStub weak_cell_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
__ bind(&done);
}
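
With FLAG_pretenuring_call_new gone, GenerateRecordCallTarget always runs the AllocationSite logic. The slot's lifecycle is a small state machine: uninitialized, then monomorphic (a WeakCell for a regular target, an AllocationSite when the target is the Array function), then permanently megamorphic on any mismatch. Sketched abstractly (not V8's feedback vector types):

#include <cassert>

struct Slot {
  enum State { kUninitialized, kMonomorphic, kAllocationSite, kMegamorphic };
  State state = kUninitialized;
  const void* target = nullptr;
};

void RecordCallTarget(Slot& slot, const void* fn, const void* array_fn) {
  switch (slot.state) {
    case Slot::kUninitialized:
      slot.state = (fn == array_fn) ? Slot::kAllocationSite : Slot::kMonomorphic;
      slot.target = fn;
      break;
    case Slot::kMonomorphic:
    case Slot::kAllocationSite:
      if (slot.target != fn) slot.state = Slot::kMegamorphic;  // mismatch
      break;
    case Slot::kMegamorphic:
      break;  // stays megamorphic forever
  }
}

int main() {
  int f, g, array_fn;
  Slot slot;
  RecordCallTarget(slot, &f, &array_fn);
  assert(slot.state == Slot::kMonomorphic);
  RecordCallTarget(slot, &g, &array_fn);
  assert(slot.state == Slot::kMegamorphic);
}
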
@@ -2673,7 +2528,7 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
// Do not transform the receiver for strict mode functions.
int32_t strict_mode_function_mask =
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte ;
+ 1 << SharedFunctionInfo::kStrictModeBitWithinByte;
// Do not transform the receiver for native (Compilerhints already in a3).
int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
@@ -2686,30 +2541,9 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
}
-static void EmitSlowCase(MacroAssembler* masm,
- int argc,
- Label* non_function) {
- // Check for function proxy.
- __ Branch(non_function, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
- __ push(a1); // put proxy as additional argument
- __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
- __ mov(a2, zero_reg);
- __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(non_function);
- __ sd(a1, MemOperand(sp, argc * kPointerSize));
- __ li(a0, Operand(argc)); // Set up the number of arguments.
- __ mov(a2, zero_reg);
- __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+static void EmitSlowCase(MacroAssembler* masm, int argc) {
+ __ li(a0, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -2731,12 +2565,12 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
int argc, bool needs_checks,
bool call_as_method) {
// a1 : the function to call
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
if (needs_checks) {
// Check that the function is really a JavaScript function.
// a1: pushed function (to be verified)
- __ JumpIfSmi(a1, &non_function);
+ __ JumpIfSmi(a1, &slow);
// Goto slow case if we do not have a function.
__ GetObjectType(a1, a4, a4);
@@ -2770,7 +2604,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
if (needs_checks) {
// Slow-case: Non-function called.
__ bind(&slow);
- EmitSlowCase(masm, argc, &non_function);
+ EmitSlowCase(masm, argc);
}
if (call_as_method) {
@@ -2792,33 +2626,27 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// a2 : feedback vector
// a3 : slot in feedback vector (Smi, for RecordCallTarget)
// a4 : original constructor (for IsSuperConstructorCall)
- Label slow, non_function_call;
+
+ Label non_function;
// Check that the function is not a smi.
- __ JumpIfSmi(a1, &non_function_call);
+ __ JumpIfSmi(a1, &non_function);
// Check that the function is a JSFunction.
__ GetObjectType(a1, a5, a5);
- __ Branch(&slow, ne, a5, Operand(JS_FUNCTION_TYPE));
+ __ Branch(&non_function, ne, a5, Operand(JS_FUNCTION_TYPE));
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ dsrl(at, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, at);
- if (FLAG_pretenuring_call_new) {
- // Put the AllocationSite from the feedback vector into a2.
- // By adding kPointerSize we encode that we know the AllocationSite
- // entry is at the feedback vector slot given by a3 + 1.
- __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
- } else {
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into a2, or undefined.
- __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
- __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
- __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into a2, or undefined.
+ __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(a2, a5);
}
@@ -2830,30 +2658,16 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ mov(a3, a1);
}
- // Jump to the function-specific construct stub.
- Register jmp_reg = a4;
- __ ld(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(jmp_reg, FieldMemOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ Daddu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
+ __ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
- // a0: number of arguments
- // a1: called object
- // a5: object type
- Label do_call;
- __ bind(&slow);
- __ Branch(&non_function_call, ne, a5, Operand(JS_FUNCTION_PROXY_TYPE));
- __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing r0).
- __ li(a2, Operand(0, RelocInfo::NONE32));
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ __ bind(&non_function);
+ __ mov(a3, a1);
+ __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@@ -2897,33 +2711,15 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
}
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
- __ ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ld(vector, FieldMemOperand(vector,
- JSFunction::kSharedFunctionInfoOffset));
- __ ld(vector, FieldMemOperand(vector,
- SharedFunctionInfo::kFeedbackVectorOffset));
-}
-
-
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// a1 - function
// a3 - slot id
// a2 - vector
- Label miss;
-
+ // a4 - allocation site (loaded from vector[slot])
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
- __ Branch(&miss, ne, a1, Operand(at));
+ __ Branch(miss, ne, a1, Operand(at));
__ li(a0, Operand(arg_count()));
- __ dsrl(at, a3, 32 - kPointerSizeLog2);
- __ Daddu(at, a2, Operand(at));
- __ ld(a4, FieldMemOperand(at, FixedArray::kHeaderSize));
-
- // Verify that a4 contains an AllocationSite
- __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
- __ Branch(&miss, ne, a5, Operand(at));
// Increment the call count for monomorphic function calls.
__ dsrl(t0, a3, 32 - kPointerSizeLog2);
@@ -2936,18 +2732,6 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ mov(a3, a1);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The slow case, we need this no matter what to complete a call after a miss.
- CallFunctionNoFeedback(masm,
- arg_count(),
- true,
- CallAsMethod());
-
- // Unreachable.
- __ stop("Unexpected code address");
}
@@ -2960,7 +2744,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
Label have_js_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -3014,7 +2798,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
__ bind(&slow);
- EmitSlowCase(masm, argc, &non_function);
+ EmitSlowCase(masm, argc);
if (CallAsMethod()) {
__ bind(&wrap);
@@ -3022,11 +2806,20 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
- Label uninitialized, miss;
+ Label uninitialized, miss, not_allocation_site;
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&slow_start, eq, a4, Operand(at));
+ // Verify that a4 contains an AllocationSite
+ __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+ __ Branch(&not_allocation_site, ne, a5, Operand(at));
+
+ HandleArrayCase(masm, &miss);
+
+ __ bind(&not_allocation_site);
+
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
@@ -3101,8 +2894,8 @@ void CallICStub::Generate(MacroAssembler* masm) {
// the slow case
__ bind(&slow_start);
// Check that the function is really a JavaScript function.
- // r1: pushed function (to be verified)
- __ JumpIfSmi(a1, &non_function);
+ // a1: pushed function (to be verified)
+ __ JumpIfSmi(a1, &slow);
// Goto slow case if we do not have a function.
__ GetObjectType(a1, a4, a4);
@@ -3118,10 +2911,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(a1, a2, a3);
// Call the entry.
- Runtime::FunctionId id = GetICState() == DEFAULT
- ? Runtime::kCallIC_Miss //
- : Runtime::kCallIC_Customization_Miss;
- __ CallRuntime(id, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss, 3);
// Move result to a1 and exit the internal frame.
__ mov(a1, v0);
@@ -3521,7 +3311,7 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
Label not_string, slow_string;
__ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
// Check if string has a cached array index.
- __ ld(a2, FieldMemOperand(a0, String::kHashFieldOffset));
+ __ lwu(a2, FieldMemOperand(a0, String::kHashFieldOffset));
__ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
__ Branch(&slow_string, ne, at, Operand(zero_reg));
__ IndexFromHash(a2, a0);
@@ -3539,7 +3329,39 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(a0); // Push argument.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+ __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+ // The ToString stub takes one argument in a0.
+ Label is_number;
+ __ JumpIfSmi(a0, &is_number);
+
+ Label not_string;
+ __ GetObjectType(a0, a1, a1);
+ // a0: receiver
+ // a1: receiver instance type
+ __ Branch(&not_string, ge, a1, Operand(FIRST_NONSTRING_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
+ __ bind(&not_string);
+
+ Label not_heap_number;
+ __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+ __ Ret(USE_DELAY_SLOT);
+ __ ld(v0, FieldMemOperand(a0, Oddball::kToStringOffset));
+ __ bind(&not_oddball);
+
+ __ push(a0); // Push argument.
+ __ TailCallRuntime(Runtime::kToString, 1, 1);
}
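
The dispatch order matters: Smis short-circuit into the number path, strings return unchanged, heap numbers reuse NumberToStringStub, oddballs return their cached string, and everything else falls through to the runtime. A stand-in model of that ordering (not V8's API; Kind is a hypothetical tag):

    #include <string>
    enum class Kind { kSmi, kString, kHeapNumber, kOddball, kOther };
    std::string ToStringDispatchSketch(Kind kind) {
      switch (kind) {
        case Kind::kSmi:         // is_number: falls through to the number stub
        case Kind::kHeapNumber:  return "NumberToStringStub";
        case Kind::kString:      return "returned as-is";
        case Kind::kOddball:     return "Oddball::kToStringOffset cache";
        default:                 return "Runtime::kToString";
      }
    }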
@@ -3649,35 +3471,34 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[4]: left string
- __ ld(a1, MemOperand(sp, 1 * kPointerSize)); // Left.
- __ ld(a0, MemOperand(sp, 0 * kPointerSize)); // Right.
+ // ----------- S t a t e -------------
+ // -- a1 : left
+ // -- a0 : right
+ // -- ra : return address
+ // -----------------------------------
+ __ AssertString(a1);
+ __ AssertString(a0);
Label not_same;
__ Branch(&not_same, ne, a0, Operand(a1));
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
__ li(v0, Operand(Smi::FromInt(EQUAL)));
- __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
- __ DropAndRet(2);
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a1,
+ a2);
+ __ Ret();
__ bind(&not_same);
- // Check that both objects are sequential one_byte strings.
+ // Check that both objects are sequential one-byte strings.
+ Label runtime;
__ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
- // Compare flat one_byte strings natively. Remove arguments from stack first.
- __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
- __ Daddu(sp, sp, Operand(2 * kPointerSize));
- StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, a4, a5);
+ // Compare flat one-byte strings natively.
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
+ a3);
+ StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
__ bind(&runtime);
+ __ Push(a1, a0);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
@@ -3710,6 +3531,30 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
}
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::BOOLEAN, state());
+ Label miss;
+
+ __ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ __ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ if (op() != Token::EQ_STRICT && is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (!Token::IsEqualityOp(op())) {
+ __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
+ __ AssertSmi(a1);
+ __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+ __ AssertSmi(a0);
+ }
+ __ Ret(USE_DELAY_SLOT);
+ __ Dsubu(v0, a1, a0);
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
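
Equality falls out of the map checks alone; for relational operators the stub loads each boolean's cached to_number Smi (false is 0, true is 1) and returns their difference, matching the CompareIC convention of negative/zero/positive. A trivially small sketch on untagged values (the stub subtracts the Smi-tagged forms, which preserves the sign):

    // Mirrors Dsubu(v0, a1, a0) on the Oddball to_number values.
    int CompareBooleansSketch(bool lhs, bool rhs) {
      return (lhs ? 1 : 0) - (rhs ? 1 : 0);
    }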
+
+
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
DCHECK(state() == CompareICState::SMI);
Label miss;
@@ -4027,8 +3872,20 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ Branch(&miss, ne, a2, Operand(a4));
__ Branch(&miss, ne, a3, Operand(a4));
- __ Ret(USE_DELAY_SLOT);
- __ dsubu(v0, a0, a1);
+ if (Token::IsEqualityOp(op())) {
+ __ Ret(USE_DELAY_SLOT);
+ __ dsubu(v0, a0, a1);
+ } else if (is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (op() == Token::LT || op() == Token::LTE) {
+ __ li(a2, Operand(Smi::FromInt(GREATER)));
+ } else {
+ __ li(a2, Operand(Smi::FromInt(LESS)));
+ }
+ __ Push(a1, a0, a2);
+ __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -4627,33 +4484,26 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, a2);
+ __ EmitLoadTypeFeedbackVector(a2);
CallICStub stub(isolate(), state());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, a2);
- CallIC_ArrayStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
@@ -4662,11 +4512,10 @@ void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
- Register key, Register vector, Register slot,
- Register feedback, Register receiver_map,
- Register scratch1, Register scratch2,
- bool is_polymorphic, Label* miss) {
+static void HandleArrayCases(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, bool is_polymorphic,
+ Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
@@ -4776,8 +4625,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
__ Branch(&not_array, ne, scratch1, Operand(at));
- HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
- scratch1, a7, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
__ bind(&not_array);
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
@@ -4838,8 +4686,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, a7, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, true, &miss);
__ bind(&not_array);
// Is it generic?
@@ -4858,8 +4705,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ Daddu(feedback, vector, Operand(feedback));
__ ld(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, a7, false, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, a7, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
@@ -4871,14 +4717,14 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4895,11 +4741,50 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1
+ Register key = VectorStoreICDescriptor::NameRegister(); // a2
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // a3
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // a4
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0
+ Register feedback = a5;
+ Register receiver_map = a6;
+ Register scratch1 = a7;
+
+ __ SmiScale(scratch1, slot, kPointerSizeLog2);
+ __ Daddu(feedback, vector, Operand(scratch1));
+ __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
+
+ Register scratch2 = t0;
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
+ &miss);
+
+ __ bind(&not_array);
+ __ Branch(&miss, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
+ scratch1, scratch2);
- // TODO(mvstanton): Implement.
__ bind(&miss);
StoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ Branch(USE_DELAY_SLOT, &compare_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
}
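
Both new store-IC bodies open with the same three instructions to fetch vector[slot]: the slot arrives as a Smi, and 64-bit Smi tagging keeps the payload in the upper 32 bits, so SmiScale untags and scales in one arithmetic shift. A sketch of the arithmetic (assuming mips64's kPointerSizeLog2 of 3):

    #include <cstdint>
    // Roughly what SmiScale(scratch1, slot, kPointerSizeLog2) computes.
    int64_t FeedbackSlotOffsetSketch(int64_t smi_tagged_slot) {
      int64_t index = smi_tagged_slot >> 32;  // arithmetic shift untags the Smi
      return index << 3;                      // scale to a byte offset
    }
    // feedback is then loaded from vector + offset + FixedArray::kHeaderSize
    // (minus the heap-object tag, via FieldMemOperand).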
@@ -4913,12 +4798,129 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label start_polymorphic;
+ Label transition_call;
+
+ Register cached_map = scratch1;
+ Register too_far = scratch2;
+ Register pointer_reg = feedback;
+
+ __ ld(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+  // +-----+------+------+-----+-----+-----+ ... ----+
+  // | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
+  // +-----+------+------+-----+-----+-----+ ... ----+
+  //                 0      1           2       len-1
+  //                     ^              ^
+  //                     |              |
+  //                     pointer_reg    too_far
+  //                     aka feedback   scratch2
+  //                     also need receiver_map
+  //                     use cached_map (scratch1) to look in the weak map values.
+ __ SmiScale(too_far, too_far, kPointerSizeLog2);
+ __ Daddu(too_far, feedback, Operand(too_far));
+ __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Daddu(pointer_reg, feedback,
+ Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
+
+ __ bind(&next_loop);
+ __ ld(cached_map, MemOperand(pointer_reg));
+ __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
+ // Is it a transitioning store?
+ __ ld(too_far, MemOperand(pointer_reg, kPointerSize));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&transition_call, ne, too_far, Operand(at));
+
+ __ ld(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+ __ Daddu(t9, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+
+ __ bind(&transition_call);
+ __ ld(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+ __ JumpIfSmi(too_far, miss);
+
+ __ ld(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+ // Load the map into the correct register.
+ DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ Move(feedback, too_far);
+ __ Daddu(t9, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+
+ __ bind(&prepare_next);
+ __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
+ __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
+
+ // We exhausted our array of map handler pairs.
+ __ Branch(miss);
+}
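
In other words, the handler array is walked three slots at a time: a weak cell holding a map, a transition map (or undefined for a plain store), and the handler code. A simplified stand-in model (plain ints instead of tagged values; the cleared-weak-cell miss is not modelled):

    #include <vector>
    // Returns the handler for receiver_map, or -1 for a miss.
    int FindStoreHandlerSketch(const std::vector<int>& feedback,
                               int receiver_map, int undefined, int* map_out) {
      for (size_t i = 0; i + 2 < feedback.size(); i += 3) {
        if (feedback[i] != receiver_map) continue;   // prepare_next
        if (feedback[i + 1] != undefined) {
          *map_out = feedback[i + 1];                // transition_call: new map
        }
        return feedback[i + 2];                      // jump to the handler
      }
      return -1;                                     // exhausted the pairs: miss
    }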
+
+
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // a1
+ Register key = VectorStoreICDescriptor::NameRegister(); // a2
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // a3
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // a4
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(a0)); // a0
+ Register feedback = a5;
+ Register receiver_map = a6;
+ Register scratch1 = a7;
+
+ __ SmiScale(scratch1, slot, kPointerSizeLog2);
+ __ Daddu(feedback, vector, Operand(scratch1));
+ __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ Branch(&not_array, ne, scratch1, Heap::kFixedArrayMapRootIndex);
+
+ // We have a polymorphic element handler.
+ Label try_poly_name;
+
+ Register scratch2 = t0;
+
+ HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
+ &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ Branch(&try_poly_name, ne, feedback, Heap::kmegamorphic_symbolRootIndex);
+ Handle<Code> megamorphic_stub =
+ KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ Branch(&miss, ne, key, Operand(feedback));
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ SmiScale(scratch1, slot, kPointerSizeLog2);
+ __ Daddu(feedback, vector, Operand(scratch1));
+ __ ld(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
+ &miss);
- // TODO(mvstanton): Implement.
__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ Branch(USE_DELAY_SLOT, &compare_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); // In delay slot.
}
@@ -5602,8 +5604,8 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
// --
// -- sp[0] : last argument
// -- ...
- // -- sp[(argc - 1)* 4] : first argument
- // -- sp[argc * 4] : receiver
+ // -- sp[(argc - 1) * 8] : first argument
+ // -- sp[argc * 8] : receiver
// -----------------------------------
Register callee = a0;
@@ -5662,10 +5664,12 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
__ sd(at, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
+ // Stored as an int field; 32-bit integers within a struct on the stack are
+ // always left-justified by the n64 ABI.
__ li(at, Operand(argc.immediate()));
- __ sd(at, MemOperand(a0, 2 * kPointerSize));
+ __ sw(at, MemOperand(a0, 2 * kPointerSize));
// FunctionCallbackInfo::is_construct_call_ = 0
- __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize));
+ __ sw(zero_reg, MemOperand(a0, 2 * kPointerSize + kIntSize));
} else {
// FunctionCallbackInfo::values_
__ dsll(at, argc.reg(), kPointerSizeLog2);
@@ -5673,11 +5677,13 @@ static void CallApiFunctionStubHelper(MacroAssembler* masm,
__ Daddu(at, at, Operand((FCA::kArgsLength - 1) * kPointerSize));
__ sd(at, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
- __ sd(argc.reg(), MemOperand(a0, 2 * kPointerSize));
+ // Stored as an int field; 32-bit integers within a struct on the stack are
+ // always left-justified by the n64 ABI.
+ __ sw(argc.reg(), MemOperand(a0, 2 * kPointerSize));
// FunctionCallbackInfo::is_construct_call_
__ Daddu(argc.reg(), argc.reg(), Operand(FCA::kArgsLength + 1));
__ dsll(at, argc.reg(), kPointerSizeLog2);
- __ sd(at, MemOperand(a0, 3 * kPointerSize));
+ __ sw(at, MemOperand(a0, 2 * kPointerSize + kIntSize));
}
ExternalReference thunk_ref =
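
The two hunks above switch sd to sw because length_ and is_construct_call_ are declared as 32-bit ints in FunctionCallbackInfo, packed into what the stub previously treated as two pointer-sized slots. A stand-in sketch of the layout being filled in (field names mirror the V8 struct; the static_assert assumes an LP64 target):

    #include <cstdint>
    struct FunctionCallbackInfoSketch {
      void* implicit_args_;        // a0 + 0 * kPointerSize           (sd)
      void* values_;               // a0 + 1 * kPointerSize           (sd)
      int32_t length_;             // a0 + 2 * kPointerSize           (sw)
      int32_t is_construct_call_;  // a0 + 2 * kPointerSize + kIntSize (sw)
    };
    static_assert(sizeof(FunctionCallbackInfoSketch) == 24,
                  "two pointers plus one packed 8-byte slot");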
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 7c61f71621..d30bdbb294 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/mips64/codegen-mips64.h"
+
#if V8_TARGET_ARCH_MIPS64
#include "src/codegen.h"
@@ -43,21 +45,13 @@ UnaryMathFunction CreateExpFunction() {
Register temp2 = a5;
Register temp3 = a6;
- if (!IsMipsSoftFloatABI) {
- // Input value is in f12 anyway, nothing to do.
- } else {
- __ Move(input, a0, a1);
- }
+ __ MovFromFloatParameter(input);
__ Push(temp3, temp2, temp1);
MathExpGenerator::EmitMathExp(
&masm, input, result, double_scratch1, double_scratch2,
temp1, temp2, temp3);
__ Pop(temp3, temp2, temp1);
- if (!IsMipsSoftFloatABI) {
- // Result is already in f0, nothing to do.
- } else {
- __ Move(v0, v1, result);
- }
+ __ MovToFloatResult(result);
__ Ret();
}
@@ -139,10 +133,17 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
__ beq(a3, zero_reg, &aligned); // Already aligned.
__ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
- __ lwr(t8, MemOperand(a1));
- __ addu(a1, a1, a3);
- __ swr(t8, MemOperand(a0));
- __ addu(a0, a0, a3);
+ if (kArchEndian == kLittle) {
+ __ lwr(t8, MemOperand(a1));
+ __ addu(a1, a1, a3);
+ __ swr(t8, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ } else {
+ __ lwl(t8, MemOperand(a1));
+ __ addu(a1, a1, a3);
+ __ swl(t8, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ }
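
Each branch does the same job: one lwr/lwl pair assembles a word from a possibly unaligned source address without faulting, but which of the two instructions fills the low-order bytes swaps between little- and big-endian, which is all the new kArchEndian branches encode. A host-side sketch of the equivalent operation:

    #include <cstdint>
    #include <cstring>
    // What one lwr/lwl pair achieves: a faultless 32-bit load from an
    // unaligned address.
    uint32_t UnalignedLoadSketch(const uint8_t* p) {
      uint32_t w;
      std::memcpy(&w, p, sizeof(w));
      return w;
    }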
// Now dst/src are both aligned to (word) aligned addresses. Set a2 to
// count how many bytes we have to copy after all the 64 byte chunks are
@@ -295,12 +296,21 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
__ beq(a3, zero_reg, &ua_chk16w);
__ subu(a2, a2, a3); // In delay slot.
- __ lwr(v1, MemOperand(a1));
- __ lwl(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ addu(a1, a1, a3);
- __ swr(v1, MemOperand(a0));
- __ addu(a0, a0, a3);
+ if (kArchEndian == kLittle) {
+ __ lwr(v1, MemOperand(a1));
+ __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addu(a1, a1, a3);
+ __ swr(v1, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ } else {
+ __ lwl(v1, MemOperand(a1));
+ __ lwr(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ addu(a1, a1, a3);
+ __ swl(v1, MemOperand(a0));
+ __ addu(a0, a0, a3);
+ }
// Now the dst (but not the source) is aligned. Set a2 to count how many
// bytes we have to copy after all the 64 byte chunks are copied and a3 to
@@ -328,41 +338,79 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
}
__ bind(&ua_loop16w);
- __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
- __ lwr(a4, MemOperand(a1));
- __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
- __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
-
- if (pref_hint_store == kPrefHintPrepareForStore) {
- __ sltu(v1, t9, a0);
- __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+ if (kArchEndian == kLittle) {
+ __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+ __ lwr(a4, MemOperand(a1));
+ __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
+ __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0);
+ __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lwr(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&ua_skip_pref);
+ __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
+ __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
+ __ lwl(a4,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a5,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a6,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a7,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t0,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+ __ lwl(a4, MemOperand(a1));
+ __ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
+ __ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
+
+ if (pref_hint_store == kPrefHintPrepareForStore) {
+ __ sltu(v1, t9, a0);
+ __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+ }
+ __ lwl(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
+
+ __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+ __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+ __ bind(&ua_skip_pref);
+ __ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
+ __ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
+ __ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
+ __ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
+ __ lwr(a4,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(a5,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(a6,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(a7,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t0,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t1,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t2,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t3,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
}
- __ lwr(a7, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot.
-
- __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
- __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
-
- __ bind(&ua_skip_pref);
- __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
- __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
- __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
- __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
- __ lwl(a4,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a5,
- MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a6,
- MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a7,
- MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t0,
- MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t1,
- MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t2,
- MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t3,
- MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
__ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
__ sw(a4, MemOperand(a0));
__ sw(a5, MemOperand(a0, 1, loadstore_chunk));
@@ -372,30 +420,57 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
__ sw(t1, MemOperand(a0, 5, loadstore_chunk));
__ sw(t2, MemOperand(a0, 6, loadstore_chunk));
__ sw(t3, MemOperand(a0, 7, loadstore_chunk));
- __ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
- __ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
- __ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
- __ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
- __ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
- __ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
- __ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
- __ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
- __ lwl(a4,
- MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a5,
- MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a6,
- MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a7,
- MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t0,
- MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t1,
- MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t2,
- MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t3,
- MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+ if (kArchEndian == kLittle) {
+ __ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
+ __ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
+ __ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
+ __ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
+ __ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
+ __ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
+ __ lwl(a4,
+ MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a5,
+ MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a6,
+ MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a7,
+ MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t0,
+ MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(a4, MemOperand(a1, 8, loadstore_chunk));
+ __ lwl(a5, MemOperand(a1, 9, loadstore_chunk));
+ __ lwl(a6, MemOperand(a1, 10, loadstore_chunk));
+ __ lwl(a7, MemOperand(a1, 11, loadstore_chunk));
+ __ lwl(t0, MemOperand(a1, 12, loadstore_chunk));
+ __ lwl(t1, MemOperand(a1, 13, loadstore_chunk));
+ __ lwl(t2, MemOperand(a1, 14, loadstore_chunk));
+ __ lwl(t3, MemOperand(a1, 15, loadstore_chunk));
+ __ lwr(a4,
+ MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(a5,
+ MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(a6,
+ MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(a7,
+ MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t0,
+ MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t1,
+ MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t2,
+ MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t3,
+ MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+ }
__ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
__ sw(a4, MemOperand(a0, 8, loadstore_chunk));
__ sw(a5, MemOperand(a0, 9, loadstore_chunk));
@@ -419,30 +494,57 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
__ beq(a2, t8, &ua_chk1w);
__ nop(); // In delay slot.
- __ lwr(a4, MemOperand(a1));
- __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
- __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
- __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
- __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
- __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
- __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
- __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
- __ lwl(a4,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a5,
- MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a6,
- MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(a7,
- MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t0,
- MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t1,
- MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t2,
- MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
- __ lwl(t3,
- MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ if (kArchEndian == kLittle) {
+ __ lwr(a4, MemOperand(a1));
+ __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
+ __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
+ __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
+ __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
+ __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
+ __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
+ __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
+ __ lwl(a4,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a5,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a6,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(a7,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t0,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t1,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t2,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwl(t3,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(a4, MemOperand(a1));
+ __ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
+ __ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
+ __ lwl(a7, MemOperand(a1, 3, loadstore_chunk));
+ __ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
+ __ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
+ __ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
+ __ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
+ __ lwr(a4,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(a5,
+ MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(a6,
+ MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(a7,
+ MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t0,
+ MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t1,
+ MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t2,
+ MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+ __ lwr(t3,
+ MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+ }
__ addiu(a1, a1, 8 * loadstore_chunk);
__ sw(a4, MemOperand(a0));
__ sw(a5, MemOperand(a0, 1, loadstore_chunk));
@@ -463,9 +565,15 @@ MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
__ addu(a3, a0, a3);
__ bind(&ua_wordCopy_loop);
- __ lwr(v1, MemOperand(a1));
- __ lwl(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ if (kArchEndian == kLittle) {
+ __ lwr(v1, MemOperand(a1));
+ __ lwl(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ } else {
+ __ lwl(v1, MemOperand(a1));
+ __ lwr(v1,
+ MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+ }
__ addiu(a0, a0, loadstore_chunk);
__ addiu(a1, a1, loadstore_chunk);
__ bne(a0, a3, &ua_wordCopy_loop);
@@ -694,7 +802,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Convert and copy elements.
__ bind(&loop);
__ ld(scratch2, MemOperand(scratch1));
- __ Daddu(scratch1, scratch1, kIntSize);
+ __ Daddu(scratch1, scratch1, kPointerSize);
// scratch2: current element
__ JumpIfNotSmi(scratch2, &convert_hole);
__ SmiUntag(scratch2);
@@ -715,9 +823,9 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
}
// mantissa
- __ sw(hole_lower, MemOperand(scratch3));
+ __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
// exponent
- __ sw(hole_upper, MemOperand(scratch3, kIntSize));
+ __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
__ Daddu(scratch3, scratch3, kDoubleSize);
__ bind(&entry);
@@ -784,7 +892,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Register dst_end = length;
Register heap_number_map = scratch;
__ Daddu(src_elements, src_elements,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
__ SmiScale(dst_end, dst_end, kPointerSizeLog2);
__ Daddu(dst_end, dst_elements, dst_end);
@@ -822,10 +930,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ bind(&loop);
Register upper_bits = key;
- __ lw(upper_bits, MemOperand(src_elements));
+ __ lw(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
__ Daddu(src_elements, src_elements, kDoubleSize);
// upper_bits: current element's upper 32 bits
- // src_elements: address of next element's upper 32 bit
+ // src_elements: address of next element
__ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
// Non-hole double, copy value into a heap number.
@@ -835,11 +943,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
&gc_required);
// heap_number: new heap number
- // Load mantissa of current element, src_elements
- // point to exponent of next element.
- __ lw(scratch2, MemOperand(heap_number, -12));
- __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
- __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
+ // Load the current element; src_elements points to the next element.
+
+ __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
+ __ sd(scratch2, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
+
__ mov(scratch2, dst_elements);
__ sd(heap_number, MemOperand(dst_elements));
__ Daddu(dst_elements, dst_elements, kPointerSize);
@@ -1043,8 +1151,8 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
__ li(temp3, Operand(ExternalReference::math_exp_log_table()));
__ dsll(at, temp2, 3);
__ Daddu(temp3, temp3, Operand(at));
- __ lwu(temp2, MemOperand(temp3, 0));
- __ lwu(temp3, MemOperand(temp3, kIntSize));
+ __ lwu(temp2, MemOperand(temp3, Register::kMantissaOffset));
+ __ lwu(temp3, MemOperand(temp3, Register::kExponentOffset));
// The first word loaded goes into the lower-numbered register.
if (temp2.code() < temp3.code()) {
__ dsll(at, temp1, 20);
diff --git a/deps/v8/src/mips64/constants-mips64.cc b/deps/v8/src/mips64/constants-mips64.cc
index b43601c5cf..efabfe4f26 100644
--- a/deps/v8/src/mips64/constants-mips64.cc
+++ b/deps/v8/src/mips64/constants-mips64.cc
@@ -219,191 +219,6 @@ bool Instruction::IsTrap() const {
}
-Instruction::Type Instruction::InstructionType() const {
- switch (OpcodeFieldRaw()) {
- case SPECIAL:
- switch (FunctionFieldRaw()) {
- case JR:
- case JALR:
- case BREAK:
- case SLL:
- case DSLL:
- case DSLL32:
- case SRL:
- case DSRL:
- case DSRL32:
- case SRA:
- case DSRA:
- case DSRA32:
- case SLLV:
- case DSLLV:
- case SRLV:
- case DSRLV:
- case SRAV:
- case DSRAV:
- case MFHI:
- case MFLO:
- case MULT:
- case DMULT:
- case MULTU:
- case DMULTU:
- case DIV:
- case DDIV:
- case DIVU:
- case DDIVU:
- case ADD:
- case DADD:
- case ADDU:
- case DADDU:
- case SUB:
- case DSUB:
- case SUBU:
- case DSUBU:
- case AND:
- case OR:
- case XOR:
- case NOR:
- case SLT:
- case SLTU:
- case TGE:
- case TGEU:
- case TLT:
- case TLTU:
- case TEQ:
- case TNE:
- case MOVZ:
- case MOVN:
- case MOVCI:
- case SELEQZ_S:
- case SELNEZ_S:
- return kRegisterType;
- default:
- return kUnsupported;
- }
- break;
- case SPECIAL2:
- switch (FunctionFieldRaw()) {
- case MUL:
- case CLZ:
- return kRegisterType;
- default:
- return kUnsupported;
- }
- break;
- case SPECIAL3:
- switch (FunctionFieldRaw()) {
- case INS:
- case EXT:
- case DEXT:
- return kRegisterType;
- case BSHFL: {
- int sa = SaFieldRaw() >> kSaShift;
- switch (sa) {
- case BITSWAP:
- return kRegisterType;
- case WSBH:
- case SEB:
- case SEH:
- return kUnsupported;
- }
- sa >>= kBp2Bits;
- switch (sa) {
- case ALIGN:
- return kRegisterType;
- default:
- return kUnsupported;
- }
- }
- case DBSHFL: {
- int sa = SaFieldRaw() >> kSaShift;
- switch (sa) {
- case DBITSWAP:
- return kRegisterType;
- case DSBH:
- case DSHD:
- return kUnsupported;
- }
- sa = SaFieldRaw() >> kSaShift;
- sa >>= kBp3Bits;
- switch (sa) {
- case DALIGN:
- return kRegisterType;
- default:
- return kUnsupported;
- }
- }
- default:
- return kUnsupported;
- }
- break;
- case COP1: // Coprocessor instructions.
- switch (RsFieldRawNoAssert()) {
- case BC1: // Branch on coprocessor condition.
- case BC1EQZ:
- case BC1NEZ:
- return kImmediateType;
- default:
- return kRegisterType;
- }
- break;
- case COP1X:
- return kRegisterType;
- // 16 bits Immediate type instructions. e.g.: addi dest, src, imm16.
- case REGIMM:
- case BEQ:
- case BNE:
- case BLEZ:
- case BGTZ:
- case ADDI:
- case DADDI:
- case ADDIU:
- case DADDIU:
- case SLTI:
- case SLTIU:
- case ANDI:
- case ORI:
- case XORI:
- case LUI:
- case BEQL:
- case BNEL:
- case BLEZL:
- case BGTZL:
- case POP66:
- case POP76:
- case LB:
- case LH:
- case LWL:
- case LW:
- case LWU:
- case LD:
- case LBU:
- case LHU:
- case LWR:
- case SB:
- case SH:
- case SWL:
- case SW:
- case SD:
- case SWR:
- case LWC1:
- case LDC1:
- case SWC1:
- case SDC1:
- case PCREL:
- case BC:
- case BALC:
- return kImmediateType;
- // 26 bits immediate type instructions. e.g.: j imm26.
- case J:
- case JAL:
- return kJumpType;
- default:
- return kUnsupported;
- }
- return kUnsupported;
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index 898a4dbb1d..f23f103ac3 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -35,6 +35,16 @@ enum ArchVariants {
#endif
+ enum Endianness { kLittle, kBig };
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+ static const Endianness kArchEndian = kLittle;
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ static const Endianness kArchEndian = kBig;
+#else
+#error Unknown endianness
+#endif
+
// TODO(plind): consider deriving ABI from compiler flags or build system.
// ABI-dependent definitions are made with #define in simulator-mips64.h,
@@ -888,8 +898,67 @@ class Instruction {
kUnsupported = -1
};
+ enum TypeChecks { NORMAL, EXTRA };
+
+
+#define OpcodeToBitNumber(opcode) \
+ (1ULL << (static_cast<uint32_t>(opcode) >> kOpcodeShift))
+
+ static const uint64_t kOpcodeImmediateTypeMask =
+ OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) |
+ OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) |
+ OpcodeToBitNumber(BGTZ) | OpcodeToBitNumber(ADDI) |
+ OpcodeToBitNumber(DADDI) | OpcodeToBitNumber(ADDIU) |
+ OpcodeToBitNumber(DADDIU) | OpcodeToBitNumber(SLTI) |
+ OpcodeToBitNumber(SLTIU) | OpcodeToBitNumber(ANDI) |
+ OpcodeToBitNumber(ORI) | OpcodeToBitNumber(XORI) |
+ OpcodeToBitNumber(LUI) | OpcodeToBitNumber(BEQL) |
+ OpcodeToBitNumber(BNEL) | OpcodeToBitNumber(BLEZL) |
+ OpcodeToBitNumber(BGTZL) | OpcodeToBitNumber(POP66) |
+ OpcodeToBitNumber(POP76) | OpcodeToBitNumber(LB) | OpcodeToBitNumber(LH) |
+ OpcodeToBitNumber(LWL) | OpcodeToBitNumber(LW) | OpcodeToBitNumber(LWU) |
+ OpcodeToBitNumber(LD) | OpcodeToBitNumber(LBU) | OpcodeToBitNumber(LHU) |
+ OpcodeToBitNumber(LWR) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) |
+ OpcodeToBitNumber(SWL) | OpcodeToBitNumber(SW) | OpcodeToBitNumber(SD) |
+ OpcodeToBitNumber(SWR) | OpcodeToBitNumber(LWC1) |
+ OpcodeToBitNumber(LDC1) | OpcodeToBitNumber(SWC1) |
+ OpcodeToBitNumber(SDC1) | OpcodeToBitNumber(PCREL) |
+ OpcodeToBitNumber(BC) | OpcodeToBitNumber(BALC);
+
+#define FunctionFieldToBitNumber(function) (1ULL << function)
+
+ static const uint64_t kFunctionFieldRegisterTypeMask =
+ FunctionFieldToBitNumber(JR) | FunctionFieldToBitNumber(JALR) |
+ FunctionFieldToBitNumber(BREAK) | FunctionFieldToBitNumber(SLL) |
+ FunctionFieldToBitNumber(DSLL) | FunctionFieldToBitNumber(DSLL32) |
+ FunctionFieldToBitNumber(SRL) | FunctionFieldToBitNumber(DSRL) |
+ FunctionFieldToBitNumber(DSRL32) | FunctionFieldToBitNumber(SRA) |
+ FunctionFieldToBitNumber(DSRA) | FunctionFieldToBitNumber(DSRA32) |
+ FunctionFieldToBitNumber(SLLV) | FunctionFieldToBitNumber(DSLLV) |
+ FunctionFieldToBitNumber(SRLV) | FunctionFieldToBitNumber(DSRLV) |
+ FunctionFieldToBitNumber(SRAV) | FunctionFieldToBitNumber(DSRAV) |
+ FunctionFieldToBitNumber(MFHI) | FunctionFieldToBitNumber(MFLO) |
+ FunctionFieldToBitNumber(MULT) | FunctionFieldToBitNumber(DMULT) |
+ FunctionFieldToBitNumber(MULTU) | FunctionFieldToBitNumber(DMULTU) |
+ FunctionFieldToBitNumber(DIV) | FunctionFieldToBitNumber(DDIV) |
+ FunctionFieldToBitNumber(DIVU) | FunctionFieldToBitNumber(DDIVU) |
+ FunctionFieldToBitNumber(ADD) | FunctionFieldToBitNumber(DADD) |
+ FunctionFieldToBitNumber(ADDU) | FunctionFieldToBitNumber(DADDU) |
+ FunctionFieldToBitNumber(SUB) | FunctionFieldToBitNumber(DSUB) |
+ FunctionFieldToBitNumber(SUBU) | FunctionFieldToBitNumber(DSUBU) |
+ FunctionFieldToBitNumber(AND) | FunctionFieldToBitNumber(OR) |
+ FunctionFieldToBitNumber(XOR) | FunctionFieldToBitNumber(NOR) |
+ FunctionFieldToBitNumber(SLT) | FunctionFieldToBitNumber(SLTU) |
+ FunctionFieldToBitNumber(TGE) | FunctionFieldToBitNumber(TGEU) |
+ FunctionFieldToBitNumber(TLT) | FunctionFieldToBitNumber(TLTU) |
+ FunctionFieldToBitNumber(TEQ) | FunctionFieldToBitNumber(TNE) |
+ FunctionFieldToBitNumber(MOVZ) | FunctionFieldToBitNumber(MOVN) |
+ FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) |
+ FunctionFieldToBitNumber(SELNEZ_S);
+
+
// Get the encoding type of the instruction.
- Type InstructionType() const;
+ inline Type InstructionType(TypeChecks checks = NORMAL) const;
// Accessors for the different named fields used in the MIPS encoding.
@@ -1078,6 +1147,111 @@ const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2;
const int kInvalidStackOffset = -1;
const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
+
+Instruction::Type Instruction::InstructionType(TypeChecks checks) const {
+ if (checks == EXTRA) {
+ if (OpcodeToBitNumber(OpcodeFieldRaw()) & kOpcodeImmediateTypeMask) {
+ return kImmediateType;
+ }
+ }
+ switch (OpcodeFieldRaw()) {
+ case SPECIAL:
+ if (checks == EXTRA) {
+ if (FunctionFieldToBitNumber(FunctionFieldRaw()) &
+ kFunctionFieldRegisterTypeMask) {
+ return kRegisterType;
+ } else {
+ return kUnsupported;
+ }
+ } else {
+ return kRegisterType;
+ }
+ break;
+ case SPECIAL2:
+ switch (FunctionFieldRaw()) {
+ case MUL:
+ case CLZ:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ }
+ break;
+ case SPECIAL3:
+ switch (FunctionFieldRaw()) {
+ case INS:
+ case EXT:
+ case DEXT:
+ return kRegisterType;
+ case BSHFL: {
+ int sa = SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case BITSWAP:
+ return kRegisterType;
+ case WSBH:
+ case SEB:
+ case SEH:
+ return kUnsupported;
+ }
+ sa >>= kBp2Bits;
+ switch (sa) {
+ case ALIGN:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ }
+ }
+ case DBSHFL: {
+ int sa = SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case DBITSWAP:
+ return kRegisterType;
+ case DSBH:
+ case DSHD:
+ return kUnsupported;
+ }
+ sa = SaFieldRaw() >> kSaShift;
+ sa >>= kBp3Bits;
+ switch (sa) {
+ case DALIGN:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ }
+ }
+ default:
+ return kUnsupported;
+ }
+ break;
+ case COP1: // Coprocessor instructions.
+ switch (RsFieldRawNoAssert()) {
+ case BC1: // Branch on coprocessor condition.
+ case BC1EQZ:
+ case BC1NEZ:
+ return kImmediateType;
+ default:
+ return kRegisterType;
+ }
+ break;
+ case COP1X:
+ return kRegisterType;
+
+ // 26-bit immediate type instructions, e.g.: j imm26.
+ case J:
+ case JAL:
+ return kJumpType;
+
+ default:
+ if (checks == NORMAL) {
+ return kImmediateType;
+ } else {
+ return kUnsupported;
+ }
+ }
+ return kUnsupported;
+}
+
+#undef OpcodeToBitNumber
+#undef FunctionFieldToBitNumber
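
The masks replace the old per-opcode switch with constant-time bit tests: each 6-bit opcode (or function) field selects one bit of a 64-bit constant, so classifying an instruction is a shift and an AND. A self-contained sketch (kOpcodeShiftSketch stands in for the real kOpcodeShift, which is 26):

    #include <cstdint>
    constexpr int kOpcodeShiftSketch = 26;  // opcode field occupies bits 31..26
    constexpr uint64_t OpcodeBit(uint32_t opcode_field) {
      return 1ULL << (opcode_field >> kOpcodeShiftSketch);
    }
    bool IsImmediateTypeSketch(uint32_t opcode_field, uint64_t immediate_mask) {
      return (OpcodeBit(opcode_field) & immediate_mask) != 0;  // no switch needed
    }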
} } // namespace v8::internal
#endif // #ifndef V8_MIPS_CONSTANTS_H_
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index 9639cef4dc..ffab261cd1 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -1828,7 +1828,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%08x ",
instr->InstructionBits());
- switch (instr->InstructionType()) {
+ switch (instr->InstructionType(Instruction::TypeChecks::EXTRA)) {
case Instruction::kRegisterType: {
return DecodeTypeRegister(instr);
}
diff --git a/deps/v8/src/mips64/frames-mips64.cc b/deps/v8/src/mips64/frames-mips64.cc
index 5427367d47..f8ac6bf194 100644
--- a/deps/v8/src/mips64/frames-mips64.cc
+++ b/deps/v8/src/mips64/frames-mips64.cc
@@ -8,6 +8,7 @@
#include "src/frames.h"
#include "src/mips64/assembler-mips64-inl.h"
#include "src/mips64/assembler-mips64.h"
+#include "src/mips64/frames-mips64.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 6f1201d26a..ab697812de 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -31,6 +31,11 @@ const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return a4; }
const Register VectorStoreICDescriptor::VectorRegister() { return a3; }
+const Register VectorStoreTransitionDescriptor::SlotRegister() { return a4; }
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return a3; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return a5; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return a3; }
@@ -41,14 +46,23 @@ const Register StoreGlobalViaContextDescriptor::SlotRegister() { return a2; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return a0; }
-const Register InstanceofDescriptor::left() { return a0; }
-const Register InstanceofDescriptor::right() { return a1; }
+const Register InstanceOfDescriptor::LeftRegister() { return a1; }
+const Register InstanceOfDescriptor::RightRegister() { return a0; }
+
+
+const Register StringCompareDescriptor::LeftRegister() { return a1; }
+const Register StringCompareDescriptor::RightRegister() { return a0; }
const Register ArgumentsAccessReadDescriptor::index() { return a1; }
const Register ArgumentsAccessReadDescriptor::parameter_count() { return a0; }
+const Register ArgumentsAccessNewDescriptor::function() { return a1; }
+const Register ArgumentsAccessNewDescriptor::parameter_count() { return a2; }
+const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return a3; }
+
+
const Register ApiGetterDescriptor::function_address() { return a2; }
@@ -64,10 +78,10 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-void StoreTransitionDescriptor::InitializePlatformSpecific(
+void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- MapRegister()};
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ SlotRegister(), VectorRegister(), MapRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -94,6 +108,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToStringDescriptor::ReceiverRegister() { return a0; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return a0; }
@@ -181,6 +199,15 @@ void CallConstructDescriptor::InitializePlatformSpecific(
}
+void CallTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: target
+ // a0: number of arguments
+ Register registers[] = {a1, a0};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a2, a1, a0};
@@ -363,6 +390,18 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ a0, // argument count (including receiver)
+ a2, // address of first argument
+ a1 // the target callable to be called
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/mips64/lithium-codegen-mips64.cc b/deps/v8/src/mips64/lithium-codegen-mips64.cc
index 77813d50cb..a26d099a62 100644
--- a/deps/v8/src/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/mips64/lithium-codegen-mips64.cc
@@ -4,12 +4,12 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/mips64/lithium-codegen-mips64.h"
#include "src/mips64/lithium-gap-resolver-mips64.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -116,8 +116,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
- !info()->is_native() && info()->scope()->has_this_declaration()) {
+ if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -166,16 +165,27 @@ bool LCodeGen::GeneratePrologue() {
if (info()->saves_caller_doubles()) {
SaveCallerDoubles();
}
+ return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+ Comment(";;; Prologue begin");
// Possibly allocate a local context.
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info()->scope()->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in a1.
- DCHECK(!info()->scope()->is_script_scope());
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+ if (info()->scope()->is_script_scope()) {
+ __ push(a1);
+ __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ CallRuntime(Runtime::kNewScriptContext, 2);
+ deopt_mode = Safepoint::kLazyDeopt;
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -183,7 +193,8 @@ bool LCodeGen::GeneratePrologue() {
__ push(a1);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoLazyDeopt);
+ RecordSafepoint(deopt_mode);
+
// Context is returned in v0. It replaces the context passed to us.
// It's saved in the stack and kept live in cp.
__ mov(cp, v0);
@@ -216,13 +227,7 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; End allocate local context");
}
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- // We have not executed any compiled code yet, so cp still holds the
- // incoming context.
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
+ Comment(";;; Prologue end");
}
@@ -772,7 +777,6 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- DCHECK(info()->IsOptimizing() || info()->IsStub());
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
@@ -1017,11 +1021,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCompare: {
- StringCompareStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -2158,11 +2157,17 @@ void LCodeGen::EmitBranchF(InstrType instr,
}
-template<class InstrType>
-void LCodeGen::EmitFalseBranch(InstrType instr,
- Condition condition,
- Register src1,
- const Operand& src2) {
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
+ Register src1, const Operand& src2) {
+ int true_block = instr->TrueDestination(chunk_);
+ __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
+ Register src1, const Operand& src2) {
int false_block = instr->FalseDestination(chunk_);
__ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
}
@@ -2482,46 +2487,6 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
}
-Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Register temp2,
- Label* is_not_object,
- Label* is_object) {
- __ JumpIfSmi(input, is_not_object);
-
- __ LoadRoot(temp2, Heap::kNullValueRootIndex);
- __ Branch(is_object, eq, input, Operand(temp2));
-
- // Load map.
- __ ld(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
- __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
- __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
-
- // Load instance type and check that it is in object type range.
- __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
- __ Branch(is_not_object,
- lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
-
- return le;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
- Register temp2 = scratch0();
-
- Condition true_cond =
- EmitIsObject(reg, temp1, temp2,
- instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
-
- EmitBranch(instr, true_cond, temp2,
- Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -2593,15 +2558,14 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
-
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ DCHECK(ToRegister(instr->left()).is(a1));
+ DCHECK(ToRegister(instr->right()).is(a0));
- Condition condition = ComputeCompareCondition(op);
+ Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
- EmitBranch(instr, condition, v0, Operand(zero_reg));
+ EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
+ Operand(zero_reg));
}
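
The new lowering calls the StringCompare code stub and branches on its return value in v0 against zero, with the condition derived from the comparison token. A minimal sketch of that contract, using std::string in place of V8's heap strings (illustrative, not V8 API):

    #include <string>

    // Three-way compare; the stub's result follows the same sign convention.
    int StringCompareResult(const std::string& left, const std::string& right) {
      return left.compare(right);  // < 0, 0, or > 0
    }

    // ComputeCompareCondition maps the token to a test against zero; this toy
    // version covers the relational and equality tokens.
    bool BranchTaken(int result, const char* op) {
      if (op[0] == '<') return op[1] == '=' ? result <= 0 : result < 0;
      if (op[0] == '>') return op[1] == '=' ? result >= 0 : result > 0;
      return result == 0;  // "==" and "==="
    }
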
@@ -2756,141 +2720,41 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
Label true_label, done;
- DCHECK(ToRegister(instr->left()).is(a0)); // Object is in a0.
- DCHECK(ToRegister(instr->right()).is(a1)); // Function is in a1.
- Register result = ToRegister(instr->result());
- DCHECK(result.is(v0));
+ DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+ DCHECK(ToRegister(instr->result()).is(v0));
- InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ InstanceOfStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-
- __ Branch(&true_label, eq, result, Operand(zero_reg));
- __ li(result, Operand(factory()->false_value()));
- __ Branch(&done);
- __ bind(&true_label);
- __ li(result, Operand(factory()->true_value()));
- __ bind(&done);
-}
-
-
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
- }
- LInstruction* instr() override { return instr_; }
- Label* map_check() { return &map_check_; }
-
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
-
- DCHECK(object.is(a0));
- DCHECK(result.is(v0));
-
- // A Smi is not instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurrences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = temp;
- __ ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
-
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- __ bind(deferred->map_check()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch with
- // the cached map.
- Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ li(at, Operand(cell));
- __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
- __ BranchShort(&cache_miss, ne, map, Operand(at));
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch
- // with true or false. The distance from map check has to be constant.
- __ li(result, Operand(factory()->the_hole_value()));
- __ Branch(&done);
-
- // The inlined call site cache did not match. Check null and string before
- // calling the deferred code.
- __ bind(&cache_miss);
- // Null is not instance of anything.
- __ LoadRoot(temp, Heap::kNullValueRootIndex);
- __ Branch(&false_result, eq, object, Operand(temp));
-
- // String values are not instances of anything.
- Condition cc = __ IsObjectStringType(object, temp, temp);
- __ Branch(&false_result, cc, temp, Operand(zero_reg));
-
- // Go to the deferred code.
- __ Branch(deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
-
- // Here result has either true or false. Deferred code also produces true or
- // false object.
- __ bind(deferred->exit());
- __ bind(&done);
}
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- Register result = ToRegister(instr->result());
- DCHECK(result.is(v0));
-
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(isolate(), flags);
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+ LHasInPrototypeChainAndBranch* instr) {
+ Register const object = ToRegister(instr->object());
+ Register const object_map = scratch0();
+ Register const object_prototype = object_map;
+ Register const prototype = ToRegister(instr->prototype());
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
+ // The {object} must be a spec object. It's sufficient to know that {object}
+ // is not a smi, since all other non-spec objects have {null} prototypes and
+ // will be ruled out below.
+ if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+ __ SmiTst(object, at);
+ EmitFalseBranch(instr, eq, at, Operand(zero_reg));
+ }
- // Get the temp register reserved by the instruction. This needs to be a4 as
- // its slot of the pushing of safepoint registers is used to communicate the
- // offset to the location of the map check.
- Register temp = ToRegister(instr->temp());
- DCHECK(temp.is(a4));
- __ li(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 13;
- int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
- Label before_push_delta;
- __ bind(&before_push_delta);
- {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- __ li(temp, Operand(delta * kIntSize), CONSTANT_SIZE);
- __ StoreToSafepointRegisterSlot(temp, temp);
- }
- CallCodeGeneric(stub.GetCode(),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value into the result register slot and
- // restore all registers.
- __ StoreToSafepointRegisterSlot(result, result);
+ // Loop through the {object}'s prototype chain looking for the {prototype}.
+ __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ Label loop;
+ __ bind(&loop);
+ __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
+ __ LoadRoot(at, Heap::kNullValueRootIndex);
+ EmitFalseBranch(instr, eq, object_prototype, Operand(at));
+ __ Branch(&loop, USE_DELAY_SLOT);
+ __ ld(object_map, FieldMemOperand(object_prototype,
+ HeapObject::kMapOffset)); // In delay slot.
}
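
What the emitted loop computes, as a toy C++ model (Obj and its single prototype pointer are illustrative stand-ins for V8's map/prototype fields, not the real object layout):

    // Toy object model: each object points at its prototype; nullptr plays
    // the role of null at the end of the chain.
    struct Obj { const Obj* prototype; };

    // Mirrors DoHasInPrototypeChainAndBranch: load the prototype from the
    // map, take the true branch on a match, the false branch on null,
    // otherwise keep walking.
    bool HasInPrototypeChain(const Obj* object, const Obj* prototype) {
      for (const Obj* p = object->prototype; p != nullptr; p = p->prototype) {
        if (p == prototype) return true;
      }
      return false;
    }
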
@@ -3117,7 +2981,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
- offset += kPointerSize / 2;
+ offset = SmiWordOffset(offset);
representation = Representation::Integer32();
}
__ Load(result, FieldMemOperand(object, offset), representation);
@@ -3391,7 +3255,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
- offset += kPointerSize / 2;
+ offset = SmiWordOffset(offset);
}
__ Load(result, MemOperand(store_base, offset), representation);
@@ -3712,11 +3576,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Set a0 to arguments count if adaptation is not needed. Assumes that a0
- // is available to write to at this point.
- if (dont_adapt_arguments) {
- __ li(a0, Operand(arity));
- }
+ // Always initialize a0 to the number of actual arguments.
+ __ li(a0, Operand(arity));
// Invoke function.
__ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
@@ -4142,9 +4003,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(a1));
DCHECK(ToRegister(instr->result()).is(v0));
- if (instr->hydrogen()->pass_argument_count()) {
- __ li(a0, Operand(instr->arity()));
- }
+ __ li(a0, Operand(instr->arity()));
// Change context.
__ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -4343,7 +4202,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
__ AssertSmi(scratch2);
}
// Store int value directly to upper half of the smi.
- offset += kPointerSize / 2;
+ offset = SmiWordOffset(offset);
representation = Representation::Integer32();
}
MemOperand operand = FieldMemOperand(destination, offset);
@@ -4609,7 +4468,7 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
- offset += kPointerSize / 2;
+ offset = SmiWordOffset(offset);
representation = Representation::Integer32();
}
@@ -5740,7 +5599,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// a0 = regexp literal clone.
// a2 and a4-a6 are used as temporaries.
int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
__ li(a7, instr->hydrogen()->literals());
__ ld(a1, FieldMemOperand(a7, literal_offset));
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -5784,26 +5643,6 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
}
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->kind());
- __ li(a2, Operand(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else {
- __ li(a2, Operand(instr->hydrogen()->shared_info()));
- __ li(a1, Operand(pretenure ? factory()->true_value()
- : factory()->false_value()));
- __ Push(cp, a2, a1);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(a3));
DCHECK(ToRegister(instr->result()).is(v0));
@@ -5898,28 +5737,26 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
final_branch_condition = ne;
} else if (String::Equals(type_name, factory->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label);
- __ GetObjectType(input, scratch, input);
- __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
- *cmp1 = input;
- *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
+ __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ And(scratch, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ *cmp1 = scratch;
+ *cmp2 = Operand(1 << Map::kIsCallable);
final_branch_condition = eq;
} else if (String::Equals(type_name, factory->object_string())) {
__ JumpIfSmi(input, false_label);
__ LoadRoot(at, Heap::kNullValueRootIndex);
__ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
- Register map = input;
- __ GetObjectType(input, map, scratch);
- __ Branch(false_label,
- lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ Branch(USE_DELAY_SLOT, false_label,
- gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- // map is still valid, so the BitField can be loaded in delay slot.
- // Check for undetectable objects => false.
- __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
- __ And(at, at, 1 << Map::kIsUndetectable);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ GetObjectType(input, scratch, scratch1());
+ __ Branch(false_label, lt, scratch1(), Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Check for callable or undetectable objects => false.
+ __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ And(at, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
*cmp1 = at;
*cmp2 = Operand(zero_reg);
final_branch_condition = eq;
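
The rewritten checks classify purely from the map's bit field: "function" now means callable and not undetectable, while "object" requires a spec object that is neither callable nor undetectable (plus null). A sketch with illustrative bit positions — the real values live in Map::kIsCallable and Map::kIsUndetectable:

    #include <cstdint>

    constexpr uint8_t kIsCallable = 1 << 0;      // illustrative bit layout,
    constexpr uint8_t kIsUndetectable = 1 << 1;  // not V8's actual one

    bool TypeofIsFunction(uint8_t bit_field) {
      // Callable and not undetectable.
      return (bit_field & (kIsCallable | kIsUndetectable)) == kIsCallable;
    }

    bool TypeofIsObject(uint8_t bit_field, bool is_spec_object, bool is_null) {
      if (is_null) return true;  // typeof null == "object"
      // Spec object, neither callable nor undetectable.
      return is_spec_object &&
             (bit_field & (kIsCallable | kIsUndetectable)) == 0;
    }
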
@@ -5977,7 +5814,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/deps/v8/src/mips64/lithium-codegen-mips64.h b/deps/v8/src/mips64/lithium-codegen-mips64.h
index 6fb7bc3c85..b08de167be 100644
--- a/deps/v8/src/mips64/lithium-codegen-mips64.h
+++ b/deps/v8/src/mips64/lithium-codegen-mips64.h
@@ -114,8 +114,6 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
@@ -280,10 +278,11 @@ class LCodeGen: public LCodeGenBase {
Condition condition,
FPURegister src1,
FPURegister src2);
- template<class InstrType>
- void EmitFalseBranch(InstrType instr,
- Condition condition,
- Register src1,
+ template <class InstrType>
+ void EmitTrueBranch(InstrType instr, Condition condition, Register src1,
+ const Operand& src2);
+ template <class InstrType>
+ void EmitFalseBranch(InstrType instr, Condition condition, Register src1,
const Operand& src2);
template<class InstrType>
void EmitFalseBranchF(InstrType instr,
@@ -306,15 +305,6 @@ class LCodeGen: public LCodeGenBase {
Register* cmp1,
Operand* cmp2);
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Register temp2,
- Label* is_not_object,
- Label* is_object);
-
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
diff --git a/deps/v8/src/mips64/lithium-mips64.cc b/deps/v8/src/mips64/lithium-mips64.cc
index 26a03fcc67..4f2f161524 100644
--- a/deps/v8/src/mips64/lithium-mips64.cc
+++ b/deps/v8/src/mips64/lithium-mips64.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/mips64/lithium-mips64.h"
+
#include <sstream>
#if V8_TARGET_ARCH_MIPS64
@@ -175,13 +177,6 @@ void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
value()->PrintTo(stream);
@@ -931,28 +926,25 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall()) {
+ if (instr->IsCall() || instr->IsPrologue()) {
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- LInstruction* instruction_needing_environment = NULL;
if (hydrogen_val->HasObservableSideEffects()) {
HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- instruction_needing_environment = instr;
sim->ReplayEnvironment(current_block_->last_environment());
hydrogen_value_for_lazy_bailout = sim;
}
LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
}
}
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+ return new (zone()) LPrologue();
+}
+
+
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -999,22 +991,21 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* left =
+ UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+ LOperand* right =
+ UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result =
- new(zone()) LInstanceOf(context, UseFixed(instr->left(), a0),
- UseFixed(instr->right(), a1));
+ LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, v0), instr);
}
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(
- UseFixed(instr->context(), cp),
- UseFixed(instr->left(), a0),
- FixedTemp(a4));
- return MarkAsCall(DefineFixed(result, v0), instr);
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+ HHasInPrototypeChainAndBranch* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* prototype = UseRegister(instr->prototype());
+ return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
}
@@ -1738,14 +1729,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
}
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()),
- temp);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -2462,13 +2445,6 @@ LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(
- DefineFixed(new(zone()) LFunctionLiteral(context), v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
diff --git a/deps/v8/src/mips64/lithium-mips64.h b/deps/v8/src/mips64/lithium-mips64.h
index 71ce5496ac..01463c9d63 100644
--- a/deps/v8/src/mips64/lithium-mips64.h
+++ b/deps/v8/src/mips64/lithium-mips64.h
@@ -81,19 +81,17 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
+ V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -133,6 +131,7 @@ class LCodeGen;
V(OsrEntry) \
V(Parameter) \
V(Power) \
+ V(Prologue) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -235,8 +234,6 @@ class LInstruction : public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
@@ -392,6 +389,12 @@ class LGoto final : public LTemplateInstruction<0, 0, 0> {
};
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -1007,23 +1010,6 @@ class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
};
-class LIsObjectAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1188,41 +1174,27 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
+ LOperand* context() const { return inputs_[0]; }
+ LOperand* left() const { return inputs_[1]; }
+ LOperand* right() const { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal final : public LTemplateInstruction<1, 2, 1> {
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
- LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
+ LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+ inputs_[0] = object;
+ inputs_[1] = prototype;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) override {
- lazy_deopt_env_ = env;
- }
+ LOperand* object() const { return inputs_[0]; }
+ LOperand* prototype() const { return inputs_[1]; }
- private:
- LEnvironment* lazy_deopt_env_;
+ DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+ "has-in-prototype-chain-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
};
@@ -2598,19 +2570,6 @@ class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFunctionLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 006f15b967..26229c9d87 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -9,8 +9,8 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
+#include "src/mips64/macro-assembler-mips64.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -1209,7 +1209,24 @@ void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
// Assert fail if the offset from start of object IS actually aligned.
// ONLY use with known misalignment, since there is performance cost.
DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
- // TODO(plind): endian dependency.
+ if (kArchEndian == kLittle) {
+ lwu(rd, rs);
+ lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ dsll32(scratch, scratch, 0);
+ } else {
+ lw(rd, rs);
+ lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ dsll32(rd, rd, 0);
+ }
+ Daddu(rd, rd, scratch);
+}
+
+
+// Load a consecutive 32-bit word pair into a 64-bit register, putting the
+// first word in the low bits and the second word in the high bits.
+void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
+ Register scratch) {
lwu(rd, rs);
lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
dsll32(scratch, scratch, 0);
@@ -1223,7 +1240,21 @@ void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
// Assert fail if the offset from start of object IS actually aligned.
// ONLY use with known misalignment, since there is performance cost.
DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
- // TODO(plind): endian dependency.
+ if (kArchEndian == kLittle) {
+ sw(rd, rs);
+ dsrl32(scratch, rd, 0);
+ sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ } else {
+ sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ dsrl32(scratch, rd, 0);
+ sw(scratch, rs);
+ }
+}
+
+
+// Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
+void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
+ Register scratch) {
sw(rd, rs);
dsrl32(scratch, rd, 0);
sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
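
Uld/Usd and the new LoadWordPair/StoreWordPair differ only in which 32-bit half lands at the lower address. A host-side model of the reassembly, assuming native-endian 4-byte loads like lwu/lw:

    #include <cstdint>

    // Combine two 32-bit words into a 64-bit value. On little endian the
    // word at the lower address is the low half; on big endian it is the
    // high half.
    uint64_t CombineWordPair(uint32_t at_lower_addr, uint32_t at_higher_addr,
                             bool little_endian) {
      uint64_t lo = little_endian ? at_lower_addr : at_higher_addr;
      uint64_t hi = little_endian ? at_higher_addr : at_lower_addr;
      return (hi << 32) | lo;
    }
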
@@ -1433,21 +1464,6 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
}
-void MacroAssembler::FlushICache(Register address, unsigned instructions) {
- RegList saved_regs = kJSCallerSaved | ra.bit();
- MultiPush(saved_regs);
- AllowExternalCallThatCantCauseGC scope(this);
-
- // Save to a0 in case address == a4.
- Move(a0, address);
- PrepareCallCFunction(2, a4);
-
- li(a1, instructions * kInstrSize);
- CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
- MultiPop(saved_regs);
-}
-
-
void MacroAssembler::Ext(Register rt,
Register rs,
uint16_t pos,
@@ -3789,21 +3805,39 @@ void MacroAssembler::CopyBytes(Register src,
// TODO(kalmard) check if this can be optimized to use sw in most cases.
// Can't use unaligned access - copy byte by byte.
- sb(scratch, MemOperand(dst, 0));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 1));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 2));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 3));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 4));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 5));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 6));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 7));
+ if (kArchEndian == kLittle) {
+ sb(scratch, MemOperand(dst, 0));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 3));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 4));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 5));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 6));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 7));
+ } else {
+ sb(scratch, MemOperand(dst, 7));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 6));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 5));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 4));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 3));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 0));
+ }
Daddu(dst, dst, 8);
Dsubu(length, length, Operand(kPointerSize));
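
The CopyBytes tail now emits the byte stores in the address order that matches the target's byte order, so memory ends up identical to a native 8-byte store. A model of the two orderings:

    #include <cstdint>

    // Store a 64-bit word one byte at a time. Byte i of the value (LSB
    // first, mirroring the successive dsrl-by-8 shifts) goes to dst[i] on
    // little endian and to dst[7 - i] on big endian.
    void StoreWordBytewise(uint64_t word, unsigned char* dst,
                           bool little_endian) {
      for (int i = 0; i < 8; ++i) {
        dst[little_endian ? i : 7 - i] =
            static_cast<unsigned char>(word >> (8 * i));
      }
    }
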
@@ -4001,7 +4035,11 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
- Move(dst, v0, v1);
+ if (kArchEndian == kLittle) {
+ Move(dst, v0, v1);
+ } else {
+ Move(dst, v1, v0);
+ }
} else {
Move(dst, f0); // Reg f0 is o32 ABI FP return value.
}
@@ -4010,9 +4048,13 @@ void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
- Move(dst, a0, a1);
+ if (kArchEndian == kLittle) {
+ Move(dst, a0, a1);
+ } else {
+ Move(dst, a1, a0);
+ }
} else {
- Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
+ Move(dst, f12); // Reg f12 is n64 ABI FP first argument value.
}
}
@@ -4021,7 +4063,11 @@ void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
Move(f12, src);
} else {
- Move(a0, a1, src);
+ if (kArchEndian == kLittle) {
+ Move(a0, a1, src);
+ } else {
+ Move(a1, a0, src);
+ }
}
}
@@ -4030,7 +4076,11 @@ void MacroAssembler::MovToFloatResult(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
Move(f0, src);
} else {
- Move(v0, v1, src);
+ if (kArchEndian == kLittle) {
+ Move(v0, v1, src);
+ } else {
+ Move(v1, v0, src);
+ }
}
}
@@ -4048,8 +4098,13 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
Move(fparg2, src2);
}
} else {
- Move(a0, a1, src1);
- Move(a2, a3, src2);
+ if (kArchEndian == kLittle) {
+ Move(a0, a1, src1);
+ Move(a2, a3, src2);
+ } else {
+ Move(a1, a0, src1);
+ Move(a3, a2, src2);
+ }
}
}
@@ -4084,10 +4139,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
+ li(a0, Operand(actual.immediate()));
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
- li(a0, Operand(actual.immediate()));
const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
if (expected.immediate() == sentinel) {
// Don't worry about adapting arguments for builtins that
@@ -4101,8 +4156,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
} else if (actual.is_immediate()) {
- Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
li(a0, Operand(actual.immediate()));
+ Branch(&regular_invoke, eq, expected.reg(), Operand(a0));
} else {
Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
}
@@ -4216,24 +4271,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
}
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- ld(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
-}
-
-
void MacroAssembler::IsObjectJSStringType(Register object,
Register scratch,
Label* fail) {
@@ -4273,34 +4310,8 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
}
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function) {
- Label non_instance;
- if (miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- GetObjectType(function, result, scratch);
- Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
- ld(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- lwu(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- And(scratch, scratch,
- Operand(1 << SharedFunctionInfo::kBoundFunction));
- Branch(miss, ne, scratch, Operand(zero_reg));
-
- // Make sure that the function has an instance prototype.
- lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
- Branch(&non_instance, ne, scratch, Operand(zero_reg));
- }
-
+void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss) {
// Get the prototype or initial map from the function.
ld(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -4319,15 +4330,6 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Get the prototype from the initial map.
ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
- if (miss_on_bound_function) {
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- GetMapConstructor(result, result, scratch, scratch);
- }
-
// All done.
bind(&done);
}
@@ -4757,13 +4759,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(t9, id);
+ GetBuiltinEntry(t9, native_context_index);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(t9));
Call(t9);
@@ -4776,19 +4777,19 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
+ int native_context_index) {
// Load the builtins object into target register.
ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ ld(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
- ld(target, FieldMemOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+ ld(target, ContextOperand(target, native_context_index));
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+void MacroAssembler::GetBuiltinEntry(Register target,
+ int native_context_index) {
DCHECK(!target.is(a1));
- GetBuiltinFunction(a1, id);
+ GetBuiltinFunction(a1, native_context_index);
// Load the code entry point from the builtins object.
ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
}
@@ -4927,6 +4928,12 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+ ld(dst, GlobalObjectOperand());
+ ld(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
+}
+
+
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
@@ -5018,6 +5025,14 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
+void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+ ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ ld(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ ld(vector,
+ FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
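
EmitLoadTypeFeedbackVector is three dependent loads: the function from the frame, the SharedFunctionInfo from the function, and the feedback vector from the shared info. As plain pointer chasing (toy struct and field names, not V8's actual layout):

    struct FeedbackVector {};
    struct SharedFunctionInfo { FeedbackVector* feedback_vector; };
    struct JSFunctionModel { SharedFunctionInfo* shared; };
    struct FrameModel { JSFunctionModel* function; };

    // Mirror of the three ld instructions above.
    FeedbackVector* LoadTypeFeedbackVector(const FrameModel* frame) {
      return frame->function->shared->feedback_vector;
    }
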
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// Out-of-line constant pool not implemented on mips64.
@@ -5088,7 +5103,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
if (save_doubles) {
// The stack is already aligned to 0 modulo 8 for stores with sdc1.
int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
- int space = kNumOfSavedRegisters * kDoubleSize ;
+ int space = kNumOfSavedRegisters * kDoubleSize;
Dsubu(sp, sp, Operand(space));
// Remember: we only need to save every 2nd double FPU value.
for (int i = 0; i < kNumOfSavedRegisters; i++) {
@@ -5176,7 +5191,7 @@ void MacroAssembler::InitializeNewString(Register string,
sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
li(scratch1, Operand(String::kEmptyHashField));
sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
- sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
+ sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}
@@ -5395,13 +5410,10 @@ void MacroAssembler::AssertSmi(Register object) {
void MacroAssembler::AssertString(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
- SmiTst(object, a4);
- Check(ne, kOperandIsASmiAndNotAString, a4, Operand(zero_reg));
- push(object);
- ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
- pop(object);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAString, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(lo, kOperandIsNotAString, t8, Operand(FIRST_NONSTRING_TYPE));
}
}
@@ -5409,13 +5421,21 @@ void MacroAssembler::AssertString(Register object) {
void MacroAssembler::AssertName(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
- SmiTst(object, a4);
- Check(ne, kOperandIsASmiAndNotAName, a4, Operand(zero_reg));
- push(object);
- ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
- Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
- pop(object);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAName, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(le, kOperandIsNotAName, t8, Operand(LAST_NAME_TYPE));
+ }
+}
+
+
+void MacroAssembler::AssertFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, kOperandIsASmiAndNotAFunction, t8, Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, kOperandIsNotAFunction, t8, Operand(JS_FUNCTION_TYPE));
}
}
@@ -5427,11 +5447,9 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
AssertNotSmi(object);
LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
Branch(&done_checking, eq, object, Operand(scratch));
- push(object);
- ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
+ ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
- Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
- pop(object);
+ Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
bind(&done_checking);
}
}
@@ -5456,88 +5474,6 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
}
-void MacroAssembler::LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- ld(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- // dsra(mask, mask, kSmiTagSize + 1);
- dsra32(mask, mask, 1);
- Daddu(mask, mask, -1); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- JumpIfSmi(object, &is_smi);
- CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- Daddu(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- ld(scratch2, MemOperand(scratch1, kPointerSize));
- ld(scratch1, MemOperand(scratch1, 0));
- Xor(scratch1, scratch1, Operand(scratch2));
- And(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- dsll(scratch1, scratch1, kPointerSizeLog2 + 1);
- Daddu(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- ld(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- JumpIfSmi(probe, not_found);
- ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
- ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- BranchF(&load_result_from_cache, NULL, eq, f12, f14);
- Branch(not_found);
-
- bind(&is_smi);
- Register scratch = scratch1;
- // dsra(scratch, object, 1); // Shift away the tag.
- dsra32(scratch, scratch, 0);
- And(scratch, mask, Operand(scratch));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- dsll(scratch, scratch, kPointerSizeLog2 + 1);
- Daddu(scratch, number_string_cache, scratch);
-
- // Check if the entry is the smi we are looking for.
- ld(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- Branch(not_found, ne, object, Operand(probe));
-
- // Get the result from the cache.
- bind(&load_result_from_cache);
- ld(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
-
- IncrementCounter(isolate()->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
@@ -5750,94 +5686,6 @@ void MacroAssembler::CallCFunctionHelper(Register function,
#undef BRANCH_ARGS_CHECK
-void MacroAssembler::PatchRelocatedValue(Register li_location,
- Register scratch,
- Register new_value) {
- lwu(scratch, MemOperand(li_location));
- // At this point scratch is a lui(at, ...) instruction.
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, kTheInstructionToPatchShouldBeALui,
- scratch, Operand(LUI));
- lwu(scratch, MemOperand(li_location));
- }
- dsrl32(t9, new_value, 0);
- Ins(scratch, t9, 0, kImm16Bits);
- sw(scratch, MemOperand(li_location));
-
- lwu(scratch, MemOperand(li_location, kInstrSize));
- // scratch is now ori(at, ...).
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, kTheInstructionToPatchShouldBeAnOri,
- scratch, Operand(ORI));
- lwu(scratch, MemOperand(li_location, kInstrSize));
- }
- dsrl(t9, new_value, kImm16Bits);
- Ins(scratch, t9, 0, kImm16Bits);
- sw(scratch, MemOperand(li_location, kInstrSize));
-
- lwu(scratch, MemOperand(li_location, kInstrSize * 3));
- // scratch is now ori(at, ...).
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, kTheInstructionToPatchShouldBeAnOri,
- scratch, Operand(ORI));
- lwu(scratch, MemOperand(li_location, kInstrSize * 3));
- }
-
- Ins(scratch, new_value, 0, kImm16Bits);
- sw(scratch, MemOperand(li_location, kInstrSize * 3));
-
- // Update the I-cache so the new lui and ori can be executed.
- FlushICache(li_location, 4);
-}
-
-void MacroAssembler::GetRelocatedValue(Register li_location,
- Register value,
- Register scratch) {
- lwu(value, MemOperand(li_location));
- if (emit_debug_code()) {
- And(value, value, kOpcodeMask);
- Check(eq, kTheInstructionShouldBeALui,
- value, Operand(LUI));
- lwu(value, MemOperand(li_location));
- }
-
- // value now holds a lui instruction. Extract the immediate.
- andi(value, value, kImm16Mask);
- dsll32(value, value, kImm16Bits);
-
- lwu(scratch, MemOperand(li_location, kInstrSize));
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, kTheInstructionShouldBeAnOri,
- scratch, Operand(ORI));
- lwu(scratch, MemOperand(li_location, kInstrSize));
- }
- // "scratch" now holds an ori instruction. Extract the immediate.
- andi(scratch, scratch, kImm16Mask);
- dsll32(scratch, scratch, 0);
-
- or_(value, value, scratch);
-
- lwu(scratch, MemOperand(li_location, kInstrSize * 3));
- if (emit_debug_code()) {
- And(scratch, scratch, kOpcodeMask);
- Check(eq, kTheInstructionShouldBeAnOri,
- scratch, Operand(ORI));
- lwu(scratch, MemOperand(li_location, kInstrSize * 3));
- }
- // "scratch" now holds an ori instruction. Extract the immediate.
- andi(scratch, scratch, kImm16Mask);
- dsll(scratch, scratch, kImm16Bits);
-
- or_(value, value, scratch);
- // Sign extend extracted address.
- dsra(value, value, kImm16Bits);
-}
-
-
void MacroAssembler::CheckPageFlag(
Register object,
Register scratch,
@@ -5872,8 +5720,8 @@ void MacroAssembler::HasColor(Register object,
GetMarkBits(object, bitmap_scratch, mask_scratch);
Label other_color;
- // Note that we are using a 4-byte aligned 8-byte load.
- Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ // Note that we are using two 4-byte aligned loads.
+ LoadWordPair(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
And(t8, t9, Operand(mask_scratch));
Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
// Shift left 1 by adding.
@@ -5946,7 +5794,8 @@ void MacroAssembler::EnsureNotWhite(
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
- // Note that we are using a 4-byte aligned 8-byte load.
- Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ // Note that we are using two 4-byte aligned loads.
+ LoadWordPair(load_scratch,
+ MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
And(t8, mask_scratch, load_scratch);
Branch(&done, ne, t8, Operand(zero_reg));
@@ -6025,14 +5874,14 @@ void MacroAssembler::EnsureNotWhite(
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
- Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ LoadWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
Or(t8, t8, Operand(mask_scratch));
- Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ StoreWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ LoadWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
Daddu(t8, t8, Operand(length));
- Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ StoreWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
bind(&done);
}
@@ -6045,14 +5894,14 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
And(dst, dst, Operand(Map::EnumLengthBits::kMask));
SmiTag(dst);
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index f2d36e22e2..5dfee07ad9 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -116,6 +116,13 @@ bool AreAliased(Register reg1,
// -----------------------------------------------------------------------------
// Static helper functions.
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+#define SmiWordOffset(offset) (offset + kPointerSize / 2)
+#else
+#define SmiWordOffset(offset) offset
+#endif
+
+
inline MemOperand ContextOperand(Register context, int index) {
return MemOperand(context, Context::SlotOffset(index));
}
@@ -133,9 +140,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
- // Assumes that Smis are shifted by 32 bits and little endianness.
+ // Assumes that Smis are shifted by 32 bits.
STATIC_ASSERT(kSmiShift == 32);
- return MemOperand(rm, offset + (kSmiShift / kBitsPerByte));
+ return MemOperand(rm, SmiWordOffset(offset));
}
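
On 64-bit targets a Smi keeps its 32-bit payload in the upper half of the 8-byte slot; which memory offset that half sits at depends on byte order, which is all SmiWordOffset encodes (and what UntagSmiMemOperand now delegates to). A compile-time model:

    constexpr int kPointerSize = 8;

    // Mirror of the SmiWordOffset macro for both byte orders.
    constexpr int SmiWordOffsetModel(int offset, bool little_endian) {
      return little_endian ? offset + kPointerSize / 2 : offset;
    }

    // On little endian the payload word is 4 bytes into the slot; on big
    // endian it is at the start of the slot.
    static_assert(SmiWordOffsetModel(0, true) == 4, "LE payload at +4");
    static_assert(SmiWordOffsetModel(0, false) == 0, "BE payload at +0");
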
@@ -254,6 +261,8 @@ class MacroAssembler: public Assembler {
void Call(Label* target);
+ void Move(Register dst, Smi* smi) { li(dst, Operand(smi)); }
+
inline void Move(Register dst, Register src) {
if (!dst.is(src)) {
mov(dst, src);
@@ -313,6 +322,24 @@ class MacroAssembler: public Assembler {
void Load(Register dst, const MemOperand& src, Representation r);
void Store(Register src, const MemOperand& dst, Representation r);
+ void PushRoot(Heap::RootListIndex index) {
+ LoadRoot(at, index);
+ Push(at);
+ }
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
+ LoadRoot(at, index);
+ Branch(if_equal, eq, with, Operand(at));
+ }
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, Heap::RootListIndex index,
+ Label* if_not_equal) {
+ LoadRoot(at, index);
+ Branch(if_not_equal, ne, with, Operand(at));
+ }
+
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index);
@@ -662,6 +689,9 @@ class MacroAssembler: public Assembler {
void Uld(Register rd, const MemOperand& rs, Register scratch = at);
void Usd(Register rd, const MemOperand& rs, Register scratch = at);
+ void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
+ void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
+
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
@@ -778,11 +808,6 @@ class MacroAssembler: public Assembler {
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
- // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
- // from C.
- // Does not handle errors.
- void FlushICache(Register address, unsigned instructions);
-
// MIPS64 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -953,6 +978,9 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst);
+
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
@@ -1008,15 +1036,6 @@ class MacroAssembler: public Assembler {
const CallWrapper& call_wrapper);
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- void IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail);
-
void IsObjectJSStringType(Register object,
Register scratch,
Label* fail);
@@ -1071,11 +1090,8 @@ class MacroAssembler: public Assembler {
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other registers may be
// clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function = false);
+ void TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss);
void GetObjectType(Register function,
Register map,
@@ -1374,18 +1390,16 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void JumpToExternalReference(const ExternalReference& builtin,
BranchDelaySlot bd = PROTECT);
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+ // Invoke specified builtin JavaScript function.
+ void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
// setup the function in a1.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ void GetBuiltinEntry(Register target, int native_context_index);
// Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void GetBuiltinFunction(Register target, int native_context_index);
struct Unresolved {
int pc;
@@ -1571,6 +1585,9 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+ void AssertFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1590,18 +1607,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// -------------------------------------------------------------------------
// String utilities.
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- void LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found);
-
// Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialOneByte(
@@ -1676,20 +1681,14 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void StubPrologue();
void Prologue(bool code_pre_aging);
+ // Load the type feedback vector from a JavaScript frame.
+ void EmitLoadTypeFeedbackVector(Register vector);
+
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
void LeaveFrame(StackFrame::Type type);
- // Patch the relocated value (lui/ori pair).
- void PatchRelocatedValue(Register li_location,
- Register scratch,
- Register new_value);
- // Get the relocated value (loaded data) from the lui/ori pair.
- void GetRelocatedValue(Register li_location,
- Register value,
- Register scratch);
-
// Expects object in a0 and returns map with validated enum cache
// in a0. Assumes that any other register can be used as a scratch.
void CheckEnumCache(Register null_value, Label* call_runtime);
@@ -1758,10 +1757,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
InvokeFlag flag,
const CallWrapper& call_wrapper);
- // Get the code for the given builtin. Returns if able to resolve
- // the function in the 'resolved' flag.
- Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
-
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 9a0d8fdce8..b82b2d9b3c 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -909,9 +909,6 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
registers_[pc] = bad_ra;
registers_[ra] = bad_ra;
InitializeCoverage();
- for (int i = 0; i < kNumExceptions; i++) {
- exceptions[i] = 0;
- }
last_debugger_input_ = NULL;
}
@@ -1055,18 +1052,26 @@ void Simulator::set_fpu_register(int fpureg, int64_t value) {
void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
// Set ONLY lower 32-bits, leaving upper bits untouched.
- // TODO(plind): big endian issue.
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- int32_t *pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+ int32_t* pword;
+ if (kArchEndian == kLittle) {
+ pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+ } else {
+ pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]) + 1;
+ }
*pword = value;
}
void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
// Set ONLY upper 32-bits, leaving lower bits untouched.
- // TODO(plind): big endian issue.
DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
- int32_t *phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+ int32_t* phiword;
+ if (kArchEndian == kLittle) {
+ phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+ } else {
+ phiword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+ }
*phiword = value;
}
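The two setters above now honor the simulated register layout on big-endian hosts: the low 32-bit word of a 64-bit FPU register slot sits at byte offset 0 on little-endian machines but at offset 4 on big-endian ones. A minimal standalone sketch of that offset logic (HostIsLittleEndian is a hypothetical runtime stand-in for V8's compile-time kArchEndian check):

#include <cstdint>
#include <cstring>

// Hypothetical runtime replacement for V8's compile-time kArchEndian.
static bool HostIsLittleEndian() {
  const uint32_t probe = 1;
  char first;
  std::memcpy(&first, &probe, 1);
  return first == 1;
}

// Writes only the low 32-bit word of a 64-bit register slot, leaving the
// high word untouched, regardless of host endianness.
static void SetLowWord(int64_t* reg, int32_t value) {
  int32_t* words = reinterpret_cast<int32_t*>(reg);
  words[HostIsLittleEndian() ? 0 : 1] = value;
}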
@@ -2175,495 +2180,25 @@ void Simulator::PrintStopInfo(uint64_t code) {
}
-void Simulator::SignalExceptions() {
- for (int i = 1; i < kNumExceptions; i++) {
- if (exceptions[i] != 0) {
- V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.", i);
- }
- }
+void Simulator::SignalException(Exception e) {
+ V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.",
+ static_cast<int>(e));
}
// Handle execution based on instruction types.
-void Simulator::ConfigureTypeRegister(Instruction* instr, int64_t* alu_out,
- int64_t* i64hilo, uint64_t* u64hilo,
- int64_t* next_pc, int* return_addr_reg,
- bool* do_interrupt, int64_t* i128resultH,
- int64_t* i128resultL) {
- // Every local variable declared here needs to be const.
- // This is to make sure that changed values are sent back to
- // DecodeTypeRegister correctly.
-
- // Instruction fields.
- const Opcode op = instr->OpcodeFieldRaw();
- const int32_t rs_reg = instr->RsValue();
- const int64_t rs = get_register(rs_reg);
- const uint64_t rs_u = static_cast<uint64_t>(rs);
- const int32_t rt_reg = instr->RtValue();
- const int64_t rt = get_register(rt_reg);
- const uint64_t rt_u = static_cast<uint64_t>(rt);
- const int32_t rd_reg = instr->RdValue();
- const uint64_t sa = instr->SaValue();
- const uint8_t bp2 = instr->Bp2Value();
- const uint8_t bp3 = instr->Bp3Value();
-
- const int32_t fs_reg = instr->FsValue();
-
-
- // ---------- Configuration.
- switch (op) {
- case COP1: // Coprocessor instructions.
- switch (instr->RsFieldRaw()) {
- case CFC1:
- // At the moment only FCSR is supported.
- DCHECK(fs_reg == kFCSRRegister);
- *alu_out = FCSR_;
- break;
- case MFC1:
- *alu_out = static_cast<int64_t>(get_fpu_register_word(fs_reg));
- break;
- case DMFC1:
- *alu_out = get_fpu_register(fs_reg);
- break;
- case MFHC1:
- *alu_out = get_fpu_register_hi_word(fs_reg);
- break;
- case CTC1:
- case MTC1:
- case DMTC1:
- case MTHC1:
- case S:
- case D:
- case W:
- case L:
- case PS:
- // Do everything in the execution step.
- break;
- default:
- // BC1 BC1EQZ BC1NEZ handled in DecodeTypeImmed, should never come here.
- UNREACHABLE();
- }
- break;
- case COP1X:
- break;
- case SPECIAL:
- switch (instr->FunctionFieldRaw()) {
- case JR:
- case JALR:
- *next_pc = get_register(instr->RsValue());
- *return_addr_reg = instr->RdValue();
- break;
- case SLL:
- *alu_out = static_cast<int32_t>(rt) << sa;
- break;
- case DSLL:
- *alu_out = rt << sa;
- break;
- case DSLL32:
- *alu_out = rt << sa << 32;
- break;
- case SRL:
- if (rs_reg == 0) {
- // Regular logical right shift of a word by a fixed number of
- // bits instruction. RS field is always equal to 0.
- // Sign-extend the 32-bit result.
- *alu_out = static_cast<int32_t>(static_cast<uint32_t>(rt_u) >> sa);
- } else {
- // Logical right-rotate of a word by a fixed number of bits. This
- // is special case of SRL instruction, added in MIPS32 Release 2.
- // RS field is equal to 00001.
- *alu_out = static_cast<int32_t>(
- base::bits::RotateRight32(static_cast<const uint32_t>(rt_u),
- static_cast<const uint32_t>(sa)));
- }
- break;
- case DSRL:
- *alu_out = rt_u >> sa;
- break;
- case DSRL32:
- *alu_out = rt_u >> sa >> 32;
- break;
- case SRA:
- *alu_out = (int32_t)rt >> sa;
- break;
- case DSRA:
- *alu_out = rt >> sa;
- break;
- case DSRA32:
- *alu_out = rt >> sa >> 32;
- break;
- case SLLV:
- *alu_out = (int32_t)rt << rs;
- break;
- case DSLLV:
- *alu_out = rt << rs;
- break;
- case SRLV:
- if (sa == 0) {
- // Regular logical right-shift of a word by a variable number of
- // bits instruction. SA field is always equal to 0.
- *alu_out = static_cast<int32_t>((uint32_t)rt_u >> rs);
- } else {
- // Logical right-rotate of a word by a variable number of bits.
- // This is special case of SRLV instruction, added in MIPS32
- // Release 2. SA field is equal to 00001.
- *alu_out = static_cast<int32_t>(
- base::bits::RotateRight32(static_cast<const uint32_t>(rt_u),
- static_cast<const uint32_t>(rs_u)));
- }
- break;
- case DSRLV:
- if (sa == 0) {
- // Regular logical right-shift of a word by a variable number of
- // bits instruction. SA field is always equal to 0.
- *alu_out = rt_u >> rs;
- } else {
- // Logical right-rotate of a word by a variable number of bits.
- // This is special case of SRLV instruction, added in MIPS32
- // Release 2. SA field is equal to 00001.
- *alu_out =
- base::bits::RotateRight32(static_cast<const uint32_t>(rt_u),
- static_cast<const uint32_t>(rs_u));
- }
- break;
- case SRAV:
- *alu_out = (int32_t)rt >> rs;
- break;
- case DSRAV:
- *alu_out = rt >> rs;
- break;
- case MFHI: // MFHI == CLZ on R6.
- if (kArchVariant != kMips64r6) {
- DCHECK(instr->SaValue() == 0);
- *alu_out = get_register(HI);
- } else {
- // MIPS spec: If no bits were set in GPR rs, the result written to
- // GPR rd is 32.
- DCHECK(instr->SaValue() == 1);
- *alu_out =
- base::bits::CountLeadingZeros32(static_cast<int32_t>(rs_u));
- }
- break;
- case MFLO:
- *alu_out = get_register(LO);
- break;
- case MULT: { // MULT == D_MUL_MUH.
- int32_t rs_lo = static_cast<int32_t>(rs);
- int32_t rt_lo = static_cast<int32_t>(rt);
- *i64hilo = static_cast<int64_t>(rs_lo) * static_cast<int64_t>(rt_lo);
- break;
- }
- case MULTU:
- *u64hilo = static_cast<uint64_t>(rs_u & 0xffffffff) *
- static_cast<uint64_t>(rt_u & 0xffffffff);
- break;
- case DMULT: // DMULT == D_MUL_MUH.
- if (kArchVariant != kMips64r6) {
- *i128resultH = MultiplyHighSigned(rs, rt);
- *i128resultL = rs * rt;
- } else {
- switch (instr->SaValue()) {
- case MUL_OP:
- *i128resultL = rs * rt;
- break;
- case MUH_OP:
- *i128resultH = MultiplyHighSigned(rs, rt);
- break;
- default:
- UNIMPLEMENTED_MIPS();
- break;
- }
- }
- break;
- case DMULTU:
- UNIMPLEMENTED_MIPS();
- break;
- case ADD:
- case DADD:
- if (HaveSameSign(rs, rt)) {
- if (rs > 0) {
- exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - rt);
- } else if (rs < 0) {
- exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt);
- }
- }
- *alu_out = rs + rt;
- break;
- case ADDU: {
- int32_t alu32_out = static_cast<int32_t>(rs + rt);
- // Sign-extend result of 32bit operation into 64bit register.
- *alu_out = static_cast<int64_t>(alu32_out);
- break;
- }
- case DADDU:
- *alu_out = rs + rt;
- break;
- case SUB:
- case DSUB:
- if (!HaveSameSign(rs, rt)) {
- if (rs > 0) {
- exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue + rt);
- } else if (rs < 0) {
- exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt);
- }
- }
- *alu_out = rs - rt;
- break;
- case SUBU: {
- int32_t alu32_out = static_cast<int32_t>(rs - rt);
- // Sign-extend result of 32bit operation into 64bit register.
- *alu_out = static_cast<int64_t>(alu32_out);
- break;
- }
- case DSUBU:
- *alu_out = rs - rt;
- break;
- case AND:
- *alu_out = rs & rt;
- break;
- case OR:
- *alu_out = rs | rt;
- break;
- case XOR:
- *alu_out = rs ^ rt;
- break;
- case NOR:
- *alu_out = ~(rs | rt);
- break;
- case SLT:
- *alu_out = rs < rt ? 1 : 0;
- break;
- case SLTU:
- *alu_out = rs_u < rt_u ? 1 : 0;
- break;
- // Break and trap instructions.
- case BREAK:
-
- *do_interrupt = true;
- break;
- case TGE:
- *do_interrupt = rs >= rt;
- break;
- case TGEU:
- *do_interrupt = rs_u >= rt_u;
- break;
- case TLT:
- *do_interrupt = rs < rt;
- break;
- case TLTU:
- *do_interrupt = rs_u < rt_u;
- break;
- case TEQ:
- *do_interrupt = rs == rt;
- break;
- case TNE:
- *do_interrupt = rs != rt;
- break;
- case MOVN:
- case MOVZ:
- case MOVCI:
- // No action taken on decode.
- break;
- case DIV:
- case DIVU:
- case DDIV:
- case DDIVU:
- // div and divu never raise exceptions.
- break;
- case SELEQZ_S:
- case SELNEZ_S:
- break;
- default:
- UNREACHABLE();
- }
- break;
- case SPECIAL2:
- switch (instr->FunctionFieldRaw()) {
- case MUL:
- // Only the lower 32 bits are kept.
- *alu_out = (int32_t)rs_u * (int32_t)rt_u;
- break;
- case CLZ:
- // MIPS32 spec: If no bits were set in GPR rs, the result written to
- // GPR rd is 32.
- *alu_out =
- base::bits::CountLeadingZeros32(static_cast<uint32_t>(rs_u));
- break;
- default:
- UNREACHABLE();
- }
- break;
- case SPECIAL3:
- switch (instr->FunctionFieldRaw()) {
- case INS: { // Mips32r2 instruction.
- // Interpret rd field as 5-bit msb of insert.
- uint16_t msb = rd_reg;
- // Interpret sa field as 5-bit lsb of insert.
- uint16_t lsb = sa;
- uint16_t size = msb - lsb + 1;
- uint64_t mask = (1ULL << size) - 1;
- *alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
- break;
- }
- case EXT: { // Mips32r2 instruction.
- // Interpret rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg;
- // Interpret sa field as 5-bit lsb of extract.
- uint16_t lsb = sa;
- uint16_t size = msb + 1;
- uint64_t mask = (1ULL << size) - 1;
- *alu_out = static_cast<int32_t>((rs_u & (mask << lsb)) >> lsb);
- break;
- }
- case DEXT: { // Mips32r2 instruction.
- // Interpret rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg;
- // Interpret sa field as 5-bit lsb of extract.
- uint16_t lsb = sa;
- uint16_t size = msb + 1;
- uint64_t mask = (1ULL << size) - 1;
- *alu_out = static_cast<int64_t>((rs_u & (mask << lsb)) >> lsb);
- break;
- }
- case BSHFL: {
- int sa = instr->SaFieldRaw() >> kSaShift;
- switch (sa) {
- case BITSWAP: {
- uint32_t input = static_cast<uint32_t>(rt);
- uint32_t output = 0;
- uint8_t i_byte, o_byte;
-
- // Reverse the bit in byte for each individual byte
- for (int i = 0; i < 4; i++) {
- output = output >> 8;
- i_byte = input & 0xff;
-
- // Fast way to reverse bits in byte
- // Devised by Sean Anderson, July 13, 2001
- o_byte =
- static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
- (i_byte * 0x8020LU & 0x88440LU)) *
- 0x10101LU >>
- 16);
-
- output = output | (static_cast<uint32_t>(o_byte << 24));
- input = input >> 8;
- }
-
- *alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
- break;
- }
- case SEB:
- case SEH:
- case WSBH:
- UNREACHABLE();
- break;
- default: {
- sa >>= kBp2Bits;
- switch (sa) {
- case ALIGN: {
- if (bp2 == 0) {
- *alu_out = static_cast<int32_t>(rt);
- } else {
- uint64_t rt_hi = rt << (8 * bp2);
- uint64_t rs_lo = rs >> (8 * (4 - bp2));
- *alu_out = static_cast<int32_t>(rt_hi | rs_lo);
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- break;
- }
- }
- break;
- }
- case DBSHFL: {
- int sa = instr->SaFieldRaw() >> kSaShift;
- switch (sa) {
- case DBITSWAP: {
- switch (instr->SaFieldRaw() >> kSaShift) {
- case DBITSWAP_SA: { // Mips64r6
- uint64_t input = static_cast<uint64_t>(rt);
- uint64_t output = 0;
- uint8_t i_byte, o_byte;
-
- // Reverse the bit in byte for each individual byte
- for (int i = 0; i < 8; i++) {
- output = output >> 8;
- i_byte = input & 0xff;
-
- // Fast way to reverse bits in byte
- // Devised by Sean Anderson, July 13, 2001
- o_byte =
- static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
- (i_byte * 0x8020LU & 0x88440LU)) *
- 0x10101LU >>
- 16);
-
- output = output | ((static_cast<uint64_t>(o_byte) << 56));
- input = input >> 8;
- }
-
- *alu_out = static_cast<int64_t>(output);
- break;
- }
- }
- break;
- }
- case DSBH:
- case DSHD:
- UNREACHABLE();
- break;
- default: {
- sa >>= kBp3Bits;
- switch (sa) {
- case DALIGN: {
- if (bp3 == 0) {
- *alu_out = static_cast<int64_t>(rt);
- } else {
- uint64_t rt_hi = rt << (8 * bp3);
- uint64_t rs_lo = rs >> (8 * (8 - bp3));
- *alu_out = static_cast<int64_t>(rt_hi | rs_lo);
- }
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
- break;
- }
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- break;
- default:
- UNREACHABLE();
- }
-}
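The BITSWAP/DBITSWAP cases deleted above (and re-added to DecodeTypeRegisterSPECIAL3 later in this patch) reverse the bits of each byte with the multiply-and-mask trick credited in the comments to Sean Anderson. A standalone sketch of that byte reversal, using the same constants as the code:

#include <cstdint>

// Reverse the bits of one byte: the multiplies spread copies of the byte
// across a 64-bit word, the masks keep one bit per copy, and the final
// multiply-and-shift gathers the kept bits back in reversed order.
static uint8_t ReverseByte(uint8_t b) {
  return static_cast<uint8_t>(((b * 0x0802ULL & 0x22110ULL) |
                               (b * 0x8020ULL & 0x88440ULL)) *
                              0x10101ULL >> 16);
}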
-
-
-void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
- const int32_t fs_reg,
- const int32_t ft_reg,
- const int32_t fd_reg) {
+void Simulator::DecodeTypeRegisterSRsType() {
float fs, ft, fd;
- fs = get_fpu_register_float(fs_reg);
- ft = get_fpu_register_float(ft_reg);
- fd = get_fpu_register_float(fd_reg);
+ fs = get_fpu_register_float(fs_reg());
+ ft = get_fpu_register_float(ft_reg());
+ fd = get_fpu_register_float(fd_reg());
int32_t ft_int = bit_cast<int32_t>(ft);
int32_t fd_int = bit_cast<int32_t>(fd);
uint32_t cc, fcsr_cc;
- cc = instr->FCccValue();
+ cc = get_instr()->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
- switch (instr->FunctionFieldRaw()) {
+ switch (get_instr()->FunctionFieldRaw()) {
case RINT: {
DCHECK(kArchVariant == kMips64r6);
float result, temp_result;
@@ -2696,44 +2231,44 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
result = lower;
break;
}
- set_fpu_register_float(fd_reg, result);
+ set_fpu_register_float(fd_reg(), result);
if (result != fs) {
set_fcsr_bit(kFCSRInexactFlagBit, true);
}
break;
}
case ADD_S:
- set_fpu_register_float(fd_reg, fs + ft);
+ set_fpu_register_float(fd_reg(), fs + ft);
break;
case SUB_S:
- set_fpu_register_float(fd_reg, fs - ft);
+ set_fpu_register_float(fd_reg(), fs - ft);
break;
case MUL_S:
- set_fpu_register_float(fd_reg, fs * ft);
+ set_fpu_register_float(fd_reg(), fs * ft);
break;
case DIV_S:
- set_fpu_register_float(fd_reg, fs / ft);
+ set_fpu_register_float(fd_reg(), fs / ft);
break;
case ABS_S:
- set_fpu_register_float(fd_reg, fabs(fs));
+ set_fpu_register_float(fd_reg(), fabs(fs));
break;
case MOV_S:
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
break;
case NEG_S:
- set_fpu_register_float(fd_reg, -fs);
+ set_fpu_register_float(fd_reg(), -fs);
break;
case SQRT_S:
- set_fpu_register_float(fd_reg, fast_sqrt(fs));
+ set_fpu_register_float(fd_reg(), fast_sqrt(fs));
break;
case RSQRT_S: {
float result = 1.0 / fast_sqrt(fs);
- set_fpu_register_float(fd_reg, result);
+ set_fpu_register_float(fd_reg(), result);
break;
}
case RECIP_S: {
float result = 1.0 / fs;
- set_fpu_register_float(fd_reg, result);
+ set_fpu_register_float(fd_reg(), result);
break;
}
case C_F_D:
@@ -2761,7 +2296,7 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
break;
case CVT_D_S:
- set_fpu_register_double(fd_reg, static_cast<double>(fs));
+ set_fpu_register_double(fd_reg(), static_cast<double>(fs));
break;
case CLASS_S: { // Mips64r6 instruction
// Convert float input to uint32_t for easier bit manipulation
@@ -2824,7 +2359,7 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
DCHECK(result != 0);
fResult = bit_cast<float>(result);
- set_fpu_register_float(fd_reg, fResult);
+ set_fpu_register_float(fd_reg(), fResult);
break;
}
@@ -2832,9 +2367,9 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
float rounded;
int64_t result;
round64_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register(fd_reg, result);
+ set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
break;
}
@@ -2842,26 +2377,26 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
float rounded;
int32_t result;
round_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
break;
}
case TRUNC_W_S: { // Truncate single to word (round towards 0).
float rounded = trunc(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
} break;
case TRUNC_L_S: { // Mips64r2 instruction.
float rounded = trunc(fs);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, result);
+ set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
break;
}
@@ -2873,9 +2408,9 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
// round to the even one.
result--;
}
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
break;
}
@@ -2888,18 +2423,18 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
result--;
}
int64_t i64 = static_cast<int64_t>(result);
- set_fpu_register(fd_reg, i64);
+ set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
break;
}
case FLOOR_L_S: { // Mips64r2 instruction.
float rounded = floor(fs);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, result);
+ set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
break;
}
@@ -2907,38 +2442,38 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
{
float rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
} break;
case CEIL_W_S: // Round single to word towards positive infinity.
{
float rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
}
} break;
case CEIL_L_S: { // Mips64r2 instruction.
float rounded = ceil(fs);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, result);
+ set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
break;
}
case MINA:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_float(fs_reg);
+ fs = get_fpu_register_float(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg, ft);
+ set_fpu_register_float(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else {
float result;
if (fabs(fs) > fabs(ft)) {
@@ -2948,18 +2483,18 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
} else {
result = (fs > ft ? fs : ft);
}
- set_fpu_register_float(fd_reg, result);
+ set_fpu_register_float(fd_reg(), result);
}
break;
case MAXA:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_float(fs_reg);
+ fs = get_fpu_register_float(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg, ft);
+ set_fpu_register_float(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else {
float result;
if (fabs(fs) < fabs(ft)) {
@@ -2969,78 +2504,76 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
} else {
result = (fs > ft ? fs : ft);
}
- set_fpu_register_float(fd_reg, result);
+ set_fpu_register_float(fd_reg(), result);
}
break;
case MIN:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_float(fs_reg);
+ fs = get_fpu_register_float(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg, ft);
+ set_fpu_register_float(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else {
- set_fpu_register_float(fd_reg, (fs >= ft) ? ft : fs);
+ set_fpu_register_float(fd_reg(), (fs >= ft) ? ft : fs);
}
break;
case MAX:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_float(fs_reg);
+ fs = get_fpu_register_float(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_float(fd_reg, ft);
+ set_fpu_register_float(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_float(fd_reg, fs);
+ set_fpu_register_float(fd_reg(), fs);
} else {
- set_fpu_register_float(fd_reg, (fs <= ft) ? ft : fs);
+ set_fpu_register_float(fd_reg(), (fs <= ft) ? ft : fs);
}
break;
case SEL:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(fd_reg, (fd_int & 0x1) == 0 ? fs : ft);
+ set_fpu_register_float(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
break;
case SELEQZ_C:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(
- fd_reg, (ft_int & 0x1) == 0 ? get_fpu_register_float(fs_reg) : 0.0);
+ set_fpu_register_float(fd_reg(), (ft_int & 0x1) == 0
+ ? get_fpu_register_float(fs_reg())
+ : 0.0);
break;
case SELNEZ_C:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_float(
- fd_reg, (ft_int & 0x1) != 0 ? get_fpu_register_float(fs_reg) : 0.0);
+ set_fpu_register_float(fd_reg(), (ft_int & 0x1) != 0
+ ? get_fpu_register_float(fs_reg())
+ : 0.0);
break;
case MOVZ_C: {
DCHECK(kArchVariant == kMips64r2);
- int32_t rt_reg = instr->RtValue();
- int64_t rt = get_register(rt_reg);
- if (rt == 0) {
- set_fpu_register_float(fd_reg, fs);
+ if (rt() == 0) {
+ set_fpu_register_float(fd_reg(), fs);
}
break;
}
case MOVN_C: {
DCHECK(kArchVariant == kMips64r2);
- int32_t rt_reg = instr->RtValue();
- int64_t rt = get_register(rt_reg);
- if (rt != 0) {
- set_fpu_register_float(fd_reg, fs);
+ if (rt() != 0) {
+ set_fpu_register_float(fd_reg(), fs);
}
break;
}
case MOVF: {
// Same function field for MOVT.S and MOVF.S
- uint32_t ft_cc = (ft_reg >> 2) & 0x7;
+ uint32_t ft_cc = (ft_reg() >> 2) & 0x7;
ft_cc = get_fcsr_condition_bit(ft_cc);
- if (instr->Bit(16)) { // Read Tf bit.
+ if (get_instr()->Bit(16)) { // Read Tf bit.
// MOVT.S
- if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg, fs);
+ if (test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
} else {
// MOVF.S
- if (!test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg, fs);
+ if (!test_fcsr_bit(ft_cc)) set_fpu_register_float(fd_reg(), fs);
}
break;
}
@@ -3052,21 +2585,19 @@ void Simulator::DecodeTypeRegisterSRsType(Instruction* instr,
}
-void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
- const int32_t fs_reg,
- const int32_t ft_reg,
- const int32_t fd_reg) {
+void Simulator::DecodeTypeRegisterDRsType() {
double ft, fs, fd;
uint32_t cc, fcsr_cc;
- fs = get_fpu_register_double(fs_reg);
- ft = (instr->FunctionFieldRaw() != MOVF) ? get_fpu_register_double(ft_reg)
- : 0.0;
- fd = get_fpu_register_double(fd_reg);
- cc = instr->FCccValue();
+ fs = get_fpu_register_double(fs_reg());
+ ft = (get_instr()->FunctionFieldRaw() != MOVF)
+ ? get_fpu_register_double(ft_reg())
+ : 0.0;
+ fd = get_fpu_register_double(fd_reg());
+ cc = get_instr()->FCccValue();
fcsr_cc = get_fcsr_condition_bit(cc);
int64_t ft_int = bit_cast<int64_t>(ft);
int64_t fd_int = bit_cast<int64_t>(fd);
- switch (instr->FunctionFieldRaw()) {
+ switch (get_instr()->FunctionFieldRaw()) {
case RINT: {
DCHECK(kArchVariant == kMips64r6);
double result, temp, temp_result;
@@ -3098,7 +2629,7 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
result = lower;
break;
}
- set_fpu_register_double(fd_reg, result);
+ set_fpu_register_double(fd_reg(), result);
if (result != fs) {
set_fcsr_bit(kFCSRInexactFlagBit, true);
}
@@ -3106,56 +2637,52 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
}
case SEL:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg, (fd_int & 0x1) == 0 ? fs : ft);
+ set_fpu_register_double(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
break;
case SELEQZ_C:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg, (ft_int & 0x1) == 0 ? fs : 0.0);
+ set_fpu_register_double(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0);
break;
case SELNEZ_C:
DCHECK(kArchVariant == kMips64r6);
- set_fpu_register_double(fd_reg, (ft_int & 0x1) != 0 ? fs : 0.0);
+ set_fpu_register_double(fd_reg(), (ft_int & 0x1) != 0 ? fs : 0.0);
break;
case MOVZ_C: {
DCHECK(kArchVariant == kMips64r2);
- int32_t rt_reg = instr->RtValue();
- int64_t rt = get_register(rt_reg);
- if (rt == 0) {
- set_fpu_register_double(fd_reg, fs);
+ if (rt() == 0) {
+ set_fpu_register_double(fd_reg(), fs);
}
break;
}
case MOVN_C: {
DCHECK(kArchVariant == kMips64r2);
- int32_t rt_reg = instr->RtValue();
- int64_t rt = get_register(rt_reg);
- if (rt != 0) {
- set_fpu_register_double(fd_reg, fs);
+ if (rt() != 0) {
+ set_fpu_register_double(fd_reg(), fs);
}
break;
}
case MOVF: {
// Same function field for MOVT.D and MOVF.D
- uint32_t ft_cc = (ft_reg >> 2) & 0x7;
+ uint32_t ft_cc = (ft_reg() >> 2) & 0x7;
ft_cc = get_fcsr_condition_bit(ft_cc);
- if (instr->Bit(16)) { // Read Tf bit.
+ if (get_instr()->Bit(16)) { // Read Tf bit.
// MOVT.D
- if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg, fs);
+ if (test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
} else {
// MOVF.D
- if (!test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg, fs);
+ if (!test_fcsr_bit(ft_cc)) set_fpu_register_double(fd_reg(), fs);
}
break;
}
case MINA:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_double(fs_reg);
+ fs = get_fpu_register_double(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg, ft);
+ set_fpu_register_double(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else {
double result;
if (fabs(fs) > fabs(ft)) {
@@ -3165,18 +2692,18 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
} else {
result = (fs > ft ? fs : ft);
}
- set_fpu_register_double(fd_reg, result);
+ set_fpu_register_double(fd_reg(), result);
}
break;
case MAXA:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_double(fs_reg);
+ fs = get_fpu_register_double(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg, ft);
+ set_fpu_register_double(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else {
double result;
if (fabs(fs) < fabs(ft)) {
@@ -3186,67 +2713,67 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
} else {
result = (fs > ft ? fs : ft);
}
- set_fpu_register_double(fd_reg, result);
+ set_fpu_register_double(fd_reg(), result);
}
break;
case MIN:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_double(fs_reg);
+ fs = get_fpu_register_double(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg, ft);
+ set_fpu_register_double(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else {
- set_fpu_register_double(fd_reg, (fs >= ft) ? ft : fs);
+ set_fpu_register_double(fd_reg(), (fs >= ft) ? ft : fs);
}
break;
case MAX:
DCHECK(kArchVariant == kMips64r6);
- fs = get_fpu_register_double(fs_reg);
+ fs = get_fpu_register_double(fs_reg());
if (std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else if (std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_double(fd_reg, ft);
+ set_fpu_register_double(fd_reg(), ft);
} else if (!std::isnan(fs) && std::isnan(ft)) {
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
} else {
- set_fpu_register_double(fd_reg, (fs <= ft) ? ft : fs);
+ set_fpu_register_double(fd_reg(), (fs <= ft) ? ft : fs);
}
break;
case ADD_D:
- set_fpu_register_double(fd_reg, fs + ft);
+ set_fpu_register_double(fd_reg(), fs + ft);
break;
case SUB_D:
- set_fpu_register_double(fd_reg, fs - ft);
+ set_fpu_register_double(fd_reg(), fs - ft);
break;
case MUL_D:
- set_fpu_register_double(fd_reg, fs * ft);
+ set_fpu_register_double(fd_reg(), fs * ft);
break;
case DIV_D:
- set_fpu_register_double(fd_reg, fs / ft);
+ set_fpu_register_double(fd_reg(), fs / ft);
break;
case ABS_D:
- set_fpu_register_double(fd_reg, fabs(fs));
+ set_fpu_register_double(fd_reg(), fabs(fs));
break;
case MOV_D:
- set_fpu_register_double(fd_reg, fs);
+ set_fpu_register_double(fd_reg(), fs);
break;
case NEG_D:
- set_fpu_register_double(fd_reg, -fs);
+ set_fpu_register_double(fd_reg(), -fs);
break;
case SQRT_D:
- set_fpu_register_double(fd_reg, fast_sqrt(fs));
+ set_fpu_register_double(fd_reg(), fast_sqrt(fs));
break;
case RSQRT_D: {
double result = 1.0 / fast_sqrt(fs);
- set_fpu_register_double(fd_reg, result);
+ set_fpu_register_double(fd_reg(), result);
break;
}
case RECIP_D: {
double result = 1.0 / fs;
- set_fpu_register_double(fd_reg, result);
+ set_fpu_register_double(fd_reg(), result);
break;
}
case C_UN_D:
@@ -3274,9 +2801,9 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
double rounded;
int32_t result;
round_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register_word(fd_reg, kFPUInvalidResult);
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
}
break;
}
@@ -3289,48 +2816,48 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
// round to the even one.
result--;
}
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
}
} break;
case TRUNC_W_D: // Truncate double to word (round towards 0).
{
double rounded = trunc(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
}
} break;
case FLOOR_W_D: // Round double to word towards negative infinity.
{
double rounded = std::floor(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
}
} break;
case CEIL_W_D: // Round double to word towards positive infinity.
{
double rounded = std::ceil(fs);
int32_t result = static_cast<int32_t>(rounded);
- set_fpu_register_word(fd_reg, result);
+ set_fpu_register_word(fd_reg(), result);
if (set_fcsr_round_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPUInvalidResult);
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
}
} break;
case CVT_S_D: // Convert double to float (single).
- set_fpu_register_float(fd_reg, static_cast<float>(fs));
+ set_fpu_register_float(fd_reg(), static_cast<float>(fs));
break;
case CVT_L_D: { // Mips64r2: Truncate double to 64-bit long-word.
double rounded;
int64_t result;
round64_according_to_fcsr(fs, rounded, result, fs);
- set_fpu_register(fd_reg, result);
+ set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
break;
}
@@ -3343,36 +2870,36 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
result--;
}
int64_t i64 = static_cast<int64_t>(result);
- set_fpu_register(fd_reg, i64);
+ set_fpu_register(fd_reg(), i64);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
break;
}
case TRUNC_L_D: { // Mips64r2 instruction.
double rounded = trunc(fs);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, result);
+ set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
break;
}
case FLOOR_L_D: { // Mips64r2 instruction.
double rounded = floor(fs);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, result);
+ set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
break;
}
case CEIL_L_D: { // Mips64r2 instruction.
double rounded = ceil(fs);
int64_t result = static_cast<int64_t>(rounded);
- set_fpu_register(fd_reg, result);
+ set_fpu_register(fd_reg(), result);
if (set_fcsr_round64_error(fs, rounded)) {
- set_fpu_register(fd_reg, kFPU64InvalidResult);
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
}
break;
}
@@ -3437,7 +2964,7 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
DCHECK(result != 0);
dResult = bit_cast<double>(result);
- set_fpu_register_double(fd_reg, dResult);
+ set_fpu_register_double(fd_reg(), dResult);
break;
}
@@ -3451,93 +2978,90 @@ void Simulator::DecodeTypeRegisterDRsType(Instruction* instr,
}
-void Simulator::DecodeTypeRegisterWRsType(Instruction* instr,
- const int32_t fs_reg,
- const int32_t fd_reg,
- const int32_t ft_reg,
- int64_t& alu_out) {
- float fs = get_fpu_register_float(fs_reg);
- float ft = get_fpu_register_float(ft_reg);
- switch (instr->FunctionFieldRaw()) {
+void Simulator::DecodeTypeRegisterWRsType() {
+ float fs = get_fpu_register_float(fs_reg());
+ float ft = get_fpu_register_float(ft_reg());
+ int64_t alu_out = 0x12345678;  // Sentinel; the CVT_* cases below overwrite it before use.
+ switch (get_instr()->FunctionFieldRaw()) {
case CVT_S_W: // Convert word to float (single).
- alu_out = get_fpu_register_signed_word(fs_reg);
- set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
+ alu_out = get_fpu_register_signed_word(fs_reg());
+ set_fpu_register_float(fd_reg(), static_cast<float>(alu_out));
break;
case CVT_D_W: // Convert word to double.
- alu_out = get_fpu_register_signed_word(fs_reg);
- set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
+ alu_out = get_fpu_register_signed_word(fs_reg());
+ set_fpu_register_double(fd_reg(), static_cast<double>(alu_out));
break;
case CMP_AF:
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_EQ:
if (fs == ft) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_UEQ:
if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_LT:
if (fs < ft) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_ULT:
if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_LE:
if (fs <= ft) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_ULE:
if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_OR:
if (!std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_UNE:
if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
case CMP_NE:
if (fs != ft) {
- set_fpu_register_word(fd_reg, -1);
+ set_fpu_register_word(fd_reg(), -1);
} else {
- set_fpu_register_word(fd_reg, 0);
+ set_fpu_register_word(fd_reg(), 0);
}
break;
default:
@@ -3546,93 +3070,90 @@ void Simulator::DecodeTypeRegisterWRsType(Instruction* instr,
}
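The CMP_* cases above implement the MIPS r6 compare convention: instead of setting an FCSR condition bit the way the older C_xx compares do, they write an all-ones mask (-1) for true and zero for false into the destination register, which SEL-style instructions can then consume directly. A condensed sketch of one such predicate (unordered-or-less-than):

#include <cmath>
#include <cstdint>

// r6-style compare result: all-ones mask when the predicate holds, else 0.
static int32_t CmpUltMask(float fs, float ft) {
  bool unordered = std::isnan(fs) || std::isnan(ft);
  return (fs < ft || unordered) ? -1 : 0;
}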
-void Simulator::DecodeTypeRegisterLRsType(Instruction* instr,
- const int32_t fs_reg,
- const int32_t fd_reg,
- const int32_t ft_reg) {
- double fs = get_fpu_register_double(fs_reg);
- double ft = get_fpu_register_double(ft_reg);
+void Simulator::DecodeTypeRegisterLRsType() {
+ double fs = get_fpu_register_double(fs_reg());
+ double ft = get_fpu_register_double(ft_reg());
int64_t i64;
- switch (instr->FunctionFieldRaw()) {
+ switch (get_instr()->FunctionFieldRaw()) {
case CVT_D_L: // Mips32r2 instruction.
- i64 = get_fpu_register(fs_reg);
- set_fpu_register_double(fd_reg, static_cast<double>(i64));
+ i64 = get_fpu_register(fs_reg());
+ set_fpu_register_double(fd_reg(), static_cast<double>(i64));
break;
case CVT_S_L:
- i64 = get_fpu_register(fs_reg);
- set_fpu_register_float(fd_reg, static_cast<float>(i64));
+ i64 = get_fpu_register(fs_reg());
+ set_fpu_register_float(fd_reg(), static_cast<float>(i64));
break;
case CMP_AF:
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
break;
case CMP_UN:
if (std::isnan(fs) || std::isnan(ft)) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_EQ:
if (fs == ft) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_UEQ:
if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_LT:
if (fs < ft) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_ULT:
if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_LE:
if (fs <= ft) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_ULE:
if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_OR:
if (!std::isnan(fs) && !std::isnan(ft)) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_UNE:
if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
case CMP_NE:
if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) {
- set_fpu_register(fd_reg, -1);
+ set_fpu_register(fd_reg(), -1);
} else {
- set_fpu_register(fd_reg, 0);
+ set_fpu_register(fd_reg(), 0);
}
break;
default:
@@ -3640,54 +3161,56 @@ void Simulator::DecodeTypeRegisterLRsType(Instruction* instr,
}
}
-void Simulator::DecodeTypeRegisterCOP1(
- Instruction* instr, const int32_t rs_reg, const int64_t rs,
- const uint64_t rs_u, const int32_t rt_reg, const int64_t rt,
- const uint64_t rt_u, const int32_t rd_reg, const int32_t fr_reg,
- const int32_t fs_reg, const int32_t ft_reg, const int32_t fd_reg,
- int64_t& alu_out) {
- switch (instr->RsFieldRaw()) {
+
+void Simulator::DecodeTypeRegisterCOP1() {
+ switch (get_instr()->RsFieldRaw()) {
case BC1: // Branch on coprocessor condition.
case BC1EQZ:
case BC1NEZ:
UNREACHABLE();
break;
case CFC1:
- set_register(rt_reg, alu_out);
+ // At the moment only FCSR is supported.
+ DCHECK(fs_reg() == kFCSRRegister);
+ set_register(rt_reg(), FCSR_);
break;
case MFC1:
+ set_register(rt_reg(),
+ static_cast<int64_t>(get_fpu_register_word(fs_reg())));
+ break;
case DMFC1:
+ set_register(rt_reg(), get_fpu_register(fs_reg()));
+ break;
case MFHC1:
- set_register(rt_reg, alu_out);
+ set_register(rt_reg(), get_fpu_register_hi_word(fs_reg()));
break;
case CTC1:
// At the moment only FCSR is supported.
- DCHECK(fs_reg == kFCSRRegister);
- FCSR_ = static_cast<uint32_t>(registers_[rt_reg]);
+ DCHECK(fs_reg() == kFCSRRegister);
+ FCSR_ = static_cast<uint32_t>(rt());
break;
case MTC1:
// Hardware writes upper 32-bits to zero on mtc1.
- set_fpu_register_hi_word(fs_reg, 0);
- set_fpu_register_word(fs_reg, static_cast<int32_t>(registers_[rt_reg]));
+ set_fpu_register_hi_word(fs_reg(), 0);
+ set_fpu_register_word(fs_reg(), static_cast<int32_t>(rt()));
break;
case DMTC1:
- set_fpu_register(fs_reg, registers_[rt_reg]);
+ set_fpu_register(fs_reg(), rt());
break;
case MTHC1:
- set_fpu_register_hi_word(fs_reg,
- static_cast<int32_t>(registers_[rt_reg]));
+ set_fpu_register_hi_word(fs_reg(), static_cast<int32_t>(rt()));
break;
case S:
- DecodeTypeRegisterSRsType(instr, fs_reg, ft_reg, fd_reg);
+ DecodeTypeRegisterSRsType();
break;
case D:
- DecodeTypeRegisterDRsType(instr, fs_reg, ft_reg, fd_reg);
+ DecodeTypeRegisterDRsType();
break;
case W:
- DecodeTypeRegisterWRsType(instr, fs_reg, fd_reg, ft_reg, alu_out);
+ DecodeTypeRegisterWRsType();
break;
case L:
- DecodeTypeRegisterLRsType(instr, fs_reg, fd_reg, ft_reg);
+ DecodeTypeRegisterLRsType();
break;
default:
UNREACHABLE();
@@ -3695,18 +3218,14 @@ void Simulator::DecodeTypeRegisterCOP1(
}
-void Simulator::DecodeTypeRegisterCOP1X(Instruction* instr,
- const int32_t fr_reg,
- const int32_t fs_reg,
- const int32_t ft_reg,
- const int32_t fd_reg) {
- switch (instr->FunctionFieldRaw()) {
+void Simulator::DecodeTypeRegisterCOP1X() {
+ switch (get_instr()->FunctionFieldRaw()) {
case MADD_D:
double fr, ft, fs;
- fr = get_fpu_register_double(fr_reg);
- fs = get_fpu_register_double(fs_reg);
- ft = get_fpu_register_double(ft_reg);
- set_fpu_register_double(fd_reg, fs * ft + fr);
+ fr = get_fpu_register_double(fr_reg());
+ fs = get_fpu_register_double(fs_reg());
+ ft = get_fpu_register_double(ft_reg());
+ set_fpu_register_double(fd_reg(), fs * ft + fr);
break;
default:
UNREACHABLE();
@@ -3714,25 +3233,24 @@ void Simulator::DecodeTypeRegisterCOP1X(Instruction* instr,
}
-void Simulator::DecodeTypeRegisterSPECIAL(
- Instruction* instr, const int32_t rs_reg, const int64_t rs,
- const uint64_t rs_u, const int32_t rt_reg, const int64_t rt,
- const uint64_t rt_u, const int32_t rd_reg, const int32_t fr_reg,
- const int32_t fs_reg, const int32_t ft_reg, const int32_t fd_reg,
- const int64_t i64hilo, const uint64_t u64hilo, const int64_t alu_out,
- const bool do_interrupt, const int64_t current_pc, const int64_t next_pc,
- const int32_t return_addr_reg, const int64_t i128resultH,
- const int64_t i128resultL) {
- switch (instr->FunctionFieldRaw()) {
+void Simulator::DecodeTypeRegisterSPECIAL() {
+ int64_t i64hilo;
+ uint64_t u64hilo;
+ int64_t alu_out;
+ bool do_interrupt = false;
+
+ switch (get_instr()->FunctionFieldRaw()) {
case SELEQZ_S:
DCHECK(kArchVariant == kMips64r6);
- set_register(rd_reg, rt == 0 ? rs : 0);
+ set_register(rd_reg(), rt() == 0 ? rs() : 0);
break;
case SELNEZ_S:
DCHECK(kArchVariant == kMips64r6);
- set_register(rd_reg, rt != 0 ? rs : 0);
+ set_register(rd_reg(), rt() != 0 ? rs() : 0);
break;
case JR: {
+ int64_t next_pc = rs();
+ int64_t current_pc = get_pc();
Instruction* branch_delay_instr =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
@@ -3741,6 +3259,9 @@ void Simulator::DecodeTypeRegisterSPECIAL(
break;
}
case JALR: {
+ int64_t next_pc = rs();
+ int64_t current_pc = get_pc();
+ int32_t return_addr_reg = rd_reg();
Instruction* branch_delay_instr =
reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
@@ -3749,18 +3270,118 @@ void Simulator::DecodeTypeRegisterSPECIAL(
pc_modified_ = true;
break;
}
+ case SLL:
+ SetResult(rd_reg(), static_cast<int32_t>(rt()) << sa());
+ break;
+ case DSLL:
+ SetResult(rd_reg(), rt() << sa());
+ break;
+ case DSLL32:
+ SetResult(rd_reg(), rt() << sa() << 32);
+ break;
+ case SRL:
+ if (rs_reg() == 0) {
+ // Regular logical right shift of a word by a fixed number of
+ // bits. The RS field is always equal to 0.
+ // Sign-extend the 32-bit result.
+ alu_out = static_cast<int32_t>(static_cast<uint32_t>(rt_u()) >> sa());
+ } else {
+ // Logical right-rotate of a word by a fixed number of bits. This
+ // is a special case of the SRL instruction, added in MIPS32 Release 2.
+ // RS field is equal to 00001.
+ alu_out = static_cast<int32_t>(
+ base::bits::RotateRight32(static_cast<const uint32_t>(rt_u()),
+ static_cast<const uint32_t>(sa())));
+ }
+ SetResult(rd_reg(), alu_out);
+ break;
+ case DSRL:
+ SetResult(rd_reg(), rt_u() >> sa());
+ break;
+ case DSRL32:
+ SetResult(rd_reg(), rt_u() >> sa() >> 32);
+ break;
+ case SRA:
+ SetResult(rd_reg(), (int32_t)rt() >> sa());
+ break;
+ case DSRA:
+ SetResult(rd_reg(), rt() >> sa());
+ break;
+ case DSRA32:
+ SetResult(rd_reg(), rt() >> sa() >> 32);
+ break;
+ case SLLV:
+ SetResult(rd_reg(), (int32_t)rt() << rs());
+ break;
+ case DSLLV:
+ SetResult(rd_reg(), rt() << rs());
+ break;
+ case SRLV:
+ if (sa() == 0) {
+ // Regular logical right-shift of a word by a variable number of
+ // bits. The SA field is always equal to 0.
+ alu_out = static_cast<int32_t>((uint32_t)rt_u() >> rs());
+ } else {
+ // Logical right-rotate of a word by a variable number of bits.
+ // This is a special case of the SRLV instruction, added in MIPS32
+ // Release 2. The SA field is equal to 00001.
+ alu_out = static_cast<int32_t>(
+ base::bits::RotateRight32(static_cast<const uint32_t>(rt_u()),
+ static_cast<const uint32_t>(rs_u())));
+ }
+ SetResult(rd_reg(), alu_out);
+ break;
+ case DSRLV:
+ if (sa() == 0) {
+ // Regular logical right-shift of a word by a variable number of
+ // bits. The SA field is always equal to 0.
+ alu_out = rt_u() >> rs();
+ } else {
+ // Logical right-rotate of a word by a variable number of bits.
+ // This is a special case of the SRLV instruction, added in MIPS32
+ // Release 2. The SA field is equal to 00001.
+ alu_out =
+ base::bits::RotateRight32(static_cast<const uint32_t>(rt_u()),
+ static_cast<const uint32_t>(rs_u()));
+ }
+ SetResult(rd_reg(), alu_out);
+ break;
+ case SRAV:
+ SetResult(rd_reg(), (int32_t)rt() >> rs());
+ break;
+ case DSRAV:
+ SetResult(rd_reg(), rt() >> rs());
+ break;
+ case MFHI: // MFHI == CLZ on R6.
+ if (kArchVariant != kMips64r6) {
+ DCHECK(sa() == 0);
+ alu_out = get_register(HI);
+ } else {
+ // MIPS spec: If no bits were set in GPR rs, the result written to
+ // GPR rd is 32.
+ DCHECK(sa() == 1);
+ alu_out = base::bits::CountLeadingZeros32(static_cast<int32_t>(rs_u()));
+ }
+ SetResult(rd_reg(), alu_out);
+ break;
+ case MFLO:
+ SetResult(rd_reg(), get_register(LO));
+ break;
// Instructions using HI and LO registers.
- case MULT:
+ case MULT: { // MULT == D_MUL_MUH.
+ int32_t rs_lo = static_cast<int32_t>(rs());
+ int32_t rt_lo = static_cast<int32_t>(rt());
+ i64hilo = static_cast<int64_t>(rs_lo) * static_cast<int64_t>(rt_lo);
if (kArchVariant != kMips64r6) {
set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
set_register(HI, static_cast<int32_t>(i64hilo >> 32));
} else {
- switch (instr->SaValue()) {
+ switch (sa()) {
case MUL_OP:
- set_register(rd_reg, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(rd_reg(), static_cast<int32_t>(i64hilo & 0xffffffff));
break;
case MUH_OP:
- set_register(rd_reg, static_cast<int32_t>(i64hilo >> 32));
+ set_register(rd_reg(), static_cast<int32_t>(i64hilo >> 32));
break;
default:
UNIMPLEMENTED_MIPS();
@@ -3768,21 +3389,24 @@ void Simulator::DecodeTypeRegisterSPECIAL(
}
}
break;
+ }
case MULTU:
+ u64hilo = static_cast<uint64_t>(rs_u() & 0xffffffff) *
+ static_cast<uint64_t>(rt_u() & 0xffffffff);
set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
set_register(HI, static_cast<int32_t>(u64hilo >> 32));
break;
case DMULT: // DMULT == D_MUL_MUH.
if (kArchVariant != kMips64r6) {
- set_register(LO, static_cast<int64_t>(i128resultL));
- set_register(HI, static_cast<int64_t>(i128resultH));
+ set_register(LO, rs() * rt());
+ set_register(HI, MultiplyHighSigned(rs(), rt()));
} else {
- switch (instr->SaValue()) {
+ switch (sa()) {
case MUL_OP:
- set_register(rd_reg, static_cast<int64_t>(i128resultL));
+ set_register(rd_reg(), rs() * rt());
break;
case MUH_OP:
- set_register(rd_reg, static_cast<int64_t>(i128resultH));
+ set_register(rd_reg(), MultiplyHighSigned(rs(), rt()));
break;
default:
UNIMPLEMENTED_MIPS();
@@ -3793,42 +3417,38 @@ void Simulator::DecodeTypeRegisterSPECIAL(
case DMULTU:
UNIMPLEMENTED_MIPS();
break;
- case DSLL:
- set_register(rd_reg, alu_out);
- TraceRegWr(alu_out);
- break;
case DIV:
case DDIV: {
const int64_t int_min_value =
- instr->FunctionFieldRaw() == DIV ? INT_MIN : LONG_MIN;
+ get_instr()->FunctionFieldRaw() == DIV ? INT_MIN : LONG_MIN;
switch (kArchVariant) {
case kMips64r2:
// Divide by zero and overflow were not checked in the
// configuration step - div and divu do not raise exceptions. On
// division by 0 the result will be UNPREDICTABLE. On overflow
// (INT_MIN/-1), return INT_MIN which is what the hardware does.
- if (rs == int_min_value && rt == -1) {
+ if (rs() == int_min_value && rt() == -1) {
set_register(LO, int_min_value);
set_register(HI, 0);
- } else if (rt != 0) {
- set_register(LO, rs / rt);
- set_register(HI, rs % rt);
+ } else if (rt() != 0) {
+ set_register(LO, rs() / rt());
+ set_register(HI, rs() % rt());
}
break;
case kMips64r6:
- switch (instr->SaValue()) {
+ switch (sa()) {
case DIV_OP:
- if (rs == int_min_value && rt == -1) {
- set_register(rd_reg, int_min_value);
- } else if (rt != 0) {
- set_register(rd_reg, rs / rt);
+ if (rs() == int_min_value && rt() == -1) {
+ set_register(rd_reg(), int_min_value);
+ } else if (rt() != 0) {
+ set_register(rd_reg(), rs() / rt());
}
break;
case MOD_OP:
- if (rs == int_min_value && rt == -1) {
- set_register(rd_reg, 0);
- } else if (rt != 0) {
- set_register(rd_reg, rs % rt);
+ if (rs() == int_min_value && rt() == -1) {
+ set_register(rd_reg(), 0);
+ } else if (rt() != 0) {
+ set_register(rd_reg(), rs() % rt());
}
break;
default:
@@ -3842,91 +3462,317 @@ void Simulator::DecodeTypeRegisterSPECIAL(
break;
}
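The DIV/DDIV handling above must special-case INT_MIN / -1, whose quotient overflows (and traps on most host CPUs), as well as division by zero, which the MIPS architecture leaves UNPREDICTABLE. A host-safe sketch of the r2-style guard, assuming the simulator's HI/LO convention:

#include <climits>
#include <cstdint>

// Mirrors the kMips64r2 DIV path: INT_MIN / -1 yields INT_MIN with a zero
// remainder; division by zero leaves lo/hi unchanged (UNPREDICTABLE on
// real hardware).
static void GuardedDiv32(int32_t rs, int32_t rt, int32_t* lo, int32_t* hi) {
  if (rs == INT_MIN && rt == -1) {
    *lo = INT_MIN;
    *hi = 0;
  } else if (rt != 0) {
    *lo = rs / rt;
    *hi = rs % rt;
  }
}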
case DIVU:
- if (rt_u != 0) {
- set_register(LO, rs_u / rt_u);
- set_register(HI, rs_u % rt_u);
+ if (rt_u() != 0) {
+ uint32_t rt_u_32 = static_cast<uint32_t>(rt_u());
+ uint32_t rs_u_32 = static_cast<uint32_t>(rs_u());
+ set_register(LO, rs_u_32 / rt_u_32);
+ set_register(HI, rs_u_32 % rt_u_32);
+ }
+ break;
+ case DDIVU:
+ if (rt_u() != 0) {
+ set_register(LO, rs_u() / rt_u());
+ set_register(HI, rs_u() % rt_u());
}
break;
+ case ADD:
+ case DADD:
+ if (HaveSameSign(rs(), rt())) {
+ if (rs() > 0) {
+ if (rs() > (Registers::kMaxValue - rt())) {
+ SignalException(kIntegerOverflow);
+ }
+ } else if (rs() < 0) {
+ if (rs() < (Registers::kMinValue - rt())) {
+ SignalException(kIntegerUnderflow);
+ }
+ }
+ }
+ SetResult(rd_reg(), rs() + rt());
+ break;
+ case ADDU: {
+ int32_t alu32_out = static_cast<int32_t>(rs() + rt());
+ // Sign-extend result of 32bit operation into 64bit register.
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case DADDU:
+ SetResult(rd_reg(), rs() + rt());
+ break;
+ case SUB:
+ case DSUB:
+ if (!HaveSameSign(rs(), rt())) {
+ if (rs() > 0) {
+ if (rs() > (Registers::kMaxValue + rt())) {
+ SignalException(kIntegerOverflow);
+ }
+ } else if (rs() < 0) {
+ if (rs() < (Registers::kMinValue + rt())) {
+ SignalException(kIntegerUnderflow);
+ }
+ }
+ }
+ SetResult(rd_reg(), rs() - rt());
+ break;
+ case SUBU: {
+ int32_t alu32_out = static_cast<int32_t>(rs() - rt());
+ // Sign-extend result of 32bit operation into 64bit register.
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case DSUBU:
+ SetResult(rd_reg(), rs() - rt());
+ break;
+ case AND:
+ SetResult(rd_reg(), rs() & rt());
+ break;
+ case OR:
+ SetResult(rd_reg(), rs() | rt());
+ break;
+ case XOR:
+ SetResult(rd_reg(), rs() ^ rt());
+ break;
+ case NOR:
+ SetResult(rd_reg(), ~(rs() | rt()));
+ break;
+ case SLT:
+ SetResult(rd_reg(), rs() < rt() ? 1 : 0);
+ break;
+ case SLTU:
+ SetResult(rd_reg(), rs_u() < rt_u() ? 1 : 0);
+ break;
// Break and trap instructions.
case BREAK:
+ do_interrupt = true;
+ break;
case TGE:
+ do_interrupt = rs() >= rt();
+ break;
case TGEU:
+ do_interrupt = rs_u() >= rt_u();
+ break;
case TLT:
+ do_interrupt = rs() < rt();
+ break;
case TLTU:
+ do_interrupt = rs_u() < rt_u();
+ break;
case TEQ:
+ do_interrupt = rs() == rt();
+ break;
case TNE:
- if (do_interrupt) {
- SoftwareInterrupt(instr);
- }
+ do_interrupt = rs() != rt();
break;
// Conditional moves.
case MOVN:
- if (rt) {
- set_register(rd_reg, rs);
- TraceRegWr(rs);
+ if (rt()) {
+ SetResult(rd_reg(), rs());
}
break;
case MOVCI: {
- uint32_t cc = instr->FBccValue();
+ uint32_t cc = get_instr()->FBccValue();
uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
- if (instr->Bit(16)) { // Read Tf bit.
- if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+ if (get_instr()->Bit(16)) { // Read Tf bit.
+ if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
} else {
- if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+ if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs());
}
break;
}
case MOVZ:
- if (!rt) {
- set_register(rd_reg, rs);
- TraceRegWr(rs);
+ if (!rt()) {
+ SetResult(rd_reg(), rs());
}
break;
- default: // For other special opcodes we do the default operation.
- set_register(rd_reg, alu_out);
- TraceRegWr(alu_out);
+ default:
+ UNREACHABLE();
+ }
+ if (do_interrupt) {
+ SoftwareInterrupt(get_instr());
}
}
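The ADD/DADD and SUB/DSUB cases above check for signed overflow before performing the arithmetic, since evaluating rs() + rt() directly would be undefined behavior in C++ when it overflows; HaveSameSign is the simulator's helper for comparing sign bits. A standalone sketch of the addition check (with Registers::kMaxValue/kMinValue replaced by the standard limits):

#include <cstdint>
#include <limits>

// One way to compare sign bits; the simulator has its own helper.
static bool HaveSameSign(int64_t a, int64_t b) { return (a ^ b) >= 0; }

// True if a + b would overflow a signed 64-bit register; addends of
// opposite sign can never overflow, so only same-sign pairs are checked.
static bool AddWouldOverflow(int64_t a, int64_t b) {
  if (!HaveSameSign(a, b)) return false;
  if (a > 0) return a > std::numeric_limits<int64_t>::max() - b;
  if (a < 0) return a < std::numeric_limits<int64_t>::min() - b;
  return false;
}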
-void Simulator::DecodeTypeRegisterSPECIAL2(Instruction* instr,
- const int32_t rd_reg,
- int64_t alu_out) {
- switch (instr->FunctionFieldRaw()) {
+void Simulator::DecodeTypeRegisterSPECIAL2() {
+ int64_t alu_out;
+ switch (get_instr()->FunctionFieldRaw()) {
case MUL:
- set_register(rd_reg, alu_out);
- TraceRegWr(alu_out);
+ alu_out = static_cast<int32_t>(rs_u()) * static_cast<int32_t>(rt_u());
+ SetResult(rd_reg(), alu_out);
// HI and LO are UNPREDICTABLE after the operation.
set_register(LO, Unpredictable);
set_register(HI, Unpredictable);
break;
- default: // For other special2 opcodes we do the default operation.
- set_register(rd_reg, alu_out);
+ case CLZ:
+ // MIPS32 spec: If no bits were set in GPR rs, the result written to
+ // GPR rd is 32.
+ alu_out = base::bits::CountLeadingZeros32(static_cast<uint32_t>(rs_u()));
+ set_register(rd_reg(), alu_out);
+ break;
+ default:
+ alu_out = 0x12345678;
+ UNREACHABLE();
}
}
-void Simulator::DecodeTypeRegisterSPECIAL3(Instruction* instr,
- const int32_t rt_reg,
- const int32_t rd_reg,
- const int64_t alu_out) {
- switch (instr->FunctionFieldRaw()) {
- case INS:
- // Ins instr leaves result in Rt, rather than Rd.
- set_register(rt_reg, alu_out);
- TraceRegWr(alu_out);
+void Simulator::DecodeTypeRegisterSPECIAL3() {
+ int64_t alu_out;
+ switch (get_instr()->FunctionFieldRaw()) {
+ case INS: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa();
+ uint16_t size = msb - lsb + 1;
+ uint64_t mask = (1ULL << size) - 1;
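+ // Clear the size-bit wide field of rt at position lsb, then insert the
+ // low bits of rs there.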
+ alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb);
+ SetResult(rt_reg(), alu_out);
break;
- case EXT:
- case DEXT:
- // Dext/Ext instr leaves result in Rt, rather than Rd.
- set_register(rt_reg, alu_out);
- TraceRegWr(alu_out);
+ }
+ case EXT: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa();
+ uint16_t size = msb + 1;
+ uint64_t mask = (1ULL << size) - 1;
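+ // Pull the size-bit wide field of rs out of position lsb; the int32_t
+ // cast sign-extends the 32-bit result into the 64-bit register.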
+ alu_out = static_cast<int32_t>((rs_u() & (mask << lsb)) >> lsb);
+ SetResult(rt_reg(), alu_out);
break;
- case BSHFL:
- case DBSHFL:
- set_register(rd_reg, alu_out);
- TraceRegWr(alu_out);
+ }
+ case DEXT: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa();
+ uint16_t size = msb + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
+ SetResult(rt_reg(), alu_out);
break;
+ }
+ case BSHFL: {
+ int32_t sa = get_instr()->SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case BITSWAP: {
+ uint32_t input = static_cast<uint32_t>(rt());
+ uint32_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bits within each individual byte.
+ for (int i = 0; i < 4; i++) {
+ output = output >> 8;
+ i_byte = input & 0xff;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
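+ // The multiplies make shifted copies of the byte, the masks pick one
+ // source bit per position, and the final multiply and shift gather the
+ // bits in reversed order.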
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint32_t>(o_byte) << 24);
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ break;
+ }
+ case SEB:
+ case SEH:
+ case WSBH:
+ alu_out = 0x12345678;
+ UNREACHABLE();
+ break;
+ default: {
+ const uint8_t bp2 = get_instr()->Bp2Value();
+ sa >>= kBp2Bits;
+ switch (sa) {
+ case ALIGN: {
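+ // ALIGN splices the sources: the low 4 - bp2 bytes of rt become the
+ // upper part of the result, the high bp2 bytes of rs the lower part.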
+ if (bp2 == 0) {
+ alu_out = static_cast<int32_t>(rt());
+ } else {
+ uint64_t rt_hi = rt() << (8 * bp2);
+ uint64_t rs_lo = rs() >> (8 * (4 - bp2));
+ alu_out = static_cast<int32_t>(rt_hi | rs_lo);
+ }
+ break;
+ }
+ default:
+ alu_out = 0x12345678;
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+ }
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case DBSHFL: {
+ int32_t sa = get_instr()->SaFieldRaw() >> kSaShift;
+ switch (sa) {
+ case DBITSWAP: {
+ switch (sa) {
+ case DBITSWAP_SA: { // Mips64r6
+ uint64_t input = static_cast<uint64_t>(rt());
+ uint64_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bits within each individual byte.
+ for (int i = 0; i < 8; i++) {
+ output = output >> 8;
+ i_byte = input & 0xff;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
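+ // Same multiply-and-mask reversal as the 32-bit BITSWAP above,
+ // applied to each of the eight bytes.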
+ o_byte =
+ static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | ((static_cast<uint64_t>(o_byte) << 56));
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ break;
+ }
+ }
+ break;
+ }
+ case DSBH:
+ case DSHD:
+ alu_out = 0x12345678;
+ UNREACHABLE();
+ break;
+ default: {
+ const uint8_t bp3 = get_instr()->Bp3Value();
+ sa >>= kBp3Bits;
+ switch (sa) {
+ case DALIGN: {
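+ // DALIGN is the 64-bit analogue of ALIGN: the low 8 - bp3 bytes of
+ // rt are joined with the high bp3 bytes of rs.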
+ if (bp3 == 0) {
+ alu_out = static_cast<int64_t>(rt());
+ } else {
+ uint64_t rt_hi = rt() << (8 * bp3);
+ uint64_t rs_lo = rs() >> (8 * (8 - bp3));
+ alu_out = static_cast<int64_t>(rt_hi | rs_lo);
+ }
+ break;
+ }
+ default:
+ alu_out = 0x12345678;
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
+ }
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -3934,97 +3780,46 @@ void Simulator::DecodeTypeRegisterSPECIAL3(Instruction* instr,
void Simulator::DecodeTypeRegister(Instruction* instr) {
- // Instruction fields.
- const Opcode op = instr->OpcodeFieldRaw();
- const int32_t rs_reg = instr->RsValue();
- const int64_t rs = get_register(rs_reg);
- const uint64_t rs_u = static_cast<uint32_t>(rs);
- const int32_t rt_reg = instr->RtValue();
- const int64_t rt = get_register(rt_reg);
- const uint64_t rt_u = static_cast<uint32_t>(rt);
- const int32_t rd_reg = instr->RdValue();
-
- const int32_t fr_reg = instr->FrValue();
- const int32_t fs_reg = instr->FsValue();
- const int32_t ft_reg = instr->FtValue();
- const int32_t fd_reg = instr->FdValue();
- int64_t i64hilo = 0;
- uint64_t u64hilo = 0;
-
- // ALU output.
- // It should not be used as is. Instructions using it should always
- // initialize it first.
- int64_t alu_out = 0x12345678;
-
- // For break and trap instructions.
- bool do_interrupt = false;
-
- // For jr and jalr.
- // Get current pc.
- int64_t current_pc = get_pc();
- // Next pc
- int64_t next_pc = 0;
- int32_t return_addr_reg = 31;
-
- int64_t i128resultH;
- int64_t i128resultL;
-
- // Set up the variables if needed before executing the instruction.
- ConfigureTypeRegister(instr,
- &alu_out,
- &i64hilo,
- &u64hilo,
- &next_pc,
- &return_addr_reg,
- &do_interrupt,
- &i128resultH,
- &i128resultL);
-
- // ---------- Raise exceptions triggered.
- SignalExceptions();
+ set_instr(instr);
// ---------- Execution.
- switch (op) {
+ switch (instr->OpcodeFieldRaw()) {
case COP1:
- DecodeTypeRegisterCOP1(instr, rs_reg, rs, rs_u, rt_reg, rt, rt_u, rd_reg,
- fr_reg, fs_reg, ft_reg, fd_reg, alu_out);
+ DecodeTypeRegisterCOP1();
break;
case COP1X:
- DecodeTypeRegisterCOP1X(instr, fr_reg, fs_reg, ft_reg, fd_reg);
+ DecodeTypeRegisterCOP1X();
break;
case SPECIAL:
- DecodeTypeRegisterSPECIAL(
- instr, rs_reg, rs, rs_u, rt_reg, rt, rt_u, rd_reg, fr_reg, fs_reg,
- ft_reg, fd_reg, i64hilo, u64hilo, alu_out, do_interrupt, current_pc,
- next_pc, return_addr_reg, i128resultH, i128resultL);
+ DecodeTypeRegisterSPECIAL();
break;
case SPECIAL2:
- DecodeTypeRegisterSPECIAL2(instr, rd_reg, alu_out);
+ DecodeTypeRegisterSPECIAL2();
break;
case SPECIAL3:
switch (instr->FunctionFieldRaw()) {
case BSHFL: {
- int sa = instr->SaValue();
- sa >>= kBp2Bits;
- switch (sa) {
+ int32_t saVal = sa();
+ saVal >>= kBp2Bits;
+ switch (saVal) {
case ALIGN: {
- DecodeTypeRegisterSPECIAL3(instr, rt_reg, rd_reg, alu_out);
+ DecodeTypeRegisterSPECIAL3();
break;
}
}
}
case DBSHFL: {
- int sa = instr->SaValue();
- sa >>= kBp3Bits;
- switch (sa) {
+ int32_t saVal = sa();
+ saVal >>= kBp3Bits;
+ switch (saVal) {
case DALIGN: {
- DecodeTypeRegisterSPECIAL3(instr, rt_reg, rd_reg, alu_out);
+ DecodeTypeRegisterSPECIAL3();
break;
}
}
}
default:
- DecodeTypeRegisterSPECIAL3(instr, rt_reg, rd_reg, alu_out);
+ DecodeTypeRegisterSPECIAL3();
break;
}
break;
@@ -4032,60 +3827,68 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
// so we can use the default here to set the destination register in common
// cases.
default:
- set_register(rd_reg, alu_out);
- TraceRegWr(alu_out);
+ UNREACHABLE();
}
}
+// Branch instructions common part.
+#define BranchAndLinkHelper(do_branch) \
+ do { \
+ execute_branch_delay_instruction = true; \
+ if (do_branch) { \
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; \
+ set_register(31, current_pc + kBranchReturnOffset); \
+ } else { \
+ next_pc = current_pc + kBranchReturnOffset; \
+ } \
+ } while (false)
+
+
+#define BranchHelper(do_branch) \
+ do { \
+ execute_branch_delay_instruction = true; \
+ if (do_branch) { \
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize; \
+ } else { \
+ next_pc = current_pc + kBranchReturnOffset; \
+ } \
+ } while (false)
+
+
// Type 2: instructions using a 16-bit immediate. (e.g. addi, beq).
void Simulator::DecodeTypeImmediate(Instruction* instr) {
// Instruction fields.
- Opcode op = instr->OpcodeFieldRaw();
+ Opcode op = instr->OpcodeFieldRaw();
int32_t rs_reg = instr->RsValue();
- int64_t rs = get_register(instr->RsValue());
- uint64_t rs_u = static_cast<uint64_t>(rs);
- int32_t rt_reg = instr->RtValue(); // Destination register.
- int64_t rt = get_register(rt_reg);
- int16_t imm16 = instr->Imm16Value();
+ int64_t rs = get_register(instr->RsValue());
+ uint64_t rs_u = static_cast<uint64_t>(rs);
+ int32_t rt_reg = instr->RtValue(); // Destination register.
+ int64_t rt = get_register(rt_reg);
+ int16_t imm16 = instr->Imm16Value();
int32_t imm18 = instr->Imm18Value();
- int32_t imm19 = instr->Imm19Value();
int32_t imm21 = instr->Imm21Value();
int32_t imm26 = instr->Imm26Value();
- int32_t ft_reg = instr->FtValue(); // Destination register.
- int64_t ft = get_fpu_register(ft_reg);
+ int32_t ft_reg = instr->FtValue(); // Destination register.
+ int64_t ft = get_fpu_register(ft_reg);
// Zero-extended immediate.
uint64_t oe_imm16 = 0xffff & imm16;
// Sign-extended immediate.
int64_t se_imm16 = imm16;
int64_t se_imm18 = imm18 | ((imm18 & 0x20000) ? 0xfffffffffffc0000 : 0);
- int64_t se_imm19 = imm19 | ((imm19 & 0x40000) ? 0xfffffffffff80000 : 0);
int64_t se_imm26 = imm26 | ((imm26 & 0x2000000) ? 0xfffffffffc000000 : 0);
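+ // The conditional OR sign-extends: when the immediate's top bit is set,
+ // the upper bits of the 64-bit value are filled with ones.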
-
// Get current pc.
int64_t current_pc = get_pc();
// Next pc.
int64_t next_pc = bad_ra;
- // pc increment
- int16_t pc_increment;
// Used for conditional branch instructions.
- bool do_branch = false;
bool execute_branch_delay_instruction = false;
// Used for arithmetic instructions.
int64_t alu_out = 0;
- // Floating point.
- double fp_out = 0.0;
- uint32_t cc, cc_value, fcsr_cc;
// Used for memory instructions.
int64_t addr = 0x0;
- // Value to be written in memory.
- uint64_t mem_value = 0x0;
// Alignment for 32-bit integers used in LWL, LWR, etc.
const int kInt32AlignmentMask = sizeof(uint32_t) - 1;
@@ -4094,11 +3897,11 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
// ------------- COP1. Coprocessor instructions.
case COP1:
switch (instr->RsFieldRaw()) {
- case BC1: // Branch on coprocessor condition.
- cc = instr->FBccValue();
- fcsr_cc = get_fcsr_condition_bit(cc);
- cc_value = test_fcsr_bit(fcsr_cc);
- do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
+ case BC1: { // Branch on coprocessor condition.
+ uint32_t cc = instr->FBccValue();
+ uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
+ uint32_t cc_value = test_fcsr_bit(fcsr_cc);
+ bool do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
execute_branch_delay_instruction = true;
// Set next_pc.
if (do_branch) {
@@ -4107,21 +3910,20 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
next_pc = current_pc + kBranchReturnOffset;
}
break;
+ }
case BC1EQZ:
- do_branch = (ft & 0x1) ? false : true;
execute_branch_delay_instruction = true;
// Set next_pc.
- if (do_branch) {
+ if (!(ft & 0x1)) {
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
} else {
next_pc = current_pc + kBranchReturnOffset;
}
break;
case BC1NEZ:
- do_branch = (ft & 0x1) ? true : false;
execute_branch_delay_instruction = true;
// Set next_pc.
- if (do_branch) {
+ if (ft & 0x1) {
next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
} else {
next_pc = current_pc + kBranchReturnOffset;
@@ -4135,54 +3937,35 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
- do_branch = (rs < 0);
- break;
- case BLTZAL:
- do_branch = rs < 0;
+ BranchHelper(rs < 0);
break;
case BGEZ:
- do_branch = rs >= 0;
+ BranchHelper(rs >= 0);
+ break;
+ case BLTZAL:
+ BranchAndLinkHelper(rs < 0);
break;
case BGEZAL:
- do_branch = rs >= 0;
+ BranchAndLinkHelper(rs >= 0);
break;
default:
UNREACHABLE();
}
- switch (instr->RtFieldRaw()) {
- case BLTZ:
- case BLTZAL:
- case BGEZ:
- case BGEZAL:
- // Branch instructions common part.
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + kBranchReturnOffset);
- }
- } else {
- next_pc = current_pc + kBranchReturnOffset;
- }
- default:
- break;
- }
- break; // case REGIMM.
+ break; // case REGIMM.
// ------------- Branch instructions.
// When comparing to zero, the encoding of the rt field is always 0, so we
// don't need to replace rt with zero.
case BEQ:
- do_branch = (rs == rt);
+ BranchHelper(rs == rt);
break;
case BNE:
- do_branch = rs != rt;
+ BranchHelper(rs != rt);
break;
case BLEZ:
- do_branch = rs <= 0;
+ BranchHelper(rs <= 0);
break;
case BGTZ:
- do_branch = rs > 0;
+ BranchHelper(rs > 0);
break;
case POP66: {
if (rs_reg) { // BEQZC
@@ -4216,52 +3999,53 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
case DADDI:
if (HaveSameSign(rs, se_imm16)) {
if (rs > 0) {
- exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - se_imm16);
+ if (rs > Registers::kMaxValue - se_imm16) {
+ SignalException(kIntegerOverflow);
+ }
} else if (rs < 0) {
- exceptions[kIntegerUnderflow] =
- rs < (Registers::kMinValue - se_imm16);
+ if (rs < Registers::kMinValue - se_imm16) {
+ SignalException(kIntegerUnderflow);
+ }
}
}
- alu_out = rs + se_imm16;
+ SetResult(rt_reg, rs + se_imm16);
break;
case ADDIU: {
int32_t alu32_out = static_cast<int32_t>(rs + se_imm16);
// Sign-extend result of the 32-bit operation into the 64-bit register.
- alu_out = static_cast<int64_t>(alu32_out);
+ SetResult(rt_reg, static_cast<int64_t>(alu32_out));
break;
}
case DADDIU:
- alu_out = rs + se_imm16;
+ SetResult(rt_reg, rs + se_imm16);
break;
case SLTI:
- alu_out = (rs < se_imm16) ? 1 : 0;
+ SetResult(rt_reg, rs < se_imm16 ? 1 : 0);
break;
case SLTIU:
- alu_out = (rs_u < static_cast<uint64_t>(se_imm16)) ? 1 : 0;
+ SetResult(rt_reg, rs_u < static_cast<uint64_t>(se_imm16) ? 1 : 0);
break;
case ANDI:
- alu_out = rs & oe_imm16;
+ SetResult(rt_reg, rs & oe_imm16);
break;
case ORI:
- alu_out = rs | oe_imm16;
+ SetResult(rt_reg, rs | oe_imm16);
break;
case XORI:
- alu_out = rs ^ oe_imm16;
+ SetResult(rt_reg, rs ^ oe_imm16);
break;
case LUI: {
int32_t alu32_out = static_cast<int32_t>(oe_imm16 << 16);
// Sign-extend result of the 32-bit operation into the 64-bit register.
- alu_out = static_cast<int64_t>(alu32_out);
+ SetResult(rt_reg, static_cast<int64_t>(alu32_out));
break;
}
// ------------- Memory instructions.
case LB:
- addr = rs + se_imm16;
- alu_out = ReadB(addr);
+ set_register(rt_reg, ReadB(rs + se_imm16));
break;
case LH:
- addr = rs + se_imm16;
- alu_out = ReadH(addr, instr);
+ set_register(rt_reg, ReadH(rs + se_imm16, instr));
break;
case LWL: {
// al_offset is offset of the effective address within an aligned word.
@@ -4272,27 +4056,23 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
alu_out = ReadW(addr, instr);
alu_out <<= byte_shift * 8;
alu_out |= rt & mask;
+ set_register(rt_reg, alu_out);
break;
}
case LW:
- addr = rs + se_imm16;
- alu_out = ReadW(addr, instr);
+ set_register(rt_reg, ReadW(rs + se_imm16, instr));
break;
case LWU:
- addr = rs + se_imm16;
- alu_out = ReadWU(addr, instr);
+ set_register(rt_reg, ReadWU(rs + se_imm16, instr));
break;
case LD:
- addr = rs + se_imm16;
- alu_out = Read2W(addr, instr);
+ set_register(rt_reg, Read2W(rs + se_imm16, instr));
break;
case LBU:
- addr = rs + se_imm16;
- alu_out = ReadBU(addr);
+ set_register(rt_reg, ReadBU(rs + se_imm16));
break;
case LHU:
- addr = rs + se_imm16;
- alu_out = ReadHU(addr, instr);
+ set_register(rt_reg, ReadHU(rs + se_imm16, instr));
break;
case LWR: {
// al_offset is offset of the effective address within an aligned word.
@@ -4303,59 +4083,68 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
alu_out = ReadW(addr, instr);
alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
alu_out |= rt & mask;
+ set_register(rt_reg, alu_out);
break;
}
case SB:
- addr = rs + se_imm16;
+ WriteB(rs + se_imm16, static_cast<int8_t>(rt));
break;
case SH:
- addr = rs + se_imm16;
+ WriteH(rs + se_imm16, static_cast<uint16_t>(rt), instr);
break;
case SWL: {
uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
uint8_t byte_shift = kInt32AlignmentMask - al_offset;
uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
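+      // SWL merges the high-order bytes of rt into the word containing the
+      // unaligned address; the mask keeps the memory bytes SWL must not
+      // touch.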
addr = rs + se_imm16 - al_offset;
- mem_value = ReadW(addr, instr) & mask;
+ uint64_t mem_value = ReadW(addr, instr) & mask;
mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
+ WriteW(addr, static_cast<int32_t>(mem_value), instr);
break;
}
case SW:
+ WriteW(rs + se_imm16, static_cast<int32_t>(rt), instr);
+ break;
case SD:
- addr = rs + se_imm16;
+ Write2W(rs + se_imm16, rt, instr);
break;
case SWR: {
uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
uint32_t mask = (1 << al_offset * 8) - 1;
addr = rs + se_imm16 - al_offset;
- mem_value = ReadW(addr, instr);
+ uint64_t mem_value = ReadW(addr, instr);
mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ WriteW(addr, static_cast<int32_t>(mem_value), instr);
break;
}
case LWC1:
- addr = rs + se_imm16;
- alu_out = ReadW(addr, instr);
+ set_fpu_register(ft_reg, kFPUInvalidResult); // Trash upper 32 bits.
+ set_fpu_register_word(ft_reg, ReadW(rs + se_imm16, instr));
break;
case LDC1:
- addr = rs + se_imm16;
- fp_out = ReadD(addr, instr);
+ set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr));
break;
- case SWC1:
+ case SWC1: {
+ int32_t alu_out_32 = static_cast<int32_t>(get_fpu_register(ft_reg));
+ WriteW(rs + se_imm16, alu_out_32, instr);
+ break;
+ }
case SDC1:
- addr = rs + se_imm16;
+ WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr);
break;
// ------------- JIALC and BNEZC instructions.
- case POP76:
+ case POP76: {
// Next pc.
next_pc = rt + se_imm16;
// The instruction after the jump is NOT executed.
- pc_increment = Instruction::kInstrSize;
+ uint16_t pc_increment = Instruction::kInstrSize;
if (instr->IsLinkingInstruction()) {
set_register(31, current_pc + pc_increment);
}
set_pc(next_pc);
pc_modified_ = true;
break;
+ }
// ------------- PC-Relative instructions.
case PCREL: {
// rt field: checking 5 bits.
@@ -4369,6 +4158,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
alu_out = current_pc + (se_imm16 << 16);
break;
default: {
+ int32_t imm19 = instr->Imm19Value();
// rt field: checking the most significant 3 bits.
rt = (imm21 >> kImm18Bits);
switch (rt) {
@@ -4382,28 +4172,29 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
rt = (imm21 >> kImm19Bits);
switch (rt) {
case LWUPC: {
- int32_t offset = imm19;
// Set sign.
- offset <<= (kOpcodeBits + kRsBits + 2);
- offset >>= (kOpcodeBits + kRsBits + 2);
- addr = current_pc + (offset << 2);
+ imm19 <<= (kOpcodeBits + kRsBits + 2);
+ imm19 >>= (kOpcodeBits + kRsBits + 2);
+ addr = current_pc + (imm19 << 2);
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
alu_out = *ptr;
break;
}
case LWPC: {
- int32_t offset = imm19;
// Set sign.
- offset <<= (kOpcodeBits + kRsBits + 2);
- offset >>= (kOpcodeBits + kRsBits + 2);
- addr = current_pc + (offset << 2);
+ imm19 <<= (kOpcodeBits + kRsBits + 2);
+ imm19 >>= (kOpcodeBits + kRsBits + 2);
+ addr = current_pc + (imm19 << 2);
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
alu_out = *ptr;
break;
}
- case ADDIUPC:
+ case ADDIUPC: {
+ int64_t se_imm19 =
+ imm19 | ((imm19 & 0x40000) ? 0xfffffffffff80000 : 0);
alu_out = current_pc + (se_imm19 << 2);
break;
+ }
default:
UNREACHABLE();
break;
@@ -4414,100 +4205,13 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
break;
}
}
+ set_register(rs_reg, alu_out);
break;
}
default:
UNREACHABLE();
}
- // ---------- Raise exceptions triggered.
- SignalExceptions();
-
- // ---------- Execution.
- switch (op) {
- // ------------- Branch instructions.
- case BEQ:
- case BNE:
- case BLEZ:
- case BGTZ:
- // Branch instructions common part.
- execute_branch_delay_instruction = true;
- // Set next_pc.
- if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
- if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2* Instruction::kInstrSize);
- }
- } else {
- next_pc = current_pc + 2 * Instruction::kInstrSize;
- }
- break;
- // ------------- Arithmetic instructions.
- case ADDI:
- case DADDI:
- case ADDIU:
- case DADDIU:
- case SLTI:
- case SLTIU:
- case ANDI:
- case ORI:
- case XORI:
- case LUI:
- set_register(rt_reg, alu_out);
- TraceRegWr(alu_out);
- break;
- // ------------- Memory instructions.
- case LB:
- case LH:
- case LWL:
- case LW:
- case LWU:
- case LD:
- case LBU:
- case LHU:
- case LWR:
- set_register(rt_reg, alu_out);
- break;
- case SB:
- WriteB(addr, static_cast<int8_t>(rt));
- break;
- case SH:
- WriteH(addr, static_cast<uint16_t>(rt), instr);
- break;
- case SWL:
- WriteW(addr, static_cast<int32_t>(mem_value), instr);
- break;
- case SW:
- WriteW(addr, static_cast<int32_t>(rt), instr);
- break;
- case SD:
- Write2W(addr, rt, instr);
- break;
- case SWR:
- WriteW(addr, static_cast<int32_t>(mem_value), instr);
- break;
- case LWC1:
- set_fpu_register(ft_reg, kFPUInvalidResult); // Trash upper 32 bits.
- set_fpu_register_word(ft_reg, static_cast<int32_t>(alu_out));
- break;
- case LDC1:
- set_fpu_register_double(ft_reg, fp_out);
- break;
- case SWC1:
- addr = rs + se_imm16;
- WriteW(addr, static_cast<int32_t>(get_fpu_register(ft_reg)), instr);
- break;
- case SDC1:
- addr = rs + se_imm16;
- WriteD(addr, get_fpu_register_double(ft_reg), instr);
- break;
- case PCREL:
- set_register(rs_reg, alu_out);
- default:
- break;
- }
-
-
if (execute_branch_delay_instruction) {
// Execute branch delay slot
// We don't check for end_sim_pc. First it should not be met as the current
@@ -4523,6 +4227,9 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
}
}
+#undef BranchHelper
+#undef BranchAndLinkHelper
+
// Type 3: instructions using a 26-bit immediate. (e.g. j, jal).
void Simulator::DecodeTypeJump(Instruction* instr) {
@@ -4567,7 +4274,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
}
- switch (instr->InstructionType()) {
+ switch (instr->InstructionType(Instruction::TypeChecks::EXTRA)) {
case Instruction::kRegisterType:
DecodeTypeRegister(instr);
break;
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index dea9e30adf..e45cbd449e 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -325,57 +325,54 @@ class Simulator {
inline int32_t SetDoubleHIW(double* addr);
inline int32_t SetDoubleLOW(double* addr);
- // functions called from DecodeTypeRegister
- void DecodeTypeRegisterCOP1(Instruction* instr, const int32_t rs_reg,
- const int64_t rs, const uint64_t rs_u,
- const int32_t rt_reg, const int64_t rt,
- const uint64_t rt_u, const int32_t rd_reg,
- const int32_t fr_reg, const int32_t fs_reg,
- const int32_t ft_reg, const int32_t fd_reg,
- int64_t& alu_out);
-
- void DecodeTypeRegisterCOP1X(Instruction* instr, const int32_t fr_reg,
- const int32_t fs_reg, const int32_t ft_reg,
- const int32_t fd_reg);
-
- void DecodeTypeRegisterSPECIAL(
- Instruction* instr, const int32_t rs_reg, const int64_t rs,
- const uint64_t rs_u, const int32_t rt_reg, const int64_t rt,
- const uint64_t rt_u, const int32_t rd_reg, const int32_t fr_reg,
- const int32_t fs_reg, const int32_t ft_reg, const int32_t fd_reg,
- const int64_t i64hilo, const uint64_t u64hilo, const int64_t alu_out,
- const bool do_interrupt, const int64_t current_pc, const int64_t next_pc,
- const int32_t return_addr_reg, const int64_t i128resultH,
- const int64_t i128resultL);
-
-
- void DecodeTypeRegisterSPECIAL2(Instruction* instr, const int32_t rd_reg,
- const int64_t alu_out);
-
- void DecodeTypeRegisterSPECIAL3(Instruction* instr, const int32_t rt_reg,
- const int32_t rd_reg, const int64_t alu_out);
-
- void DecodeTypeRegisterSRsType(Instruction* instr, const int32_t fs_reg,
- const int32_t ft_reg, const int32_t fd_reg);
-
- void DecodeTypeRegisterDRsType(Instruction* instr, const int32_t fs_reg,
- const int32_t ft_reg, const int32_t fd_reg);
-
- void DecodeTypeRegisterWRsType(Instruction* instr, const int32_t fs_reg,
- const int32_t ft_reg, const int32_t fd_reg,
- int64_t& alu_out);
-
- void DecodeTypeRegisterLRsType(Instruction* instr, const int32_t fs_reg,
- const int32_t fd_reg, const int32_t ft_reg);
+ // functions called from DecodeTypeRegister.
+ void DecodeTypeRegisterCOP1();
+
+ void DecodeTypeRegisterCOP1X();
+
+ void DecodeTypeRegisterSPECIAL();
+
+ void DecodeTypeRegisterSPECIAL2();
+
+ void DecodeTypeRegisterSPECIAL3();
+
+ void DecodeTypeRegisterSRsType();
+
+ void DecodeTypeRegisterDRsType();
+
+ void DecodeTypeRegisterWRsType();
+
+ void DecodeTypeRegisterLRsType();
+
// Executing is handled based on the instruction type.
void DecodeTypeRegister(Instruction* instr);
- // Helper function for DecodeTypeRegister.
- void ConfigureTypeRegister(Instruction* instr, int64_t* alu_out,
- int64_t* i64hilo, uint64_t* u64hilo,
- int64_t* next_pc, int* return_addr_reg,
- bool* do_interrupt, int64_t* result128H,
- int64_t* result128L);
+ Instruction* currentInstr_;
+ inline Instruction* get_instr() const { return currentInstr_; }
+ inline void set_instr(Instruction* instr) { currentInstr_ = instr; }
+
+ inline int32_t rs_reg() const { return currentInstr_->RsValue(); }
+ inline int64_t rs() const { return get_register(rs_reg()); }
+ inline uint64_t rs_u() const {
+ return static_cast<uint64_t>(get_register(rs_reg()));
+ }
+ inline int32_t rt_reg() const { return currentInstr_->RtValue(); }
+ inline int64_t rt() const { return get_register(rt_reg()); }
+ inline uint64_t rt_u() const {
+ return static_cast<uint64_t>(get_register(rt_reg()));
+ }
+ inline int32_t rd_reg() const { return currentInstr_->RdValue(); }
+ inline int32_t fr_reg() const { return currentInstr_->FrValue(); }
+ inline int32_t fs_reg() const { return currentInstr_->FsValue(); }
+ inline int32_t ft_reg() const { return currentInstr_->FtValue(); }
+ inline int32_t fd_reg() const { return currentInstr_->FdValue(); }
+ inline int32_t sa() const { return currentInstr_->SaValue(); }
+
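+ // Writes alu_out to the given general register and records the write for
+ // register tracing.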
+ inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
+ set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
+ }
void DecodeTypeImmediate(Instruction* instr);
void DecodeTypeJump(Instruction* instr);
@@ -427,10 +424,9 @@ class Simulator {
kDivideByZero,
kNumExceptions
};
- int16_t exceptions[kNumExceptions];
// Exceptions.
- void SignalExceptions();
+ void SignalException(Exception e);
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
@@ -497,9 +493,10 @@ class Simulator {
#ifdef MIPS_ABI_N64
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
- static_cast<int>( \
- Simulator::current(Isolate::Current()) \
- ->Call(entry, 10, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+ static_cast<int>(Simulator::current(Isolate::Current()) \
+ ->Call(entry, 10, p0, p1, p2, p3, p4, \
+ reinterpret_cast<int64_t*>(p5), p6, p7, NULL, \
+ p8))
#else // Must be O32 Abi.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
static_cast<int>( \
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index af01a18a8f..80296586d2 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -178,12 +178,12 @@ function ObserverCreate(callback, acceptList) {
function ObserverGetCallback(observer) {
- return IS_SPEC_FUNCTION(observer) ? observer : observer.callback;
+ return IS_CALLABLE(observer) ? observer : observer.callback;
}
function ObserverGetAcceptTypes(observer) {
- return IS_SPEC_FUNCTION(observer) ? defaultAcceptTypes : observer.accept;
+ return IS_CALLABLE(observer) ? defaultAcceptTypes : observer.accept;
}
@@ -238,8 +238,8 @@ function ObjectInfoGetNotifier(objectInfo) {
function ChangeObserversIsOptimized(changeObservers) {
- return IS_SPEC_FUNCTION(changeObservers) ||
- IS_SPEC_FUNCTION(changeObservers.callback);
+ return IS_CALLABLE(changeObservers) ||
+ IS_CALLABLE(changeObservers.callback);
}
@@ -330,7 +330,7 @@ function ConvertAcceptListToTypeMap(arg) {
if (!IS_SPEC_OBJECT(arg)) throw MakeTypeError(kObserveInvalidAccept);
- var len = $toInteger(arg.length);
+ var len = TO_INTEGER(arg.length);
if (len < 0) len = 0;
return TypeMapCreateFromList(arg, len);
@@ -390,7 +390,7 @@ function ObjectObserve(object, callback, acceptList) {
throw MakeTypeError(kObserveGlobalProxy, "observe");
if (%IsAccessCheckNeeded(object))
throw MakeTypeError(kObserveAccessChecked, "observe");
- if (!IS_SPEC_FUNCTION(callback))
+ if (!IS_CALLABLE(callback))
throw MakeTypeError(kObserveNonFunction, "observe");
if (ObjectIsFrozen(callback))
throw MakeTypeError(kObserveCallbackFrozen);
@@ -413,7 +413,7 @@ function ObjectUnobserve(object, callback) {
throw MakeTypeError(kObserveNonObject, "unobserve", "unobserve");
if (%IsJSGlobalProxy(object))
throw MakeTypeError(kObserveGlobalProxy, "unobserve");
- if (!IS_SPEC_FUNCTION(callback))
+ if (!IS_CALLABLE(callback))
throw MakeTypeError(kObserveNonFunction, "unobserve");
var objectInfo = ObjectInfoGet(object);
@@ -590,7 +590,7 @@ function ObjectNotifierPerformChange(changeType, changeFn) {
throw MakeTypeError(kObserveNotifyNonNotifier);
if (!IS_STRING(changeType))
throw MakeTypeError(kObservePerformNonString);
- if (!IS_SPEC_FUNCTION(changeFn))
+ if (!IS_CALLABLE(changeFn))
throw MakeTypeError(kObservePerformNonFunction);
var performChangeFn = %GetObjectContextNotifierPerformChange(objectInfo);
@@ -603,7 +603,7 @@ function NativeObjectNotifierPerformChange(objectInfo, changeType, changeFn) {
var changeRecord;
try {
- changeRecord = %_CallFunction(UNDEFINED, changeFn);
+ changeRecord = changeFn();
} finally {
ObjectInfoRemovePerformingType(objectInfo, changeType);
}
@@ -660,7 +660,7 @@ function CallbackDeliverPending(callback) {
function ObjectDeliverChangeRecords(callback) {
- if (!IS_SPEC_FUNCTION(callback))
+ if (!IS_CALLABLE(callback))
throw MakeTypeError(kObserveNonFunction, "deliverChangeRecords");
while (CallbackDeliverPending(callback)) {}
@@ -707,15 +707,14 @@ $observeEnqueueSpliceRecord = EnqueueSpliceRecord;
$observeBeginPerformSplice = BeginPerformSplice;
$observeEndPerformSplice = EndPerformSplice;
-utils.ExportToRuntime(function(to) {
- to.ObserveNotifyChange = NotifyChange;
- to.ObserveEnqueueSpliceRecord = EnqueueSpliceRecord;
- to.ObserveBeginPerformSplice = BeginPerformSplice;
- to.ObserveEndPerformSplice = EndPerformSplice;
- to.ObserveNativeObjectObserve = NativeObjectObserve;
- to.ObserveNativeObjectGetNotifier = NativeObjectGetNotifier;
- to.ObserveNativeObjectNotifierPerformChange =
- NativeObjectNotifierPerformChange;
-});
+%InstallToContext([
+ "native_object_get_notifier", NativeObjectGetNotifier,
+ "native_object_notifier_perform_change", NativeObjectNotifierPerformChange,
+ "native_object_observe", NativeObjectObserve,
+ "observers_begin_perform_splice", BeginPerformSplice,
+ "observers_end_perform_splice", EndPerformSplice,
+ "observers_enqueue_splice", EnqueueSpliceRecord,
+ "observers_notify_change", NotifyChange,
+]);
})
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 815a5b53f8..3ce7672c1c 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/objects.h"
#include "src/bootstrapper.h"
#include "src/disasm.h"
@@ -22,6 +22,7 @@ void Object::ObjectVerify() {
} else {
HeapObject::cast(this)->HeapObjectVerify();
}
+ CHECK(!IsConstructor() || IsCallable());
}
@@ -36,6 +37,8 @@ void Object::VerifyPointer(Object* p) {
void Smi::SmiVerify() {
CHECK(IsSmi());
+ CHECK(!IsCallable());
+ CHECK(!IsConstructor());
}
@@ -143,6 +146,9 @@ void HeapObject::HeapObjectVerify() {
case JS_MAP_ITERATOR_TYPE:
JSMapIterator::cast(this)->JSMapIteratorVerify();
break;
+ case JS_ITERATOR_RESULT_TYPE:
+ JSIteratorResult::cast(this)->JSIteratorResultVerify();
+ break;
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapVerify();
break;
@@ -205,7 +211,6 @@ void Symbol::SymbolVerify() {
CHECK(HasHashCode());
CHECK_GT(Hash(), 0u);
CHECK(name()->IsUndefined() || name()->IsString());
- CHECK(flags()->IsSmi());
}
@@ -225,6 +230,8 @@ void ByteArray::ByteArrayVerify() {
void BytecodeArray::BytecodeArrayVerify() {
// TODO(oth): Walk bytecodes and immediate values to validate sanity.
CHECK(IsBytecodeArray());
+ CHECK(constant_pool()->IsFixedArray());
+ VerifyHeapPointer(constant_pool());
}
@@ -337,7 +344,7 @@ void Map::DictionaryMapVerify() {
CHECK(is_dictionary_map());
CHECK(instance_descriptors()->IsEmpty());
CHECK_EQ(0, unused_property_fields());
- CHECK_EQ(StaticVisitorBase::GetVisitorId(this), visitor_id());
+ CHECK_EQ(Heap::GetStaticVisitorIdForMap(this), visitor_id());
}
@@ -531,6 +538,7 @@ void JSFunction::JSFunctionVerify() {
CHECK(next_function_link() == NULL ||
next_function_link()->IsUndefined() ||
next_function_link()->IsJSFunction());
+ CHECK(map()->is_callable());
}
@@ -737,6 +745,14 @@ void JSMapIterator::JSMapIteratorVerify() {
}
+void JSIteratorResult::JSIteratorResultVerify() {
+ CHECK(IsJSIteratorResult());
+ JSObjectVerify();
+ VerifyPointer(done());
+ VerifyPointer(value());
+}
+
+
void JSWeakMap::JSWeakMapVerify() {
CHECK(IsJSWeakMap());
JSObjectVerify();
@@ -809,6 +825,7 @@ void JSFunctionProxy::JSFunctionProxyVerify() {
JSProxyVerify();
VerifyPointer(call_trap());
VerifyPointer(construct_trap());
+ CHECK(map()->is_callable());
}
@@ -880,11 +897,18 @@ void PrototypeInfo::PrototypeInfoVerify() {
void AccessorInfo::AccessorInfoVerify() {
VerifyPointer(name());
- VerifyPointer(flag());
VerifyPointer(expected_receiver_type());
}
+void SloppyBlockWithEvalContextExtension::
+ SloppyBlockWithEvalContextExtensionVerify() {
+ CHECK(IsSloppyBlockWithEvalContextExtension());
+ VerifyObjectField(kScopeInfoOffset);
+ VerifyObjectField(kExtensionOffset);
+}
+
+
void ExecutableAccessorInfo::ExecutableAccessorInfoVerify() {
CHECK(IsExecutableAccessorInfo());
AccessorInfoVerify();
@@ -980,12 +1004,8 @@ void Script::ScriptVerify() {
CHECK(IsScript());
VerifyPointer(source());
VerifyPointer(name());
- line_offset()->SmiVerify();
- column_offset()->SmiVerify();
VerifyPointer(wrapper());
- type()->SmiVerify();
VerifyPointer(line_ends());
- VerifyPointer(id());
}
@@ -1014,9 +1034,6 @@ void DebugInfo::DebugInfoVerify() {
void BreakPointInfo::BreakPointInfoVerify() {
CHECK(IsBreakPointInfo());
- code_position()->SmiVerify();
- source_position()->SmiVerify();
- statement_position()->SmiVerify();
VerifyPointer(break_point_objects());
}
#endif // VERIFY_HEAP
@@ -1228,7 +1245,8 @@ bool CanLeak(Object* obj, Heap* heap, bool skip_weak_cell) {
if (obj->IsMap()) {
Map* map = Map::cast(obj);
for (int i = 0; i < Heap::kStrongRootListLength; i++) {
- if (map == heap->roots_array_start()[i]) return false;
+ Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
+ if (map == heap->root(root_index)) return false;
}
return true;
}
@@ -1295,7 +1313,7 @@ void Code::VerifyRecompiledCode(Code* old_code, Code* new_code) {
// Check call target for equality unless it's an IC or an interrupt check.
// In both cases they may be patched to be something else.
if (!old_target->is_handler() && !old_target->is_inline_cache_stub() &&
- new_target == isolate->builtins()->builtin(Builtins::kInterruptCheck)) {
+ new_target != isolate->builtins()->builtin(Builtins::kInterruptCheck)) {
CHECK_EQ(old_target, new_target);
}
old_it.next();
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index b7aba56f5a..3d39278cce 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -14,7 +14,7 @@
#include "src/base/atomicops.h"
#include "src/base/bits.h"
-#include "src/contexts.h"
+#include "src/contexts-inl.h"
#include "src/conversions-inl.h"
#include "src/factory.h"
#include "src/field-index-inl.h"
@@ -86,14 +86,6 @@ int PropertyDetails::field_width_in_words() const {
}
-// Getter that returns a tagged Smi and setter that writes a tagged Smi.
-#define ACCESSORS_TO_SMI(holder, name, offset) \
- Smi* holder::name() const { return Smi::cast(READ_FIELD(this, offset)); } \
- void holder::set_##name(Smi* value, WriteBarrierMode mode) { \
- WRITE_FIELD(this, offset, value); \
- }
-
-
// Getter that returns a Smi as an int and writes an int as a Smi.
#define SMI_ACCESSORS(holder, name, offset) \
int holder::name() const { \
@@ -187,16 +179,20 @@ bool Object::IsUniqueName() const {
}
-bool Object::IsSpecObject() const {
- return Object::IsHeapObject()
- && HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
+bool Object::IsCallable() const {
+ return Object::IsHeapObject() && HeapObject::cast(this)->map()->is_callable();
}
-bool Object::IsSpecFunction() const {
- if (!Object::IsHeapObject()) return false;
- InstanceType type = HeapObject::cast(this)->map()->instance_type();
- return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
+bool Object::IsConstructor() const {
+ return Object::IsHeapObject() &&
+ HeapObject::cast(this)->map()->is_constructor();
+}
+
+
+bool Object::IsSpecObject() const {
+ return Object::IsHeapObject()
+ && HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
}
@@ -698,6 +694,7 @@ TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
+TYPE_CHECKER(JSIteratorResult, JS_ITERATOR_RESULT_TYPE)
TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
@@ -733,6 +730,9 @@ bool Object::IsTransitionArray() const {
bool Object::IsTypeFeedbackVector() const { return IsFixedArray(); }
+bool Object::IsLiteralsArray() const { return IsFixedArray(); }
+
+
bool Object::IsDeoptimizationInputData() const {
// Must be a fixed array.
if (!IsFixedArray()) return false;
@@ -877,9 +877,6 @@ bool Object::IsWeakHashTable() const {
}
-bool Object::IsWeakValueHashTable() const { return IsHashTable(); }
-
-
bool Object::IsDictionary() const {
return IsHashTable() &&
this != HeapObject::cast(this)->GetHeap()->string_table();
@@ -1078,11 +1075,11 @@ bool Object::IsArgumentsMarker() const {
}
-double Object::Number() {
+double Object::Number() const {
DCHECK(IsNumber());
return IsSmi()
- ? static_cast<double>(reinterpret_cast<Smi*>(this)->value())
- : reinterpret_cast<HeapNumber*>(this)->value();
+ ? static_cast<double>(reinterpret_cast<const Smi*>(this)->value())
+ : reinterpret_cast<const HeapNumber*>(this)->value();
}
@@ -1135,6 +1132,7 @@ bool Object::FitsRepresentation(Representation representation) {
}
+// static
MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
Handle<Object> object) {
return ToObject(
@@ -1142,6 +1140,14 @@ MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
}
+// static
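+// Primitives are returned unchanged; receivers defer to the JSReceiver
+// implementation of the ToPrimitive abstract operation.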
+MaybeHandle<Object> Object::ToPrimitive(Handle<Object> input,
+ ToPrimitiveHint hint) {
+ if (input->IsPrimitive()) return input;
+ return JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input), hint);
+}
+
+
bool Object::HasSpecificClassOf(String* name) {
return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
}
@@ -1480,6 +1486,8 @@ HeapObjectContents HeapObject::ContentType() {
} else if (type == JS_FUNCTION_TYPE) {
return HeapObjectContents::kMixedValues;
#endif
+ } else if (type == BYTECODE_ARRAY_TYPE) {
+ return HeapObjectContents::kMixedValues;
} else if (type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
type <= LAST_FIXED_TYPED_ARRAY_TYPE) {
return HeapObjectContents::kMixedValues;
@@ -1547,6 +1555,12 @@ bool Simd128Value::Equals(Simd128Value* that) {
}
+// static
+bool Simd128Value::Equals(Handle<Simd128Value> one, Handle<Simd128Value> two) {
+ return one->Equals(*two);
+}
+
+
#define SIMD128_VALUE_EQUALS(TYPE, Type, type, lane_count, lane_type) \
bool Type::Equals(Type* that) { \
for (int lane = 0; lane < lane_count; ++lane) { \
@@ -1596,8 +1610,11 @@ SIMD128_TYPES(SIMD128_VALUE_EQUALS)
SIMD128_NUMERIC_LANE_FNS(Float32x4, float, 4, FLOAT, kFloatSize)
SIMD128_NUMERIC_LANE_FNS(Int32x4, int32_t, 4, INT32, kInt32Size)
+SIMD128_NUMERIC_LANE_FNS(Uint32x4, uint32_t, 4, UINT32, kInt32Size)
SIMD128_NUMERIC_LANE_FNS(Int16x8, int16_t, 8, INT16, kShortSize)
+SIMD128_NUMERIC_LANE_FNS(Uint16x8, uint16_t, 8, UINT16, kShortSize)
SIMD128_NUMERIC_LANE_FNS(Int8x16, int8_t, 16, INT8, kCharSize)
+SIMD128_NUMERIC_LANE_FNS(Uint8x16, uint8_t, 16, UINT8, kCharSize)
#undef SIMD128_NUMERIC_LANE_FNS
@@ -1654,8 +1671,8 @@ void AllocationSite::Initialize() {
set_transition_info(Smi::FromInt(0));
SetElementsKind(GetInitialFastElementsKind());
set_nested_site(Smi::FromInt(0));
- set_pretenure_data(Smi::FromInt(0));
- set_pretenure_create_count(Smi::FromInt(0));
+ set_pretenure_data(0);
+ set_pretenure_create_count(0);
set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
SKIP_WRITE_BARRIER);
}
@@ -1720,8 +1737,7 @@ bool AllocationSite::SitePointsToLiteral() {
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
ElementsKind boilerplate_elements_kind) {
- if (FLAG_pretenuring_call_new ||
- IsFastSmiElementsKind(boilerplate_elements_kind)) {
+ if (IsFastSmiElementsKind(boilerplate_elements_kind)) {
return TRACK_ALLOCATION_SITE;
}
@@ -1731,9 +1747,8 @@ AllocationSiteMode AllocationSite::GetMode(
AllocationSiteMode AllocationSite::GetMode(ElementsKind from,
ElementsKind to) {
- if (FLAG_pretenuring_call_new ||
- (IsFastSmiElementsKind(from) &&
- IsMoreGeneralElementsKindTransition(from, to))) {
+ if (IsFastSmiElementsKind(from) &&
+ IsMoreGeneralElementsKindTransition(from, to)) {
return TRACK_ALLOCATION_SITE;
}
@@ -1752,59 +1767,52 @@ inline bool AllocationSite::CanTrack(InstanceType type) {
AllocationSite::PretenureDecision AllocationSite::pretenure_decision() {
- int value = pretenure_data()->value();
+ int value = pretenure_data();
return PretenureDecisionBits::decode(value);
}
void AllocationSite::set_pretenure_decision(PretenureDecision decision) {
- int value = pretenure_data()->value();
- set_pretenure_data(
- Smi::FromInt(PretenureDecisionBits::update(value, decision)),
- SKIP_WRITE_BARRIER);
+ int value = pretenure_data();
+ set_pretenure_data(PretenureDecisionBits::update(value, decision));
}
bool AllocationSite::deopt_dependent_code() {
- int value = pretenure_data()->value();
+ int value = pretenure_data();
return DeoptDependentCodeBit::decode(value);
}
void AllocationSite::set_deopt_dependent_code(bool deopt) {
- int value = pretenure_data()->value();
- set_pretenure_data(Smi::FromInt(DeoptDependentCodeBit::update(value, deopt)),
- SKIP_WRITE_BARRIER);
+ int value = pretenure_data();
+ set_pretenure_data(DeoptDependentCodeBit::update(value, deopt));
}
int AllocationSite::memento_found_count() {
- int value = pretenure_data()->value();
+ int value = pretenure_data();
return MementoFoundCountBits::decode(value);
}
inline void AllocationSite::set_memento_found_count(int count) {
- int value = pretenure_data()->value();
+ int value = pretenure_data();
// Verify that we can count more mementos than we can possibly find in one
// new space collection.
DCHECK((GetHeap()->MaxSemiSpaceSize() /
- (StaticVisitorBase::kMinObjectSizeInWords * kPointerSize +
+ (Heap::kMinObjectSizeInWords * kPointerSize +
AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
DCHECK(count < MementoFoundCountBits::kMax);
- set_pretenure_data(
- Smi::FromInt(MementoFoundCountBits::update(value, count)),
- SKIP_WRITE_BARRIER);
+ set_pretenure_data(MementoFoundCountBits::update(value, count));
}
-int AllocationSite::memento_create_count() {
- return pretenure_create_count()->value();
-}
+int AllocationSite::memento_create_count() { return pretenure_create_count(); }
void AllocationSite::set_memento_create_count(int count) {
- set_pretenure_create_count(Smi::FromInt(count), SKIP_WRITE_BARRIER);
+ set_pretenure_create_count(count);
}
@@ -2030,6 +2038,12 @@ void Oddball::set_kind(byte value) {
}
+// static
+Handle<Object> Oddball::ToNumber(Handle<Oddball> input) {
+ return handle(input->to_number(), input->GetIsolate());
+}
+
+
ACCESSORS(Cell, value, Object, kValueOffset)
ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(PropertyCell, property_details_raw, Object, kDetailsOffset)
@@ -2128,6 +2142,8 @@ int JSObject::GetHeaderSize() {
return JSSetIterator::kSize;
case JS_MAP_ITERATOR_TYPE:
return JSMapIterator::kSize;
+ case JS_ITERATOR_RESULT_TYPE:
+ return JSIteratorResult::kSize;
case JS_WEAK_MAP_TYPE:
return JSWeakMap::kSize;
case JS_WEAK_SET_TYPE:
@@ -2528,6 +2544,21 @@ void WeakFixedArray::set_last_used_index(int index) {
}
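+// Returns the next non-empty element of the list, or NULL once the end is
+// reached; emptied slots are skipped.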
+template <class T>
+T* WeakFixedArray::Iterator::Next() {
+ if (list_ != NULL) {
+ // Assert that list did not change during iteration.
+ DCHECK_EQ(last_used_index_, list_->last_used_index());
+ while (index_ < list_->Length()) {
+ Object* item = list_->Get(index_++);
+ if (item != Empty()) return T::cast(item);
+ }
+ list_ = NULL;
+ }
+ return NULL;
+}
+
+
int ArrayList::Length() {
if (FixedArray::cast(this)->length() == 0) return 0;
return Smi::cast(FixedArray::cast(this)->get(kLengthIndex))->value();
@@ -2753,7 +2784,7 @@ int BinarySearch(T* array, Name* name, int low, int high, int valid_entries,
DCHECK(low <= high);
while (low != high) {
- int mid = (low + high) / 2;
+ int mid = low + (high - low) / 2;
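+ // Computing the midpoint as low + (high - low) / 2 avoids the signed
+ // overflow that (low + high) / 2 risks for large indices.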
Name* mid_name = array->GetSortedKey(mid);
uint32_t mid_hash = mid_name->Hash();
@@ -3200,8 +3231,8 @@ int HashTable<Derived, Shape, Key>::FindEntry(Isolate* isolate, Key key,
Object* element = KeyAt(entry);
// Empty entry. Uses raw unchecked accessors because it is called by the
// string table during bootstrapping.
- if (element == isolate->heap()->raw_unchecked_undefined_value()) break;
- if (element != isolate->heap()->raw_unchecked_the_hole_value() &&
+ if (element == isolate->heap()->root(Heap::kUndefinedValueRootIndex)) break;
+ if (element != isolate->heap()->root(Heap::kTheHoleValueRootIndex) &&
Shape::IsMatch(key, element)) return entry;
entry = NextProbe(entry, count++, capacity);
}
@@ -3288,6 +3319,7 @@ CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSSetIterator)
+CAST_ACCESSOR(JSIteratorResult)
CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSValue)
CAST_ACCESSOR(JSWeakMap)
@@ -3317,11 +3349,13 @@ CAST_ACCESSOR(String)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(Symbol)
+CAST_ACCESSOR(Uint16x8)
+CAST_ACCESSOR(Uint32x4)
+CAST_ACCESSOR(Uint8x16)
CAST_ACCESSOR(UnseededNumberDictionary)
CAST_ACCESSOR(WeakCell)
CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakHashTable)
-CAST_ACCESSOR(WeakValueHashTable)
// static
@@ -3423,6 +3457,55 @@ void DeoptimizationOutputData::SetPcAndState(int index, Smi* offset) {
}
+Object* LiteralsArray::get(int index) const { return FixedArray::get(index); }
+
+
+void LiteralsArray::set(int index, Object* value) {
+ FixedArray::set(index, value);
+}
+
+
+void LiteralsArray::set(int index, Smi* value) {
+ FixedArray::set(index, value);
+}
+
+
+void LiteralsArray::set(int index, Object* value, WriteBarrierMode mode) {
+ FixedArray::set(index, value, mode);
+}
+
+
+LiteralsArray* LiteralsArray::cast(Object* object) {
+ SLOW_DCHECK(object->IsLiteralsArray());
+ return reinterpret_cast<LiteralsArray*>(object);
+}
+
+
+TypeFeedbackVector* LiteralsArray::feedback_vector() const {
+ return TypeFeedbackVector::cast(get(kVectorIndex));
+}
+
+
+void LiteralsArray::set_feedback_vector(TypeFeedbackVector* vector) {
+ set(kVectorIndex, vector);
+}
+
+
+Object* LiteralsArray::literal(int literal_index) const {
+ return get(kFirstLiteralIndex + literal_index);
+}
+
+
+void LiteralsArray::set_literal(int literal_index, Object* literal) {
+ set(kFirstLiteralIndex + literal_index, literal);
+}
+
+
+int LiteralsArray::literals_count() const {
+ return length() - kFirstLiteralIndex;
+}
+
+
void HandlerTable::SetRangeStart(int index, int value) {
set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
}
@@ -3494,7 +3577,7 @@ int FreeSpace::Size() { return size(); }
FreeSpace* FreeSpace::next() {
- DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
+ DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
(!GetHeap()->deserialization_complete() && map() == NULL));
DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
return reinterpret_cast<FreeSpace*>(
@@ -3503,7 +3586,7 @@ FreeSpace* FreeSpace::next() {
FreeSpace** FreeSpace::next_address() {
- DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
+ DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
(!GetHeap()->deserialization_complete() && map() == NULL));
DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
return reinterpret_cast<FreeSpace**>(address() + kNextOffset);
@@ -3511,7 +3594,7 @@ FreeSpace** FreeSpace::next_address() {
void FreeSpace::set_next(FreeSpace* next) {
- DCHECK(map() == GetHeap()->raw_unchecked_free_space_map() ||
+ DCHECK(map() == GetHeap()->root(Heap::kFreeSpaceMapRootIndex) ||
(!GetHeap()->deserialization_complete() && map() == NULL));
DCHECK_LE(kNextOffset + kPointerSize, nobarrier_size());
base::NoBarrier_Store(
@@ -3565,7 +3648,7 @@ bool Name::Equals(Handle<Name> one, Handle<Name> two) {
ACCESSORS(Symbol, name, Object, kNameOffset)
-ACCESSORS(Symbol, flags, Smi, kFlagsOffset)
+SMI_ACCESSORS(Symbol, flags, kFlagsOffset)
BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
@@ -4019,6 +4102,11 @@ Address ByteArray::GetDataStartAddress() {
}
+void BytecodeArray::BytecodeArrayIterateBody(ObjectVisitor* v) {
+ IteratePointer(v, kConstantPoolOffset);
+}
+
+
byte BytecodeArray::get(int index) {
DCHECK(index >= 0 && index < this->length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@@ -4043,6 +4131,30 @@ int BytecodeArray::frame_size() const {
}
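+// Every interpreter register occupies one pointer-sized stack slot, so the
+// register count is the frame size divided by kPointerSize.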
+int BytecodeArray::register_count() const {
+ return frame_size() / kPointerSize;
+}
+
+
+void BytecodeArray::set_parameter_count(int number_of_parameters) {
+ DCHECK_GE(number_of_parameters, 0);
+ // The parameter count is stored as the on-stack size of the parameters so
+ // that generated code can use it directly.
+ WRITE_INT_FIELD(this, kParameterSizeOffset,
+ (number_of_parameters << kPointerSizeLog2));
+}
+
+
+int BytecodeArray::parameter_count() const {
+ // The parameter count is stored as the on-stack size of the parameters so
+ // that generated code can use it directly.
+ return READ_INT_FIELD(this, kParameterSizeOffset) >> kPointerSizeLog2;
+}
+
+
+ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
+
+
Address BytecodeArray::GetFirstBytecodeAddress() {
return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
}
@@ -4456,23 +4568,27 @@ bool Map::has_non_instance_prototype() {
}
-void Map::set_function_with_prototype(bool value) {
- set_bit_field(FunctionWithPrototype::update(bit_field(), value));
+void Map::set_is_constructor(bool value) {
+ if (value) {
+ set_bit_field(bit_field() | (1 << kIsConstructor));
+ } else {
+ set_bit_field(bit_field() & ~(1 << kIsConstructor));
+ }
}
-bool Map::function_with_prototype() {
- return FunctionWithPrototype::decode(bit_field());
+bool Map::is_constructor() const {
+ return ((1 << kIsConstructor) & bit_field()) != 0;
}
void Map::set_is_hidden_prototype() {
- set_bit_field(bit_field() | (1 << kIsHiddenPrototype));
+ set_bit_field3(IsHiddenPrototype::update(bit_field3(), true));
}
-bool Map::is_hidden_prototype() {
- return ((1 << kIsHiddenPrototype) & bit_field()) != 0;
+bool Map::is_hidden_prototype() const {
+ return IsHiddenPrototype::decode(bit_field3());
}
@@ -4618,13 +4734,11 @@ bool Map::owns_descriptors() {
}
-void Map::set_has_instance_call_handler() {
- set_bit_field3(HasInstanceCallHandler::update(bit_field3(), true));
-}
+void Map::set_is_callable() { set_bit_field(bit_field() | (1 << kIsCallable)); }
-bool Map::has_instance_call_handler() {
- return HasInstanceCallHandler::decode(bit_field3());
+bool Map::is_callable() const {
+ return ((1 << kIsCallable) & bit_field()) != 0;
}
@@ -4721,6 +4835,7 @@ bool Map::IsJSObjectMap() {
return instance_type() >= FIRST_JS_OBJECT_TYPE;
}
bool Map::IsJSArrayMap() { return instance_type() == JS_ARRAY_TYPE; }
+bool Map::IsJSFunctionMap() { return instance_type() == JS_FUNCTION_TYPE; }
bool Map::IsStringMap() { return instance_type() < FIRST_NONSTRING_TYPE; }
bool Map::IsJSProxyMap() {
InstanceType type = instance_type();
@@ -5292,11 +5407,11 @@ void Map::UpdateDescriptors(DescriptorArray* descriptors,
// TODO(ishell): remove these checks from VERIFY_HEAP mode.
if (FLAG_verify_heap) {
CHECK(layout_descriptor()->IsConsistentWithMap(this));
- CHECK(visitor_id() == StaticVisitorBase::GetVisitorId(this));
+ CHECK(visitor_id() == Heap::GetStaticVisitorIdForMap(this));
}
#else
SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
- DCHECK(visitor_id() == StaticVisitorBase::GetVisitorId(this));
+ DCHECK(visitor_id() == Heap::GetStaticVisitorIdForMap(this));
#endif
}
}
@@ -5318,7 +5433,7 @@ void Map::InitializeDescriptors(DescriptorArray* descriptors,
#else
SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
#endif
- set_visitor_id(StaticVisitorBase::GetVisitorId(this));
+ set_visitor_id(Heap::GetStaticVisitorIdForMap(this));
}
}
@@ -5441,7 +5556,7 @@ ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
ACCESSORS(JSGlobalProxy, hash, Object, kHashOffset)
ACCESSORS(AccessorInfo, name, Object, kNameOffset)
-ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset)
+SMI_ACCESSORS(AccessorInfo, flag, kFlagOffset)
ACCESSORS(AccessorInfo, expected_receiver_type, Object,
kExpectedReceiverTypeOffset)
@@ -5456,6 +5571,11 @@ SMI_ACCESSORS(PrototypeInfo, registry_slot, kRegistrySlotOffset)
ACCESSORS(PrototypeInfo, validity_cell, Object, kValidityCellOffset)
ACCESSORS(PrototypeInfo, constructor_name, Object, kConstructorNameOffset)
+ACCESSORS(SloppyBlockWithEvalContextExtension, scope_info, ScopeInfo,
+ kScopeInfoOffset)
+ACCESSORS(SloppyBlockWithEvalContextExtension, extension, JSObject,
+ kExtensionOffset)
+
ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
@@ -5500,7 +5620,7 @@ ACCESSORS(FunctionTemplateInfo, instance_call_handler, Object,
kInstanceCallHandlerOffset)
ACCESSORS(FunctionTemplateInfo, access_check_info, Object,
kAccessCheckInfoOffset)
-ACCESSORS_TO_SMI(FunctionTemplateInfo, flag, kFlagOffset)
+SMI_ACCESSORS(FunctionTemplateInfo, flag, kFlagOffset)
ACCESSORS(ObjectTemplateInfo, constructor, Object, kConstructorOffset)
ACCESSORS(ObjectTemplateInfo, internal_field_count, Object,
@@ -5510,9 +5630,9 @@ ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset)
ACCESSORS(AllocationSite, transition_info, Object, kTransitionInfoOffset)
ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
-ACCESSORS_TO_SMI(AllocationSite, pretenure_data, kPretenureDataOffset)
-ACCESSORS_TO_SMI(AllocationSite, pretenure_create_count,
- kPretenureCreateCountOffset)
+SMI_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
+SMI_ACCESSORS(AllocationSite, pretenure_create_count,
+ kPretenureCreateCountOffset)
ACCESSORS(AllocationSite, dependent_code, DependentCode,
kDependentCodeOffset)
ACCESSORS(AllocationSite, weak_next, Object, kWeakNextOffset)
@@ -5520,18 +5640,18 @@ ACCESSORS(AllocationMemento, allocation_site, Object, kAllocationSiteOffset)
ACCESSORS(Script, source, Object, kSourceOffset)
ACCESSORS(Script, name, Object, kNameOffset)
-ACCESSORS(Script, id, Smi, kIdOffset)
-ACCESSORS_TO_SMI(Script, line_offset, kLineOffsetOffset)
-ACCESSORS_TO_SMI(Script, column_offset, kColumnOffsetOffset)
+SMI_ACCESSORS(Script, id, kIdOffset)
+SMI_ACCESSORS(Script, line_offset, kLineOffsetOffset)
+SMI_ACCESSORS(Script, column_offset, kColumnOffsetOffset)
ACCESSORS(Script, context_data, Object, kContextOffset)
ACCESSORS(Script, wrapper, HeapObject, kWrapperOffset)
-ACCESSORS_TO_SMI(Script, type, kTypeOffset)
+SMI_ACCESSORS(Script, type, kTypeOffset)
ACCESSORS(Script, line_ends, Object, kLineEndsOffset)
ACCESSORS(Script, eval_from_shared, Object, kEvalFromSharedOffset)
-ACCESSORS_TO_SMI(Script, eval_from_instructions_offset,
- kEvalFrominstructionsOffsetOffset)
+SMI_ACCESSORS(Script, eval_from_instructions_offset,
+ kEvalFrominstructionsOffsetOffset)
ACCESSORS(Script, shared_function_infos, Object, kSharedFunctionInfosOffset)
-ACCESSORS_TO_SMI(Script, flags, kFlagsOffset)
+SMI_ACCESSORS(Script, flags, kFlagsOffset)
ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
@@ -5543,6 +5663,10 @@ void Script::set_compilation_type(CompilationType type) {
set_flags(BooleanBit::set(flags(), kCompilationTypeBit,
type == COMPILATION_TYPE_EVAL));
}
+bool Script::hide_source() { return BooleanBit::get(flags(), kHideSourceBit); }
+void Script::set_hide_source(bool value) {
+ set_flags(BooleanBit::set(flags(), kHideSourceBit, value));
+}
Script::CompilationState Script::compilation_state() {
return BooleanBit::get(flags(), kCompilationStateBit) ?
COMPILATION_STATE_COMPILED : COMPILATION_STATE_INITIAL;
@@ -5552,13 +5676,13 @@ void Script::set_compilation_state(CompilationState state) {
state == COMPILATION_STATE_COMPILED));
}
ScriptOriginOptions Script::origin_options() {
- return ScriptOriginOptions((flags()->value() & kOriginOptionsMask) >>
+ return ScriptOriginOptions((flags() & kOriginOptionsMask) >>
kOriginOptionsShift);
}
void Script::set_origin_options(ScriptOriginOptions origin_options) {
DCHECK(!(origin_options.Flags() & ~((1 << kOriginOptionsSize) - 1)));
- set_flags(Smi::FromInt((flags()->value() & ~kOriginOptionsMask) |
- (origin_options.Flags() << kOriginOptionsShift)));
+ set_flags((flags() & ~kOriginOptionsMask) |
+ (origin_options.Flags() << kOriginOptionsShift));
}
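Both accessors above treat the flags Smi as a packed bitfield: single boolean bits plus a multi-bit origin-options field that is cleared with ~kOriginOptionsMask and refilled at kOriginOptionsShift. A standalone model of that read-modify-write follows; the mask and shift values here are assumptions of the sketch, not V8's real constants:

constexpr int kShiftSketch = 3;
constexpr int kMaskSketch = 0x3 << kShiftSketch;  // two-bit field

int SetOriginOptionsSketch(int flags, int options) {
  // Clear the field, then OR in the new value at its shifted position.
  return (flags & ~kMaskSketch) | ((options << kShiftSketch) & kMaskSketch);
}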
@@ -5566,9 +5690,9 @@ ACCESSORS(DebugInfo, shared, SharedFunctionInfo, kSharedFunctionInfoIndex)
ACCESSORS(DebugInfo, code, Code, kCodeIndex)
ACCESSORS(DebugInfo, break_points, FixedArray, kBreakPointsStateIndex)
-ACCESSORS_TO_SMI(BreakPointInfo, code_position, kCodePositionIndex)
-ACCESSORS_TO_SMI(BreakPointInfo, source_position, kSourcePositionIndex)
-ACCESSORS_TO_SMI(BreakPointInfo, statement_position, kStatementPositionIndex)
+SMI_ACCESSORS(BreakPointInfo, code_position, kCodePositionIndex)
+SMI_ACCESSORS(BreakPointInfo, source_position, kSourcePositionIndex)
+SMI_ACCESSORS(BreakPointInfo, statement_position, kStatementPositionIndex)
ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
@@ -6008,18 +6132,19 @@ void SharedFunctionInfo::set_disable_optimization_reason(BailoutReason reason) {
}
-bool SharedFunctionInfo::IsSubjectToDebugging() {
+bool SharedFunctionInfo::IsBuiltin() {
Object* script_obj = script();
- if (script_obj->IsUndefined()) return false;
+ if (script_obj->IsUndefined()) return true;
Script* script = Script::cast(script_obj);
- Script::Type type = static_cast<Script::Type>(script->type()->value());
- return type == Script::TYPE_NORMAL;
+ Script::Type type = static_cast<Script::Type>(script->type());
+ return type != Script::TYPE_NORMAL;
}
-bool JSFunction::IsBuiltin() {
- return context()->global_object()->IsJSBuiltinsObject();
-}
+bool SharedFunctionInfo::IsSubjectToDebugging() { return !IsBuiltin(); }
+
+
+bool JSFunction::IsBuiltin() { return shared()->IsBuiltin(); }
bool JSFunction::IsSubjectToDebugging() {
@@ -6172,11 +6297,6 @@ Object* JSFunction::prototype() {
}
-bool JSFunction::should_have_prototype() {
- return map()->function_with_prototype();
-}
-
-
bool JSFunction::is_compiled() {
Builtins* builtins = GetIsolate()->builtins();
return code() != builtins->builtin(Builtins::kCompileLazy) &&
@@ -6190,13 +6310,13 @@ bool JSFunction::has_simple_parameters() {
}
-FixedArray* JSFunction::literals() {
+LiteralsArray* JSFunction::literals() {
DCHECK(!shared()->bound());
- return literals_or_bindings();
+ return LiteralsArray::cast(literals_or_bindings());
}
-void JSFunction::set_literals(FixedArray* literals) {
+void JSFunction::set_literals(LiteralsArray* literals) {
DCHECK(!shared()->bound());
set_literals_or_bindings(literals);
}
@@ -6224,23 +6344,9 @@ int JSFunction::NumberOfLiterals() {
}
-Object* JSBuiltinsObject::javascript_builtin(Builtins::JavaScript id) {
- DCHECK(id < kJSBuiltinsCount); // id is unsigned.
- return READ_FIELD(this, OffsetOfFunctionWithId(id));
-}
-
-
-void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
- Object* value) {
- DCHECK(id < kJSBuiltinsCount); // id is unsigned.
- WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
- WRITE_BARRIER(GetHeap(), this, OffsetOfFunctionWithId(id), value);
-}
-
-
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
ACCESSORS(JSProxy, hash, Object, kHashOffset)
-ACCESSORS(JSFunctionProxy, call_trap, Object, kCallTrapOffset)
+ACCESSORS(JSFunctionProxy, call_trap, JSReceiver, kCallTrapOffset)
ACCESSORS(JSFunctionProxy, construct_trap, Object, kConstructTrapOffset)
@@ -6364,6 +6470,8 @@ void Code::WipeOutHeader() {
if (!READ_FIELD(this, kTypeFeedbackInfoOffset)->IsSmi()) {
WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL);
}
+ WRITE_FIELD(this, kNextCodeLinkOffset, NULL);
+ WRITE_FIELD(this, kGCMetadataOffset, NULL);
}
@@ -6996,6 +7104,78 @@ String* String::GetForwardedInternalizedString() {
}
+// static
+Maybe<bool> Object::GreaterThan(Handle<Object> x, Handle<Object> y,
+ Strength strength) {
+ Maybe<ComparisonResult> result = Compare(x, y, strength);
+ if (result.IsJust()) {
+ switch (result.FromJust()) {
+ case ComparisonResult::kGreaterThan:
+ return Just(true);
+ case ComparisonResult::kLessThan:
+ case ComparisonResult::kEqual:
+ case ComparisonResult::kUndefined:
+ return Just(false);
+ }
+ }
+ return Nothing<bool>();
+}
+
+
+// static
+Maybe<bool> Object::GreaterThanOrEqual(Handle<Object> x, Handle<Object> y,
+ Strength strength) {
+ Maybe<ComparisonResult> result = Compare(x, y, strength);
+ if (result.IsJust()) {
+ switch (result.FromJust()) {
+ case ComparisonResult::kEqual:
+ case ComparisonResult::kGreaterThan:
+ return Just(true);
+ case ComparisonResult::kLessThan:
+ case ComparisonResult::kUndefined:
+ return Just(false);
+ }
+ }
+ return Nothing<bool>();
+}
+
+
+// static
+Maybe<bool> Object::LessThan(Handle<Object> x, Handle<Object> y,
+ Strength strength) {
+ Maybe<ComparisonResult> result = Compare(x, y, strength);
+ if (result.IsJust()) {
+ switch (result.FromJust()) {
+ case ComparisonResult::kLessThan:
+ return Just(true);
+ case ComparisonResult::kEqual:
+ case ComparisonResult::kGreaterThan:
+ case ComparisonResult::kUndefined:
+ return Just(false);
+ }
+ }
+ return Nothing<bool>();
+}
+
+
+// static
+Maybe<bool> Object::LessThanOrEqual(Handle<Object> x, Handle<Object> y,
+ Strength strength) {
+ Maybe<ComparisonResult> result = Compare(x, y, strength);
+ if (result.IsJust()) {
+ switch (result.FromJust()) {
+ case ComparisonResult::kEqual:
+ case ComparisonResult::kLessThan:
+ return Just(true);
+ case ComparisonResult::kGreaterThan:
+ case ComparisonResult::kUndefined:
+ return Just(false);
+ }
+ }
+ return Nothing<bool>();
+}
+
+
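All four predicates above funnel through Object::Compare: ComparisonResult::kUndefined (a NaN operand) collapses to Just(false), while Nothing<bool>() means a conversion threw. A hypothetical call site; the handles x and y and the strength value are assumed to be in scope:

Maybe<bool> less = Object::LessThan(x, y, strength);
if (less.IsNothing()) return MaybeHandle<Object>();  // exception pending
if (less.FromJust()) {
  // x < y held; a NaN operand would have produced Just(false) instead.
}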
MaybeHandle<Object> Object::GetPropertyOrElement(Handle<Object> object,
Handle<Name> name,
LanguageMode language_mode) {
@@ -7149,12 +7329,12 @@ void AccessorInfo::set_is_special_data_property(bool value) {
PropertyAttributes AccessorInfo::property_attributes() {
- return AttributesField::decode(static_cast<uint32_t>(flag()->value()));
+ return AttributesField::decode(static_cast<uint32_t>(flag()));
}
void AccessorInfo::set_property_attributes(PropertyAttributes attributes) {
- set_flag(Smi::FromInt(AttributesField::update(flag()->value(), attributes)));
+ set_flag(AttributesField::update(flag(), attributes));
}
@@ -7207,7 +7387,7 @@ bool AccessorPair::ContainsAccessor() {
bool AccessorPair::IsJSAccessor(Object* obj) {
- return obj->IsSpecFunction() || obj->IsUndefined();
+ return obj->IsCallable() || obj->IsUndefined();
}
@@ -7767,6 +7947,10 @@ Object* JSMapIterator::CurrentValue() {
}
+ACCESSORS(JSIteratorResult, done, Object, kDoneOffset)
+ACCESSORS(JSIteratorResult, value, Object, kValueOffset)
+
+
String::SubStringRange::SubStringRange(String* string, int first, int length)
: string_(string),
first_(first),
@@ -7820,7 +8004,6 @@ String::SubStringRange::iterator String::SubStringRange::end() {
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
#undef ACCESSORS
-#undef ACCESSORS_TO_SMI
#undef SMI_ACCESSORS
#undef SYNCHRONIZED_SMI_ACCESSORS
#undef NOBARRIER_SMI_ACCESSORS
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 799561eb8b..8dfd0a17b0 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/objects.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/interpreter/bytecodes.h"
+#include "src/objects-inl.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
@@ -143,6 +144,9 @@ void HeapObject::HeapObjectPrint(std::ostream& os) { // NOLINT
case JS_MAP_ITERATOR_TYPE:
JSMapIterator::cast(this)->JSMapIteratorPrint(os);
break;
+ case JS_ITERATOR_RESULT_TYPE:
+ JSIteratorResult::cast(this)->JSIteratorResultPrint(os);
+ break;
case JS_WEAK_MAP_TYPE:
JSWeakMap::cast(this)->JSWeakMapPrint(os);
break;
@@ -220,8 +224,11 @@ void Float32x4::Float32x4Print(std::ostream& os) { // NOLINT
} \
}
SIMD128_INT_PRINT_FUNCTION(Int32x4, 4)
+SIMD128_INT_PRINT_FUNCTION(Uint32x4, 4)
SIMD128_INT_PRINT_FUNCTION(Int16x8, 8)
+SIMD128_INT_PRINT_FUNCTION(Uint16x8, 8)
SIMD128_INT_PRINT_FUNCTION(Int8x16, 16)
+SIMD128_INT_PRINT_FUNCTION(Uint8x16, 16)
#undef SIMD128_INT_PRINT_FUNCTION
@@ -449,7 +456,8 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (has_named_interceptor()) os << " - named_interceptor\n";
if (has_indexed_interceptor()) os << " - indexed_interceptor\n";
if (is_undetectable()) os << " - undetectable\n";
- if (has_instance_call_handler()) os << " - instance_call_handler\n";
+ if (is_callable()) os << " - callable\n";
+ if (is_constructor()) os << " - constructor\n";
if (is_access_check_needed()) os << " - access_check_needed\n";
if (!is_extensible()) os << " - non-extensible\n";
if (is_observed()) os << " - observed\n";
@@ -562,19 +570,38 @@ void TypeFeedbackVector::TypeFeedbackVectorPrint(std::ostream& os) { // NOLINT
for (int i = 0; i < ICSlots(); i++) {
FeedbackVectorICSlot slot(i);
- Code::Kind kind = GetKind(slot);
- os << "\n ICSlot " << i;
- if (kind == Code::LOAD_IC) {
- LoadICNexus nexus(this, slot);
- os << " LOAD_IC " << Code::ICState2String(nexus.StateFromFeedback());
- } else if (kind == Code::KEYED_LOAD_IC) {
- KeyedLoadICNexus nexus(this, slot);
- os << " KEYED_LOAD_IC "
- << Code::ICState2String(nexus.StateFromFeedback());
- } else {
- DCHECK(kind == Code::CALL_IC);
- CallICNexus nexus(this, slot);
- os << " CALL_IC " << Code::ICState2String(nexus.StateFromFeedback());
+ FeedbackVectorSlotKind kind = GetKind(slot);
+ os << "\n ICSlot " << i << " " << kind << " ";
+ switch (kind) {
+ case FeedbackVectorSlotKind::LOAD_IC: {
+ LoadICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
+ KeyedLoadICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::CALL_IC: {
+ CallICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::STORE_IC: {
+ StoreICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::KEYED_STORE_IC: {
+ KeyedStoreICNexus nexus(this, slot);
+ os << Code::ICState2String(nexus.StateFromFeedback());
+ break;
+ }
+ case FeedbackVectorSlotKind::UNUSED:
+ case FeedbackVectorSlotKind::KINDS_NUMBER:
+ UNREACHABLE();
+ break;
}
os << "\n [" << GetIndex(slot) << "]: " << Brief(Get(slot));
@@ -745,6 +772,15 @@ void JSMapIterator::JSMapIteratorPrint(std::ostream& os) { // NOLINT
}
+void JSIteratorResult::JSIteratorResultPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "JSIteratorResult");
+ os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+ os << " - done = " << Brief(done()) << "\n";
+ os << " - value = " << Brief(value()) << "\n";
+ os << "\n";
+}
+
+
void JSWeakMap::JSWeakMapPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "JSWeakMap");
os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
@@ -913,7 +949,7 @@ void ExecutableAccessorInfo::ExecutableAccessorInfoPrint(
std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "ExecutableAccessorInfo");
os << "\n - name: " << Brief(name());
- os << "\n - flag: " << Brief(flag());
+ os << "\n - flag: " << flag();
os << "\n - getter: " << Brief(getter());
os << "\n - setter: " << Brief(setter());
os << "\n - data: " << Brief(data());
@@ -938,6 +974,15 @@ void PrototypeInfo::PrototypeInfoPrint(std::ostream& os) { // NOLINT
}
+void SloppyBlockWithEvalContextExtension::
+ SloppyBlockWithEvalContextExtensionPrint(std::ostream& os) { // NOLINT
+ HeapObject::PrintHeader(os, "SloppyBlockWithEvalContextExtension");
+ os << "\n - scope_info: " << Brief(scope_info());
+ os << "\n - extension: " << Brief(extension());
+ os << "\n";
+}
+
+
void AccessorPair::AccessorPairPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AccessorPair");
os << "\n - getter: " << Brief(getter());
@@ -1056,17 +1101,17 @@ void Script::ScriptPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "Script");
os << "\n - source: " << Brief(source());
os << "\n - name: " << Brief(name());
- os << "\n - line_offset: " << Brief(line_offset());
- os << "\n - column_offset: " << Brief(column_offset());
- os << "\n - type: " << Brief(type());
- os << "\n - id: " << Brief(id());
+ os << "\n - line_offset: " << line_offset();
+ os << "\n - column_offset: " << column_offset();
+ os << "\n - type: " << type();
+ os << "\n - id: " << id();
os << "\n - context data: " << Brief(context_data());
os << "\n - wrapper: " << Brief(wrapper());
os << "\n - compilation type: " << compilation_type();
os << "\n - line ends: " << Brief(line_ends());
os << "\n - eval from shared: " << Brief(eval_from_shared());
os << "\n - eval from instructions offset: "
- << Brief(eval_from_instructions_offset());
+ << eval_from_instructions_offset();
os << "\n - shared function infos: " << Brief(shared_function_infos());
os << "\n";
}
@@ -1083,9 +1128,9 @@ void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
void BreakPointInfo::BreakPointInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "BreakPointInfo");
- os << "\n - code_position: " << code_position()->value();
- os << "\n - source_position: " << source_position()->value();
- os << "\n - statement_position: " << statement_position()->value();
+ os << "\n - code_position: " << code_position();
+ os << "\n - source_position: " << source_position();
+ os << "\n - statement_position: " << statement_position();
os << "\n - break_point_objects: " << Brief(break_point_objects());
os << "\n";
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 65d5d5f528..08383030d8 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/objects.h"
+
+#include <cmath>
#include <iomanip>
#include <sstream>
-#include "src/v8.h"
-
#include "src/accessors.h"
#include "src/allocation-site-scopes.h"
#include "src/api.h"
@@ -18,7 +19,6 @@
#include "src/codegen.h"
#include "src/compilation-dependencies.h"
#include "src/compiler.h"
-#include "src/cpu-profiler.h"
#include "src/date.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@@ -30,13 +30,16 @@
#include "src/hydrogen.h"
#include "src/ic/ic.h"
#include "src/interpreter/bytecodes.h"
+#include "src/isolate-inl.h"
#include "src/log.h"
#include "src/lookup.h"
#include "src/macro-assembler.h"
#include "src/messages.h"
#include "src/objects-inl.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/prototype.h"
#include "src/safepoint-table.h"
+#include "src/string-builder.h"
#include "src/string-search.h"
#include "src/string-stream.h"
#include "src/utils.h"
@@ -90,6 +93,106 @@ MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
}
+// static
+MaybeHandle<Name> Object::ToName(Isolate* isolate, Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input, Object::ToPrimitive(input, ToPrimitiveHint::kString),
+ Name);
+ if (input->IsName()) return Handle<Name>::cast(input);
+ return ToString(isolate, input);
+}
+
+
+// static
+MaybeHandle<Object> Object::ToNumber(Handle<Object> input) {
+ while (true) {
+ if (input->IsNumber()) {
+ return input;
+ }
+ if (input->IsString()) {
+ return String::ToNumber(Handle<String>::cast(input));
+ }
+ if (input->IsOddball()) {
+ return Oddball::ToNumber(Handle<Oddball>::cast(input));
+ }
+ Isolate* const isolate = Handle<HeapObject>::cast(input)->GetIsolate();
+ if (input->IsSymbol()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToNumber),
+ Object);
+ }
+ if (input->IsSimd128Value()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSimdToNumber),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input, JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
+ ToPrimitiveHint::kNumber),
+ Object);
+ }
+}
+
+
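ToNumber above is a small trampoline: Numbers, Strings and Oddballs convert directly, Symbols and SIMD values throw, and a JSReceiver is first replaced by its ToPrimitive(kNumber) result, so the loop runs at most twice. A hypothetical call site, with isolate and input assumed in scope:

Handle<Object> number;
ASSIGN_RETURN_ON_EXCEPTION(isolate, number, Object::ToNumber(input), Object);
double value = number->Number();  // a Smi or HeapNumber at this point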
+// static
+MaybeHandle<Object> Object::ToInteger(Isolate* isolate, Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+ return isolate->factory()->NewNumber(DoubleToInteger(input->Number()));
+}
+
+
+// static
+MaybeHandle<Object> Object::ToInt32(Isolate* isolate, Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+ return isolate->factory()->NewNumberFromInt(DoubleToInt32(input->Number()));
+}
+
+
+// static
+MaybeHandle<Object> Object::ToUint32(Isolate* isolate, Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+ return isolate->factory()->NewNumberFromUint(DoubleToUint32(input->Number()));
+}
+
+
+// static
+MaybeHandle<String> Object::ToString(Isolate* isolate, Handle<Object> input) {
+ while (true) {
+ if (input->IsString()) {
+ return Handle<String>::cast(input);
+ }
+ if (input->IsOddball()) {
+ return handle(Handle<Oddball>::cast(input)->to_string(), isolate);
+ }
+ if (input->IsNumber()) {
+ return isolate->factory()->NumberToString(input);
+ }
+ if (input->IsSymbol()) {
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kSymbolToString),
+ String);
+ }
+ if (input->IsSimd128Value()) {
+ return Simd128Value::ToString(Handle<Simd128Value>::cast(input));
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, input, JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input),
+ ToPrimitiveHint::kString),
+ String);
+ }
+}
+
+
+// static
+MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, input, ToNumber(input), Object);
+ double len = DoubleToInteger(input->Number());
+ if (len <= 0.0) {
+ len = 0.0;
+ } else if (len >= kMaxSafeInteger) {
+ len = kMaxSafeInteger;
+ }
+ return isolate->factory()->NewNumber(len);
+}
+
+
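ToLength above integerizes and then clamps into the valid array-length range [0, 2^53 - 1]. A standalone sketch of just the clamp; the literal mirrors kMaxSafeInteger and is an assumption of the sketch:

double ClampToLengthSketch(double len) {
  if (!(len > 0.0)) return 0.0;  // negatives, -0 and NaN all clamp to 0
  if (len >= 9007199254740991.0) return 9007199254740991.0;  // 2^53 - 1
  return len;
}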
bool Object::BooleanValue() {
if (IsBoolean()) return IsTrue();
if (IsSmi()) return Smi::cast(this)->value() != 0;
@@ -97,18 +200,169 @@ bool Object::BooleanValue() {
if (IsUndetectableObject()) return false; // Undetectable object is false.
if (IsString()) return String::cast(this)->length() != 0;
if (IsHeapNumber()) return HeapNumber::cast(this)->HeapNumberBooleanValue();
- if (IsSimd128Value()) return true; // Simd value types evaluate to true.
return true;
}
+namespace {
+
+// TODO(bmeurer): Maybe we should introduce a marker interface Number,
+// where we put all these methods at some point?
+ComparisonResult NumberCompare(double x, double y) {
+ if (std::isnan(x) || std::isnan(y)) {
+ return ComparisonResult::kUndefined;
+ } else if (x < y) {
+ return ComparisonResult::kLessThan;
+ } else if (x > y) {
+ return ComparisonResult::kGreaterThan;
+ } else {
+ return ComparisonResult::kEqual;
+ }
+}
+
+
+bool NumberEquals(double x, double y) {
+  // Must check explicitly for NaNs on Windows, but -0 works fine.
+ if (std::isnan(x)) return false;
+ if (std::isnan(y)) return false;
+ return x == y;
+}
+
+
+bool NumberEquals(const Object* x, const Object* y) {
+ return NumberEquals(x->Number(), y->Number());
+}
+
+
+bool NumberEquals(Handle<Object> x, Handle<Object> y) {
+ return NumberEquals(*x, *y);
+}
+
+} // namespace
+
+
+// static
+Maybe<ComparisonResult> Object::Compare(Handle<Object> x, Handle<Object> y,
+ Strength strength) {
+ if (!is_strong(strength)) {
+ // ES6 section 7.2.11 Abstract Relational Comparison step 3 and 4.
+ if (!Object::ToPrimitive(x, ToPrimitiveHint::kNumber).ToHandle(&x) ||
+ !Object::ToPrimitive(y, ToPrimitiveHint::kNumber).ToHandle(&y)) {
+ return Nothing<ComparisonResult>();
+ }
+ }
+ if (x->IsString() && y->IsString()) {
+ // ES6 section 7.2.11 Abstract Relational Comparison step 5.
+ return Just(
+ String::Compare(Handle<String>::cast(x), Handle<String>::cast(y)));
+ }
+ // ES6 section 7.2.11 Abstract Relational Comparison step 6.
+ if (!is_strong(strength)) {
+ if (!Object::ToNumber(x).ToHandle(&x) ||
+ !Object::ToNumber(y).ToHandle(&y)) {
+ return Nothing<ComparisonResult>();
+ }
+ } else {
+ if (!x->IsNumber()) {
+ Isolate* const isolate = Handle<HeapObject>::cast(x)->GetIsolate();
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kStrongImplicitConversion));
+ return Nothing<ComparisonResult>();
+ } else if (!y->IsNumber()) {
+ Isolate* const isolate = Handle<HeapObject>::cast(y)->GetIsolate();
+ isolate->Throw(*isolate->factory()->NewTypeError(
+ MessageTemplate::kStrongImplicitConversion));
+ return Nothing<ComparisonResult>();
+ }
+ }
+ return Just(NumberCompare(x->Number(), y->Number()));
+}
+
+
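Compare above follows ES6 Abstract Relational Comparison: both operands pass through ToPrimitive(kNumber) (in strong mode, non-numbers throw instead), two strings compare lexicographically, and everything else compares numerically through NumberCompare, where NaN yields kUndefined rather than an ordering. Illustrative checks against the helper above:

DCHECK(NumberCompare(1.0, 2.0) == ComparisonResult::kLessThan);
DCHECK(NumberCompare(2.0, 2.0) == ComparisonResult::kEqual);
DCHECK(NumberCompare(std::nan(""), 2.0) == ComparisonResult::kUndefined);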
+// static
+Maybe<bool> Object::Equals(Handle<Object> x, Handle<Object> y) {
+ while (true) {
+ if (x->IsNumber()) {
+ if (y->IsNumber()) {
+ return Just(NumberEquals(x, y));
+ } else if (y->IsBoolean()) {
+ return Just(NumberEquals(*x, Handle<Oddball>::cast(y)->to_number()));
+ } else if (y->IsString()) {
+ return Just(NumberEquals(x, String::ToNumber(Handle<String>::cast(y))));
+ } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+ if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ .ToHandle(&y)) {
+ return Nothing<bool>();
+ }
+ } else {
+ return Just(false);
+ }
+ } else if (x->IsString()) {
+ if (y->IsString()) {
+ return Just(
+ String::Equals(Handle<String>::cast(x), Handle<String>::cast(y)));
+ } else if (y->IsNumber()) {
+ x = String::ToNumber(Handle<String>::cast(x));
+ return Just(NumberEquals(x, y));
+ } else if (y->IsBoolean()) {
+ x = String::ToNumber(Handle<String>::cast(x));
+ return Just(NumberEquals(*x, Handle<Oddball>::cast(y)->to_number()));
+ } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+ if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ .ToHandle(&y)) {
+ return Nothing<bool>();
+ }
+ } else {
+ return Just(false);
+ }
+ } else if (x->IsBoolean()) {
+ if (y->IsOddball()) {
+ return Just(x.is_identical_to(y));
+ } else if (y->IsNumber()) {
+ return Just(NumberEquals(Handle<Oddball>::cast(x)->to_number(), *y));
+ } else if (y->IsString()) {
+ y = String::ToNumber(Handle<String>::cast(y));
+ return Just(NumberEquals(Handle<Oddball>::cast(x)->to_number(), *y));
+ } else if (y->IsJSReceiver() && !y->IsUndetectableObject()) {
+ if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(y))
+ .ToHandle(&y)) {
+ return Nothing<bool>();
+ }
+ x = Oddball::ToNumber(Handle<Oddball>::cast(x));
+ } else {
+ return Just(false);
+ }
+ } else if (x->IsSymbol()) {
+ return Just(x.is_identical_to(y));
+ } else if (x->IsSimd128Value()) {
+ if (!y->IsSimd128Value()) return Just(false);
+ return Just(Simd128Value::Equals(Handle<Simd128Value>::cast(x),
+ Handle<Simd128Value>::cast(y)));
+ } else if (x->IsJSReceiver() && !x->IsUndetectableObject()) {
+ if (y->IsJSReceiver()) {
+ return Just(x.is_identical_to(y));
+ } else if (y->IsNull() || y->IsSimd128Value() || y->IsSymbol() ||
+ y->IsUndefined()) {
+ return Just(false);
+ } else if (y->IsBoolean()) {
+ y = Oddball::ToNumber(Handle<Oddball>::cast(y));
+ }
+ if (!JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(x)).ToHandle(&x)) {
+ return Nothing<bool>();
+ }
+ } else {
+ return Just(
+ (x->IsNull() || x->IsUndefined() || x->IsUndetectableObject()) &&
+ (y->IsNull() || y->IsUndefined() || y->IsUndetectableObject()));
+ }
+ }
+}
+
+
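The loop above is ES6 Abstract Equality: every pass either returns or replaces one operand with something strictly more primitive (a Boolean becomes its number, a String becomes a number where needed, a JSReceiver becomes its ToPrimitive result), so it terminates. Illustrative outcomes of the arms above:

// Equals(1, "1")          -> Just(true)   (string coerced via ToNumber)
// Equals(true, 1)         -> Just(true)   (oddball's to_number() compared)
// Equals(Symbol(), y)     -> Just(true) only if y is the same symbol
// Equals(null, undefined) -> Just(true)   (final arm: both are nullish)
// A throwing ToPrimitive surfaces as Nothing<bool>().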
bool Object::StrictEquals(Object* that) {
if (this->IsNumber()) {
if (!that->IsNumber()) return false;
- double const x = this->Number();
- double const y = that->Number();
- // Must check explicitly for NaN:s on Windows, but -0 works fine.
- return x == y && !std::isnan(x) && !std::isnan(y);
+ return NumberEquals(this, that);
} else if (this->IsString()) {
if (!that->IsString()) return false;
return String::cast(this)->Equals(String::cast(that));
@@ -120,14 +374,217 @@ bool Object::StrictEquals(Object* that) {
}
-bool Object::IsCallable() const {
- const Object* fun = this;
- while (fun->IsJSFunctionProxy()) {
- fun = JSFunctionProxy::cast(fun)->call_trap();
+// static
+Handle<String> Object::TypeOf(Isolate* isolate, Handle<Object> object) {
+ if (object->IsNumber()) return isolate->factory()->number_string();
+ if (object->IsUndefined() || object->IsUndetectableObject()) {
+ return isolate->factory()->undefined_string();
+ }
+ if (object->IsBoolean()) return isolate->factory()->boolean_string();
+ if (object->IsSymbol()) return isolate->factory()->symbol_string();
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ if (object->Is##Type()) return isolate->factory()->type##_string();
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ if (object->IsCallable()) return isolate->factory()->function_string();
+ return isolate->factory()->object_string();
+}
+
+
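TypeOf above reproduces JavaScript's typeof table, including the quirk that undetectable objects report "undefined" and any callable receiver reports "function". The dispatch, summarized:

// Number                    -> "number"
// Undefined or undetectable -> "undefined"
// Boolean                   -> "boolean"
// Symbol                    -> "symbol"
// SIMD128 value             -> its type name (e.g. "float32x4")
// callable                  -> "function"
// anything else             -> "object"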
+// static
+MaybeHandle<Object> Object::Multiply(Isolate* isolate, Handle<Object> lhs,
+ Handle<Object> rhs, Strength strength) {
+ if (!lhs->IsNumber() || !rhs->IsNumber()) {
+ if (is_strong(strength)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrongImplicitConversion),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
+ }
+ return isolate->factory()->NewNumber(lhs->Number() * rhs->Number());
+}
+
+
+// static
+MaybeHandle<Object> Object::Divide(Isolate* isolate, Handle<Object> lhs,
+ Handle<Object> rhs, Strength strength) {
+ if (!lhs->IsNumber() || !rhs->IsNumber()) {
+ if (is_strong(strength)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrongImplicitConversion),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
+ }
+ return isolate->factory()->NewNumber(lhs->Number() / rhs->Number());
+}
+
+
+// static
+MaybeHandle<Object> Object::Modulus(Isolate* isolate, Handle<Object> lhs,
+ Handle<Object> rhs, Strength strength) {
+ if (!lhs->IsNumber() || !rhs->IsNumber()) {
+ if (is_strong(strength)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrongImplicitConversion),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
+ }
+ return isolate->factory()->NewNumber(modulo(lhs->Number(), rhs->Number()));
+}
+
+
+// static
+MaybeHandle<Object> Object::Add(Isolate* isolate, Handle<Object> lhs,
+ Handle<Object> rhs, Strength strength) {
+ if (lhs->IsNumber() && rhs->IsNumber()) {
+ return isolate->factory()->NewNumber(lhs->Number() + rhs->Number());
+ } else if (lhs->IsString() && rhs->IsString()) {
+ return isolate->factory()->NewConsString(Handle<String>::cast(lhs),
+ Handle<String>::cast(rhs));
+ } else if (is_strong(strength)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrongImplicitConversion),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToPrimitive(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToPrimitive(rhs), Object);
+ if (lhs->IsString() || rhs->IsString()) {
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToString(isolate, rhs),
+ Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToString(isolate, lhs),
+ Object);
+ return isolate->factory()->NewConsString(Handle<String>::cast(lhs),
+ Handle<String>::cast(rhs));
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
+ return isolate->factory()->NewNumber(lhs->Number() + rhs->Number());
+}
+
+
+// static
+MaybeHandle<Object> Object::Subtract(Isolate* isolate, Handle<Object> lhs,
+ Handle<Object> rhs, Strength strength) {
+ if (!lhs->IsNumber() || !rhs->IsNumber()) {
+ if (is_strong(strength)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrongImplicitConversion),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
+ }
+ return isolate->factory()->NewNumber(lhs->Number() - rhs->Number());
+}
+
+
+// static
+MaybeHandle<Object> Object::ShiftLeft(Isolate* isolate, Handle<Object> lhs,
+ Handle<Object> rhs, Strength strength) {
+ if (!lhs->IsNumber() || !rhs->IsNumber()) {
+ if (is_strong(strength)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrongImplicitConversion),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
+ }
+ return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs)
+ << (NumberToUint32(*rhs) & 0x1F));
+}
+
+
+// static
+MaybeHandle<Object> Object::ShiftRight(Isolate* isolate, Handle<Object> lhs,
+ Handle<Object> rhs, Strength strength) {
+ if (!lhs->IsNumber() || !rhs->IsNumber()) {
+ if (is_strong(strength)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrongImplicitConversion),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
+ }
+ return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs) >>
+ (NumberToUint32(*rhs) & 0x1F));
+}
+
+
+// static
+MaybeHandle<Object> Object::ShiftRightLogical(Isolate* isolate,
+ Handle<Object> lhs,
+ Handle<Object> rhs,
+ Strength strength) {
+ if (!lhs->IsNumber() || !rhs->IsNumber()) {
+ if (is_strong(strength)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrongImplicitConversion),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
+ }
+ return isolate->factory()->NewNumberFromUint(NumberToUint32(*lhs) >>
+ (NumberToUint32(*rhs) & 0x1F));
+}
+
+
+// static
+MaybeHandle<Object> Object::BitwiseAnd(Isolate* isolate, Handle<Object> lhs,
+ Handle<Object> rhs, Strength strength) {
+ if (!lhs->IsNumber() || !rhs->IsNumber()) {
+ if (is_strong(strength)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrongImplicitConversion),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
}
- return fun->IsJSFunction() ||
- (fun->IsHeapObject() &&
- HeapObject::cast(fun)->map()->has_instance_call_handler());
+ return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs) &
+ NumberToInt32(*rhs));
+}
+
+
+// static
+MaybeHandle<Object> Object::BitwiseOr(Isolate* isolate, Handle<Object> lhs,
+ Handle<Object> rhs, Strength strength) {
+ if (!lhs->IsNumber() || !rhs->IsNumber()) {
+ if (is_strong(strength)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrongImplicitConversion),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
+ }
+ return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs) |
+ NumberToInt32(*rhs));
+}
+
+
+// static
+MaybeHandle<Object> Object::BitwiseXor(Isolate* isolate, Handle<Object> lhs,
+ Handle<Object> rhs, Strength strength) {
+ if (!lhs->IsNumber() || !rhs->IsNumber()) {
+ if (is_strong(strength)) {
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kStrongImplicitConversion),
+ Object);
+ }
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, lhs, Object::ToNumber(lhs), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, rhs, Object::ToNumber(rhs), Object);
+ }
+ return isolate->factory()->NewNumberFromInt(NumberToInt32(*lhs) ^
+ NumberToInt32(*rhs));
}
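The shift operators above coerce the left operand with NumberToInt32 (or NumberToUint32 for the logical shift) and mask the count to five bits, matching JavaScript, where only the low 5 bits of the shift count are used. A standalone sketch of the unsigned case (not V8 API):

#include <cstdint>

uint32_t ShiftRightLogicalSketch(int32_t lhs, uint32_t count) {
  // (-1 >>> 0) === 4294967295 in JS: the int32 is reinterpreted as uint32.
  return static_cast<uint32_t>(lhs) >> (count & 0x1F);
}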
@@ -139,12 +596,32 @@ bool Object::IsPromise(Handle<Object> object) {
auto isolate = js_object->GetIsolate();
// TODO(dcarney): this should just be read from the symbol registry so as not
// to be context dependent.
- auto key = isolate->promise_status();
+ auto key = isolate->factory()->promise_status_symbol();
// Shouldn't be possible to throw here.
return JSObject::HasRealNamedProperty(js_object, key).FromJust();
}
+// static
+MaybeHandle<Object> Object::GetMethod(Handle<JSReceiver> receiver,
+ Handle<Name> name) {
+ Handle<Object> func;
+ Isolate* isolate = receiver->GetIsolate();
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, func,
+ JSReceiver::GetProperty(receiver, name), Object);
+ if (func->IsNull() || func->IsUndefined()) {
+ return isolate->factory()->undefined_value();
+ }
+ if (!func->IsCallable()) {
+ // TODO(bmeurer): Better error message here?
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCalledNonCallable, func),
+ Object);
+ }
+ return func;
+}
+
+
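GetMethod above implements ES6 GetMethod(V, P): null and undefined map to undefined, and a present but non-callable value is a TypeError. A hypothetical call site; the receiver handle and the use of iterator_symbol() are assumptions of the sketch:

Handle<Object> method;
ASSIGN_RETURN_ON_EXCEPTION(
    isolate, method,
    Object::GetMethod(receiver, isolate->factory()->iterator_symbol()),
    Object);
if (method->IsUndefined()) {
  // No @@iterator method; the caller falls back to its default behavior.
}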
MaybeHandle<Object> Object::GetProperty(LookupIterator* it,
LanguageMode language_mode) {
for (; it->IsFound(); it->Next()) {
@@ -365,7 +842,7 @@ MaybeHandle<Object> Object::GetPropertyWithAccessor(
// Regular accessor.
Handle<Object> getter(AccessorPair::cast(*structure)->getter(), isolate);
- if (getter->IsSpecFunction()) {
+ if (getter->IsCallable()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return Object::GetPropertyWithDefinedGetter(
receiver, Handle<JSReceiver>::cast(getter));
@@ -421,7 +898,7 @@ MaybeHandle<Object> Object::SetPropertyWithAccessor(
// Regular accessor.
Handle<Object> setter(AccessorPair::cast(*structure)->setter(), isolate);
- if (setter->IsSpecFunction()) {
+ if (setter->IsCallable()) {
// TODO(rossberg): nicer would be to cast to some JSCallable here...
return SetPropertyWithDefinedSetter(
receiver, Handle<JSReceiver>::cast(setter), value);
@@ -460,7 +937,7 @@ MaybeHandle<Object> Object::GetPropertyWithDefinedGetter(
// TODO(rossberg): should this apply to getters that are function proxies?
if (debug->is_active()) debug->HandleStepIn(getter, false);
- return Execution::Call(isolate, getter, receiver, 0, NULL, true);
+ return Execution::Call(isolate, getter, receiver, 0, NULL);
}
@@ -477,7 +954,7 @@ MaybeHandle<Object> Object::SetPropertyWithDefinedSetter(
Handle<Object> argv[] = { value };
RETURN_ON_EXCEPTION(isolate, Execution::Call(isolate, setter, receiver,
- arraysize(argv), argv, true),
+ arraysize(argv), argv),
Object);
return value;
}
@@ -1473,6 +1950,73 @@ void HeapNumber::HeapNumberPrint(std::ostream& os) { // NOLINT
(*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
+// static
+Handle<String> Simd128Value::ToString(Handle<Simd128Value> input) {
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+ if (input->Is##Type()) return Type::ToString(Handle<Type>::cast(input));
+ SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+ UNREACHABLE();
+ return Handle<String>::null();
+}
+
+
+// static
+Handle<String> Float32x4::ToString(Handle<Float32x4> input) {
+ Isolate* const isolate = input->GetIsolate();
+ char arr[100];
+ Vector<char> buffer(arr, arraysize(arr));
+ std::ostringstream os;
+ os << "SIMD.Float32x4("
+ << std::string(DoubleToCString(input->get_lane(0), buffer)) << ", "
+ << std::string(DoubleToCString(input->get_lane(1), buffer)) << ", "
+ << std::string(DoubleToCString(input->get_lane(2), buffer)) << ", "
+ << std::string(DoubleToCString(input->get_lane(3), buffer)) << ")";
+ return isolate->factory()->NewStringFromAsciiChecked(os.str().c_str());
+}
+
+
+#define SIMD128_BOOL_TO_STRING(Type, lane_count) \
+ Handle<String> Type::ToString(Handle<Type> input) { \
+ Isolate* const isolate = input->GetIsolate(); \
+ std::ostringstream os; \
+ os << "SIMD." #Type "("; \
+ os << (input->get_lane(0) ? "true" : "false"); \
+ for (int i = 1; i < lane_count; i++) { \
+ os << ", " << (input->get_lane(i) ? "true" : "false"); \
+ } \
+ os << ")"; \
+ return isolate->factory()->NewStringFromAsciiChecked(os.str().c_str()); \
+ }
+SIMD128_BOOL_TO_STRING(Bool32x4, 4)
+SIMD128_BOOL_TO_STRING(Bool16x8, 8)
+SIMD128_BOOL_TO_STRING(Bool8x16, 16)
+#undef SIMD128_BOOL_TO_STRING
+
+
+#define SIMD128_INT_TO_STRING(Type, lane_count) \
+ Handle<String> Type::ToString(Handle<Type> input) { \
+ Isolate* const isolate = input->GetIsolate(); \
+ char arr[100]; \
+ Vector<char> buffer(arr, arraysize(arr)); \
+ std::ostringstream os; \
+ os << "SIMD." #Type "("; \
+ os << IntToCString(input->get_lane(0), buffer); \
+ for (int i = 1; i < lane_count; i++) { \
+ os << ", " << IntToCString(input->get_lane(i), buffer); \
+ } \
+ os << ")"; \
+ return isolate->factory()->NewStringFromAsciiChecked(os.str().c_str()); \
+ }
+SIMD128_INT_TO_STRING(Int32x4, 4)
+SIMD128_INT_TO_STRING(Uint32x4, 4)
+SIMD128_INT_TO_STRING(Int16x8, 8)
+SIMD128_INT_TO_STRING(Uint16x8, 8)
+SIMD128_INT_TO_STRING(Int8x16, 16)
+SIMD128_INT_TO_STRING(Uint8x16, 16)
+#undef SIMD128_INT_TO_STRING
+
+
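Each expansion of the two macros above yields a Type::ToString that renders lanes in source-like form; Float32x4 goes through DoubleToCString, so its lanes print like JavaScript numbers. Sample outputs (illustration):

// Int32x4 lanes {1, 2, 3, 4}              -> "SIMD.Int32x4(1, 2, 3, 4)"
// Bool32x4 lanes {true, false, true, true} -> "SIMD.Bool32x4(true, false, true, true)"
// Float32x4 lanes {0.5, 1, 2, 3}          -> "SIMD.Float32x4(0.5, 1, 2, 3)"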
bool Simd128Value::BitwiseEquals(const Simd128Value* other) const {
return READ_INT64_FIELD(this, kValueOffset) ==
READ_INT64_FIELD(other, kValueOffset) &&
@@ -1776,6 +2320,8 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map,
if (!new_map->is_dictionary_map()) {
MigrateFastToFast(object, new_map);
if (old_map->is_prototype_map()) {
+ DCHECK(!old_map->is_stable());
+ DCHECK(new_map->is_stable());
// Clear out the old descriptor array to avoid problems with sharing
// the descriptor array without using an explicit transition.
old_map->InitializeDescriptors(
@@ -3280,8 +3826,7 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
if (it->IsElement() && receiver->HasFixedTypedArrayElements()) {
if (!value->IsNumber() && !value->IsUndefined()) {
ASSIGN_RETURN_ON_EXCEPTION(it->isolate(), to_assign,
- Execution::ToNumber(it->isolate(), value),
- Object);
+ Object::ToNumber(value), Object);
// ToNumber above might modify the receiver, causing the cached
// holder_map to mismatch the actual holder->map() after this point.
// Reload the map to be in consistent state. Other cached state cannot
@@ -3723,6 +4268,7 @@ Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
Object* maybe_array_maps = map->is_strong()
? native_context->js_array_strong_maps()
: native_context->js_array_maps();
+ // Reuse map transitions for JSArrays.
if (maybe_array_maps->IsFixedArray()) {
DisallowHeapAllocation no_gc;
FixedArray* array_maps = FixedArray::cast(maybe_array_maps);
@@ -3736,6 +4282,14 @@ Handle<Map> Map::TransitionElementsTo(Handle<Map> map,
}
DCHECK(!map->IsUndefined());
+ // Check if we can go back in the elements kind transition chain.
+ if (IsHoleyElementsKind(from_kind) &&
+ to_kind == GetPackedElementsKind(from_kind) &&
+ map->GetBackPointer()->IsMap() &&
+ Map::cast(map->GetBackPointer())->elements_kind() == to_kind) {
+ return handle(Map::cast(map->GetBackPointer()));
+ }
+
bool allow_store_transition = IsTransitionElementsKind(from_kind);
// Only store fast element maps in ascending generality.
if (IsFastElementsKind(to_kind)) {
@@ -4064,9 +4618,8 @@ void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
ElementsKind obj_kind = object->map()->elements_kind();
ElementsKind map_kind = map->elements_kind();
if (map_kind != obj_kind) {
- ElementsKind to_kind = map_kind;
- if (IsMoreGeneralElementsKindTransition(map_kind, obj_kind) ||
- IsDictionaryElementsKind(obj_kind)) {
+ ElementsKind to_kind = GetMoreGeneralElementsKind(map_kind, obj_kind);
+ if (IsDictionaryElementsKind(obj_kind)) {
to_kind = obj_kind;
}
if (IsDictionaryElementsKind(to_kind)) {
@@ -4687,16 +5240,12 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
int index = Smi::cast(iteration_order->get(i))->value();
Object* k = dictionary->KeyAt(index);
DCHECK(dictionary->IsKey(k));
+ // Dictionary keys are internalized upon insertion.
+ // TODO(jkummerow): Turn this into a DCHECK if it's not hit in the wild.
+ CHECK(k->IsUniqueName());
+ Handle<Name> key(Name::cast(k), isolate);
Object* value = dictionary->ValueAt(index);
- Handle<Name> key;
- if (k->IsSymbol()) {
- key = handle(Symbol::cast(k));
- } else {
- // Ensure the key is a unique name before writing into the
- // instance descriptor.
- key = factory->InternalizeString(handle(String::cast(k)));
- }
PropertyDetails details = dictionary->DetailsAt(index);
int enumeration_index = details.dictionary_index();
@@ -4796,7 +5345,7 @@ void JSObject::RequireSlowElements(SeededNumberDictionary* dictionary) {
dictionary->set_requires_slow_elements();
// TODO(verwaest): Remove this hack.
if (map()->is_prototype_map()) {
- GetHeap()->ClearAllICsByKind(Code::KEYED_STORE_IC);
+ GetHeap()->ClearAllKeyedStoreICs();
}
}
@@ -4959,7 +5508,7 @@ Object* JSObject::GetHiddenProperty(Handle<Name> key) {
// If the proxy is detached, return undefined.
if (iter.IsAtEnd()) return GetHeap()->the_hole_value();
DCHECK(iter.GetCurrent()->IsJSGlobalObject());
- return JSObject::cast(iter.GetCurrent())->GetHiddenProperty(key);
+ return iter.GetCurrent<JSObject>()->GetHiddenProperty(key);
}
DCHECK(!IsJSGlobalProxy());
Object* inline_value = GetHiddenPropertiesHashTable();
@@ -4984,9 +5533,8 @@ Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> object,
// If the proxy is detached, return undefined.
if (iter.IsAtEnd()) return isolate->factory()->undefined_value();
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return SetHiddenProperty(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), key,
- value);
+ return SetHiddenProperty(PrototypeIterator::GetCurrent<JSObject>(iter), key,
+ value);
}
DCHECK(!object->IsJSGlobalProxy());
@@ -5017,8 +5565,8 @@ void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
PrototypeIterator iter(isolate, object);
if (iter.IsAtEnd()) return;
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return DeleteHiddenProperty(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), key);
+ return DeleteHiddenProperty(PrototypeIterator::GetCurrent<JSObject>(iter),
+ key);
}
Object* inline_value = object->GetHiddenPropertiesHashTable();
@@ -5397,13 +5945,13 @@ bool JSObject::ReferencesObject(Object* obj) {
// Check the context extension (if any) if it can have references.
if (context->has_extension() && !context->IsCatchContext()) {
- // With harmony scoping, a JSFunction may have a global context.
+ // With harmony scoping, a JSFunction may have a script context.
// TODO(mvstanton): walk into the ScopeInfo.
if (context->IsScriptContext()) {
return false;
}
- return JSObject::cast(context->extension())->ReferencesObject(obj);
+ return context->extension_object()->ReferencesObject(obj);
}
}
@@ -5431,8 +5979,7 @@ MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
PrototypeIterator iter(isolate, object);
if (iter.IsAtEnd()) return object;
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
- return PreventExtensions(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)));
+ return PreventExtensions(PrototypeIterator::GetCurrent<JSObject>(iter));
}
// It's not possible to seal objects with external array elements
@@ -5474,7 +6021,7 @@ bool JSObject::IsExtensible() {
PrototypeIterator iter(GetIsolate(), this);
if (iter.IsAtEnd()) return false;
DCHECK(iter.GetCurrent()->IsJSGlobalObject());
- return JSObject::cast(iter.GetCurrent())->map()->is_extensible();
+ return iter.GetCurrent<JSObject>()->map()->is_extensible();
}
return map()->is_extensible();
}
@@ -5525,7 +6072,7 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
if (iter.IsAtEnd()) return object;
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
return PreventExtensionsWithTransition<attrs>(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)));
+ PrototypeIterator::GetCurrent<JSObject>(iter));
}
// It's not possible to seal or freeze objects with external array elements
@@ -5900,6 +6447,76 @@ MaybeHandle<JSObject> JSObject::DeepCopy(
}
+// static
+MaybeHandle<Object> JSReceiver::ToPrimitive(Handle<JSReceiver> receiver,
+ ToPrimitiveHint hint) {
+ Isolate* const isolate = receiver->GetIsolate();
+ Handle<Object> exotic_to_prim;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, exotic_to_prim,
+ GetMethod(receiver, isolate->factory()->to_primitive_symbol()), Object);
+ if (!exotic_to_prim->IsUndefined()) {
+ Handle<Object> hint_string;
+ switch (hint) {
+ case ToPrimitiveHint::kDefault:
+ hint_string = isolate->factory()->default_string();
+ break;
+ case ToPrimitiveHint::kNumber:
+ hint_string = isolate->factory()->number_string();
+ break;
+ case ToPrimitiveHint::kString:
+ hint_string = isolate->factory()->string_string();
+ break;
+ }
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, exotic_to_prim, receiver, 1, &hint_string),
+ Object);
+ if (result->IsPrimitive()) return result;
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCannotConvertToPrimitive),
+ Object);
+ }
+ return OrdinaryToPrimitive(receiver, (hint == ToPrimitiveHint::kString)
+ ? OrdinaryToPrimitiveHint::kString
+ : OrdinaryToPrimitiveHint::kNumber);
+}
+
+
+// static
+MaybeHandle<Object> JSReceiver::OrdinaryToPrimitive(
+ Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint) {
+ Isolate* const isolate = receiver->GetIsolate();
+ Handle<String> method_names[2];
+ switch (hint) {
+ case OrdinaryToPrimitiveHint::kNumber:
+ method_names[0] = isolate->factory()->valueOf_string();
+ method_names[1] = isolate->factory()->toString_string();
+ break;
+ case OrdinaryToPrimitiveHint::kString:
+ method_names[0] = isolate->factory()->toString_string();
+ method_names[1] = isolate->factory()->valueOf_string();
+ break;
+ }
+ for (Handle<String> name : method_names) {
+ Handle<Object> method;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, method,
+ JSReceiver::GetProperty(receiver, name), Object);
+ if (method->IsCallable()) {
+ Handle<Object> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, Execution::Call(isolate, method, receiver, 0, NULL),
+ Object);
+ if (result->IsPrimitive()) return result;
+ }
+ }
+ THROW_NEW_ERROR(isolate,
+ NewTypeError(MessageTemplate::kCannotConvertToPrimitive),
+ Object);
+}
+
+
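ToPrimitive above first consults an exotic @@toPrimitive method through GetMethod and only then falls back to OrdinaryToPrimitive, which tries valueOf before toString under a number hint and the reverse under a string hint; the first callable whose result IsPrimitive() wins, otherwise a TypeError is thrown. A standalone model of that method-order rule (placeholders, not V8 API):

#include <functional>
#include <optional>

using Conversion = std::function<std::optional<double>()>;

std::optional<double> OrdinaryToPrimitiveSketch(bool hint_is_string,
                                                Conversion value_of,
                                                Conversion to_string) {
  Conversion first = hint_is_string ? to_string : value_of;
  Conversion second = hint_is_string ? value_of : to_string;
  if (auto result = first()) return result;  // first primitive result wins
  if (auto result = second()) return result;
  return std::nullopt;  // the caller raises kCannotConvertToPrimitive
}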
// Tests for the fast common case for property enumeration:
// - This object and all prototypes has an enum cache (which means that
// it is no proxy, has no interceptors and needs no access checks).
@@ -5910,14 +6527,14 @@ bool JSReceiver::IsSimpleEnum() {
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
if (!iter.GetCurrent()->IsJSObject()) return false;
- JSObject* curr = JSObject::cast(iter.GetCurrent());
- int enum_length = curr->map()->EnumLength();
+ JSObject* current = iter.GetCurrent<JSObject>();
+ int enum_length = current->map()->EnumLength();
if (enum_length == kInvalidEnumCacheSentinel) return false;
- if (curr->IsAccessCheckNeeded()) return false;
- DCHECK(!curr->HasNamedInterceptor());
- DCHECK(!curr->HasIndexedInterceptor());
- if (curr->NumberOfEnumElements() > 0) return false;
- if (curr != this && enum_length != 0) return false;
+ if (current->IsAccessCheckNeeded()) return false;
+ DCHECK(!current->HasNamedInterceptor());
+ DCHECK(!current->HasIndexedInterceptor());
+ if (current->NumberOfEnumElements() > 0) return false;
+ if (current != this && enum_length != 0) return false;
}
return true;
}
@@ -6102,11 +6719,123 @@ Handle<FixedArray> JSObject::GetEnumPropertyKeys(Handle<JSObject> object,
}
+Handle<FixedArray> KeyAccumulator::GetKeys() {
+ if (length_ == 0) {
+ return isolate_->factory()->empty_fixed_array();
+ }
+ if (set_.is_null()) {
+ keys_->Shrink(length_);
+ return keys_;
+ }
+  // Copy over the results from set_.
+ Handle<FixedArray> result = isolate_->factory()->NewFixedArray(length_);
+ for (int i = 0; i < length_; i++) {
+ result->set(i, set_->KeyAt(i));
+ }
+ return result;
+}
+
+
+void KeyAccumulator::AddKey(Handle<Object> key, int check_limit) {
+#ifdef ENABLE_SLOW_DCHECKS
+ if (FLAG_enable_slow_asserts) {
+ DCHECK(key->IsNumber() || key->IsName());
+ }
+#endif
+ if (!set_.is_null()) {
+ set_ = OrderedHashSet::Add(set_, key);
+ length_ = set_->NumberOfElements();
+ return;
+ }
+  // Check whether we already have the key, in case we are still using
+  // the keys_ FixedArray.
+ check_limit = Min(check_limit, length_);
+ for (int i = 0; i < check_limit; i++) {
+ Object* current = keys_->get(i);
+ if (current->KeyEquals(*key)) return;
+ }
+ EnsureCapacity(length_);
+ keys_->set(length_, *key);
+ length_++;
+}
+
+
+void KeyAccumulator::AddKeys(Handle<FixedArray> array,
+ FixedArray::KeyFilter filter) {
+ int add_length = array->length();
+ if (add_length == 0) return;
+ if (keys_.is_null() && filter == FixedArray::ALL_KEYS) {
+ keys_ = array;
+ length_ = keys_->length();
+ return;
+ }
+ PrepareForComparisons(add_length);
+ int previous_key_count = length_;
+ for (int i = 0; i < add_length; i++) {
+ Handle<Object> current(array->get(i), isolate_);
+ if (filter == FixedArray::NON_SYMBOL_KEYS && current->IsSymbol()) continue;
+ AddKey(current, previous_key_count);
+ }
+}
+
+
+void KeyAccumulator::AddKeys(Handle<JSObject> array_like,
+ FixedArray::KeyFilter filter) {
+ DCHECK(array_like->IsJSArray() || array_like->HasSloppyArgumentsElements());
+ ElementsAccessor* accessor = array_like->GetElementsAccessor();
+ accessor->AddElementsToKeyAccumulator(array_like, this, filter);
+}
+
+
+void KeyAccumulator::PrepareForComparisons(int count) {
+  // Depending on how many comparisons we expect to do, switch to the
+  // hash-table-based checks, which have a one-time initialization
+  // overhead but O(1) HasKey checks.
+ if (!set_.is_null()) return;
+  // This limit was obtained by evaluating a microbenchmark.
+ if (length_ * count < 50) return;
+ set_ = OrderedHashSet::Allocate(isolate_, length_);
+ for (int i = 0; i < length_; i++) {
+ Handle<Object> value(keys_->get(i), isolate_);
+ set_ = OrderedHashSet::Add(set_, value);
+ }
+}
+
+
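The cut-over above bounds the cost of the linear path: merging count incoming keys into length_ existing ones takes up to length_ * count KeyEquals calls. Worked example of the threshold:

// length_ = 10 existing keys, count = 5 incoming
//   worst case 10 * 5 = 50 comparisons  -> build the OrderedHashSet
// length_ = 4 existing keys, count = 4 incoming
//   worst case 4 * 4 = 16 comparisons   -> stay on the FixedArray path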
+void KeyAccumulator::EnsureCapacity(int capacity) {
+ if (keys_.is_null() || keys_->length() <= capacity) {
+ Grow();
+ }
+}
+
+
+void KeyAccumulator::Grow() {
+ // The OrderedHashSet handles growing by itself.
+ if (!set_.is_null()) return;
+  // Otherwise, grow the internal keys_ FixedArray.
+ int capacity = keys_.is_null() ? 16 : keys_->length() * 2 + 16;
+ Handle<FixedArray> new_keys = isolate_->factory()->NewFixedArray(capacity);
+ if (keys_.is_null()) {
+ keys_ = new_keys;
+ return;
+ }
+ int buffer_length = keys_->length();
+ {
+ DisallowHeapAllocation no_gc;
+ WriteBarrierMode mode = new_keys->GetWriteBarrierMode(no_gc);
+ for (int i = 0; i < buffer_length; i++) {
+ new_keys->set(i, keys_->get(i), mode);
+ }
+ }
+ keys_ = new_keys;
+}
+
+
MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
KeyCollectionType type) {
USE(ContainsOnlyValidKeys);
Isolate* isolate = object->GetIsolate();
- Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
+ KeyAccumulator accumulator(isolate);
Handle<JSFunction> arguments_function(
JSFunction::cast(isolate->sloppy_arguments_map()->GetConstructor()));
@@ -6118,8 +6847,7 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(end); iter.Advance()) {
if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- Handle<JSProxy> proxy(JSProxy::cast(*PrototypeIterator::GetCurrent(iter)),
- isolate);
+ Handle<JSProxy> proxy = PrototypeIterator::GetCurrent<JSProxy>(iter);
Handle<Object> args[] = { proxy };
Handle<Object> names;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -6130,16 +6858,11 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
arraysize(args),
args),
FixedArray);
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, content,
- FixedArray::AddKeysFromArrayLike(
- content, Handle<JSObject>::cast(names)),
- FixedArray);
+ accumulator.AddKeys(Handle<JSObject>::cast(names), FixedArray::ALL_KEYS);
break;
}
- Handle<JSObject> current =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
// Check access rights if required.
if (current->IsAccessCheckNeeded() && !isolate->MayAccess(current)) {
@@ -6154,23 +6877,17 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
Handle<FixedArray> element_keys =
isolate->factory()->NewFixedArray(current->NumberOfEnumElements());
current->GetEnumElementKeys(*element_keys);
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, content,
- FixedArray::UnionOfKeys(content, element_keys),
- FixedArray);
- DCHECK(ContainsOnlyValidKeys(content));
+ accumulator.AddKeys(element_keys, FixedArray::ALL_KEYS);
+ DCHECK(ContainsOnlyValidKeys(accumulator.GetKeys()));
// Add the element keys from the interceptor.
if (current->HasIndexedInterceptor()) {
Handle<JSObject> result;
if (JSObject::GetKeysForIndexedInterceptor(
current, object).ToHandle(&result)) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, content,
- FixedArray::AddKeysFromArrayLike(content, result),
- FixedArray);
+ accumulator.AddKeys(result, FixedArray::ALL_KEYS);
}
- DCHECK(ContainsOnlyValidKeys(content));
+ DCHECK(ContainsOnlyValidKeys(accumulator.GetKeys()));
}
// We can cache the computed property keys if access checks are
@@ -6188,27 +6905,26 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
!current->IsJSValue() && !current->IsAccessCheckNeeded() &&
!current->HasNamedInterceptor() && !current->HasIndexedInterceptor());
// Compute the property keys and cache them if possible.
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, content,
- FixedArray::UnionOfKeys(
- content, JSObject::GetEnumPropertyKeys(current, cache_enum_keys)),
- FixedArray);
- DCHECK(ContainsOnlyValidKeys(content));
+
+ Handle<FixedArray> enum_keys =
+ JSObject::GetEnumPropertyKeys(current, cache_enum_keys);
+ accumulator.AddKeys(enum_keys, FixedArray::ALL_KEYS);
+ DCHECK(ContainsOnlyValidKeys(accumulator.GetKeys()));
// Add the non-symbol property keys from the interceptor.
if (current->HasNamedInterceptor()) {
Handle<JSObject> result;
if (JSObject::GetKeysForNamedInterceptor(
current, object).ToHandle(&result)) {
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, content, FixedArray::AddKeysFromArrayLike(
- content, result, FixedArray::NON_SYMBOL_KEYS),
- FixedArray);
+ accumulator.AddKeys(result, FixedArray::NON_SYMBOL_KEYS);
}
- DCHECK(ContainsOnlyValidKeys(content));
+ DCHECK(ContainsOnlyValidKeys(accumulator.GetKeys()));
}
}
- return content;
+
+ Handle<FixedArray> keys = accumulator.GetKeys();
+ DCHECK(ContainsOnlyValidKeys(keys));
+ return keys;
}
@@ -6222,7 +6938,7 @@ bool Map::DictionaryElementsInPrototypeChainOnly() {
if (iter.GetCurrent()->IsJSProxy()) return true;
// String wrappers have non-configurable, non-writable elements.
if (iter.GetCurrent()->IsStringWrapper()) return true;
- JSObject* current = JSObject::cast(iter.GetCurrent());
+ JSObject* current = iter.GetCurrent<JSObject>();
if (current->HasDictionaryElements() &&
current->element_dictionary()->requires_slow_elements()) {
@@ -6279,8 +6995,8 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
}
}
- DCHECK(getter->IsSpecFunction() || getter->IsUndefined() || getter->IsNull());
- DCHECK(setter->IsSpecFunction() || setter->IsUndefined() || setter->IsNull());
+ DCHECK(getter->IsCallable() || getter->IsUndefined() || getter->IsNull());
+ DCHECK(setter->IsCallable() || setter->IsUndefined() || setter->IsNull());
// At least one of the accessors needs to be a new value.
DCHECK(!getter->IsNull() || !setter->IsNull());
if (!getter->IsNull()) {
@@ -6727,7 +7443,7 @@ Handle<Map> Map::CopyInstallDescriptors(
#else
SLOW_DCHECK(result->layout_descriptor()->IsConsistentWithMap(*result));
#endif
- result->set_visitor_id(StaticVisitorBase::GetVisitorId(*result));
+ result->set_visitor_id(Heap::GetStaticVisitorIdForMap(*result));
}
Handle<Name> name = handle(descriptors->GetKey(new_descriptor));
@@ -6864,7 +7580,7 @@ Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
copy->SetInObjectProperties(inobject_properties);
copy->set_unused_property_fields(inobject_properties);
copy->set_instance_size(new_instance_size);
- copy->set_visitor_id(StaticVisitorBase::GetVisitorId(*copy));
+ copy->set_visitor_id(Heap::GetStaticVisitorIdForMap(*copy));
return copy;
}
@@ -7709,53 +8425,6 @@ void FixedArray::Shrink(int new_length) {
}
-MaybeHandle<FixedArray> FixedArray::AddKeysFromArrayLike(
- Handle<FixedArray> content, Handle<JSObject> array, KeyFilter filter) {
- DCHECK(array->IsJSArray() || array->HasSloppyArgumentsElements());
- ElementsAccessor* accessor = array->GetElementsAccessor();
- Handle<FixedArray> result =
- accessor->AddElementsToFixedArray(array, content, filter);
-
-#ifdef ENABLE_SLOW_DCHECKS
- if (FLAG_enable_slow_asserts) {
- DisallowHeapAllocation no_allocation;
- for (int i = 0; i < result->length(); i++) {
- Object* current = result->get(i);
- DCHECK(current->IsNumber() || current->IsName());
- }
- }
-#endif
- return result;
-}
-
-
-MaybeHandle<FixedArray> FixedArray::UnionOfKeys(Handle<FixedArray> first,
- Handle<FixedArray> second) {
- if (second->length() == 0) return first;
- if (first->length() == 0) return second;
- Isolate* isolate = first->GetIsolate();
- Handle<FixedArray> result =
- isolate->factory()->NewFixedArray(first->length() + second->length());
- for (int i = 0; i < first->length(); i++) {
- result->set(i, first->get(i));
- }
- int pos = first->length();
- for (int j = 0; j < second->length(); j++) {
- Object* current = second->get(j);
- int i;
- for (i = 0; i < first->length(); i++) {
- if (current->KeyEquals(first->get(i))) break;
- }
- if (i == first->length()) {
- result->set(pos++, current);
- }
- }
-
- result->Shrink(pos);
- return result;
-}
-
-
void FixedArray::CopyTo(int pos, FixedArray* dest, int dest_pos, int len) {
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = dest->GetWriteBarrierMode(no_gc);
@@ -7849,6 +8518,17 @@ void WeakFixedArray::Compact() {
}
+void WeakFixedArray::Iterator::Reset(Object* maybe_array) {
+ if (maybe_array->IsWeakFixedArray()) {
+ list_ = WeakFixedArray::cast(maybe_array);
+ index_ = 0;
+#ifdef DEBUG
+ last_used_index_ = list_->last_used_index();
+#endif // DEBUG
+ }
+}
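+
+// Illustrative iteration pattern over a field that may or may not hold a
+// WeakFixedArray (mirrors the Iterator::Next<T>() uses further below):
+//
+//   WeakFixedArray::Iterator it(proto_info->prototype_users());
+//   while (Map* user = it.Next<Map>()) { /* visit |user| */ }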
+
+
void JSObject::PrototypeRegistryCompactionCallback::Callback(Object* value,
int old_index,
int new_index) {
@@ -8102,6 +8782,19 @@ Handle<DeoptimizationOutputData> DeoptimizationOutputData::New(
}
+// static
+Handle<LiteralsArray> LiteralsArray::New(Isolate* isolate,
+ Handle<TypeFeedbackVector> vector,
+ int number_of_literals,
+ PretenureFlag pretenure) {
+ Handle<FixedArray> literals = isolate->factory()->NewFixedArray(
+ number_of_literals + kFirstLiteralIndex, pretenure);
+ Handle<LiteralsArray> casted_literals = Handle<LiteralsArray>::cast(literals);
+ casted_literals->set_feedback_vector(*vector);
+ return casted_literals;
+}
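+
+// Sketch of the resulting layout (assuming kFirstLiteralIndex reserves the
+// leading slot for the feedback vector):
+//
+//   index 0                  : TypeFeedbackVector
+//   index kFirstLiteralIndex : first of |number_of_literals| literal slots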
+
+
int HandlerTable::LookupRange(int pc_offset, int* stack_depth_out,
CatchPrediction* prediction_out) {
int innermost_handler = -1, innermost_start = -1;
@@ -8160,6 +8853,110 @@ bool String::LooksValid() {
}
+// static
+MaybeHandle<String> Name::ToFunctionName(Handle<Name> name) {
+ if (name->IsString()) return Handle<String>::cast(name);
+ // ES6 section 9.2.11 SetFunctionName, step 4.
+ Isolate* const isolate = name->GetIsolate();
+ Handle<Object> description(Handle<Symbol>::cast(name)->name(), isolate);
+ if (description->IsUndefined()) return isolate->factory()->empty_string();
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCharacter('[');
+ builder.AppendString(Handle<String>::cast(description));
+ builder.AppendCharacter(']');
+ return builder.Finish();
+}
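+
+// Examples of the mapping above (per ES6 SetFunctionName, step 4):
+//   String "foo"                      -> "foo"
+//   Symbol with description "bar"     -> "[bar]"
+//   Symbol with undefined description -> ""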
+
+
+namespace {
+
+bool AreDigits(const uint8_t* s, int from, int to) {
+ for (int i = from; i < to; i++) {
+ if (s[i] < '0' || s[i] > '9') return false;
+ }
+
+ return true;
+}
+
+
+int ParseDecimalInteger(const uint8_t* s, int from, int to) {
+ DCHECK(to - from < 10); // Overflow is not possible.
+ DCHECK(from < to);
+ int d = s[from] - '0';
+
+ for (int i = from + 1; i < to; i++) {
+ d = 10 * d + (s[i] - '0');
+ }
+
+ return d;
+}
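+
+// For example, with s = "042" these helpers yield:
+//   AreDigits(s, 0, 3)           -> true
+//   ParseDecimalInteger(s, 0, 3) -> 42   (0, then 0*10+4 = 4, then 4*10+2)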
+
+} // namespace
+
+
+// static
+Handle<Object> String::ToNumber(Handle<String> subject) {
+ Isolate* const isolate = subject->GetIsolate();
+
+ // Flatten {subject} string first.
+ subject = String::Flatten(subject);
+
+ // Fast array index case.
+ uint32_t index;
+ if (subject->AsArrayIndex(&index)) {
+ return isolate->factory()->NewNumberFromUint(index);
+ }
+
+ // Fast case: short integer or some sorts of junk values.
+ if (subject->IsSeqOneByteString()) {
+ int len = subject->length();
+ if (len == 0) return handle(Smi::FromInt(0), isolate);
+
+ DisallowHeapAllocation no_gc;
+ uint8_t const* data = Handle<SeqOneByteString>::cast(subject)->GetChars();
+ bool minus = (data[0] == '-');
+ int start_pos = (minus ? 1 : 0);
+
+ if (start_pos == len) {
+ return isolate->factory()->nan_value();
+ } else if (data[start_pos] > '9') {
+      // Fast check for a junk value. A valid string may start with
+      // whitespace, a sign ('+' or '-'), the decimal point, a decimal digit
+      // or the 'I' character ('Infinity'). All of these have character codes
+      // not greater than '9', except 'I' and the non-breaking space (0xa0).
+ if (data[start_pos] != 'I' && data[start_pos] != 0xa0) {
+ return isolate->factory()->nan_value();
+ }
+ } else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
+ // The maximal/minimal smi has 10 digits. If the string has less digits
+ // we know it will fit into the smi-data type.
+ int d = ParseDecimalInteger(data, start_pos, len);
+ if (minus) {
+ if (d == 0) return isolate->factory()->minus_zero_value();
+ d = -d;
+ } else if (!subject->HasHashCode() && len <= String::kMaxArrayIndexSize &&
+ (len == 1 || data[0] != '0')) {
+        // The string hash is not calculated yet, but all the data is present.
+        // Update the hash field to speed up sequential conversions.
+ uint32_t hash = StringHasher::MakeArrayIndexHash(d, len);
+#ifdef DEBUG
+ subject->Hash(); // Force hash calculation.
+ DCHECK_EQ(static_cast<int>(subject->hash_field()),
+ static_cast<int>(hash));
+#endif
+ subject->set_hash_field(hash);
+ }
+ return handle(Smi::FromInt(d), isolate);
+ }
+ }
+
+ // Slower case.
+ int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
+ return isolate->factory()->NewNumber(
+ StringToDouble(isolate->unicode_cache(), subject, flags));
+}
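+
+// Behavior sketch for the paths above (illustrative inputs):
+//   ""     -> 0     (empty fast case)
+//   "42"   -> 42    (array index / short decimal fast cases)
+//   "-0"   -> -0.0  (minus_zero_value)
+//   "x"    -> NaN   (junk fast check)
+//   "0x1a" -> 26    (slow path, ALLOW_HEX)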
+
+
String::FlatContent String::GetFlatContent() {
DCHECK(!AllowHeapAllocation::IsAllowed());
int length = this->length();
@@ -8948,6 +9745,69 @@ bool String::SlowEquals(Handle<String> one, Handle<String> two) {
}
+// static
+ComparisonResult String::Compare(Handle<String> x, Handle<String> y) {
+ // A few fast case tests before we flatten.
+ if (x.is_identical_to(y)) {
+ return ComparisonResult::kEqual;
+ } else if (y->length() == 0) {
+ return x->length() == 0 ? ComparisonResult::kEqual
+ : ComparisonResult::kGreaterThan;
+ } else if (x->length() == 0) {
+ return ComparisonResult::kLessThan;
+ }
+
+ int const d = x->Get(0) - y->Get(0);
+ if (d < 0) {
+ return ComparisonResult::kLessThan;
+ } else if (d > 0) {
+ return ComparisonResult::kGreaterThan;
+ }
+
+ // Slow case.
+ x = String::Flatten(x);
+ y = String::Flatten(y);
+
+ DisallowHeapAllocation no_gc;
+ ComparisonResult result = ComparisonResult::kEqual;
+ int prefix_length = x->length();
+ if (y->length() < prefix_length) {
+ prefix_length = y->length();
+ result = ComparisonResult::kGreaterThan;
+ } else if (y->length() > prefix_length) {
+ result = ComparisonResult::kLessThan;
+ }
+ int r;
+ String::FlatContent x_content = x->GetFlatContent();
+ String::FlatContent y_content = y->GetFlatContent();
+ if (x_content.IsOneByte()) {
+ Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
+ if (y_content.IsOneByte()) {
+ Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ } else {
+ Vector<const uc16> y_chars = y_content.ToUC16Vector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ }
+ } else {
+ Vector<const uc16> x_chars = x_content.ToUC16Vector();
+ if (y_content.IsOneByte()) {
+ Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ } else {
+ Vector<const uc16> y_chars = y_content.ToUC16Vector();
+ r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
+ }
+ }
+ if (r < 0) {
+ result = ComparisonResult::kLessThan;
+ } else if (r > 0) {
+ result = ComparisonResult::kGreaterThan;
+ }
+ return result;
+}
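+
+// Examples of the resulting ordering (illustrative):
+//   Compare("a", "a")       -> kEqual      (identical contents)
+//   Compare("a", "b")       -> kLessThan   (first characters differ)
+//   Compare("app", "apple") -> kLessThan   (equal prefix, x is shorter)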
+
+
bool String::IsUtf8EqualTo(Vector<const char> str, bool allow_prefix_match) {
int slen = length();
// Can't check exact length equality, but we can check bounds.
@@ -9068,16 +9928,9 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
DCHECK_OBJECT_ALIGNED(start_of_string + new_size);
Heap* heap = string->GetHeap();
- NewSpace* newspace = heap->new_space();
- if (newspace->Contains(start_of_string) &&
- newspace->top() == start_of_string + old_size) {
- // Last allocated object in new space. Simply lower allocation top.
- newspace->set_top(start_of_string + new_size);
- } else {
- // Sizes are pointer size aligned, so that we can use filler objects
- // that are a multiple of pointer size.
- heap->CreateFillerObjectAt(start_of_string + new_size, delta);
- }
+ // Sizes are pointer size aligned, so that we can use filler objects
+ // that are a multiple of pointer size.
+ heap->CreateFillerObjectAt(start_of_string + new_size, delta);
heap->AdjustLiveBytes(*string, -delta, Heap::CONCURRENT_TO_SWEEPER);
// We are storing the new length using release store after creating a filler
@@ -9230,17 +10083,20 @@ int Map::Hash() {
}
-static bool CheckEquivalent(Map* first, Map* second) {
+namespace {
+
+bool CheckEquivalent(Map* first, Map* second) {
return first->GetConstructor() == second->GetConstructor() &&
first->prototype() == second->prototype() &&
first->instance_type() == second->instance_type() &&
first->bit_field() == second->bit_field() &&
first->is_extensible() == second->is_extensible() &&
first->is_strong() == second->is_strong() &&
- first->has_instance_call_handler() ==
- second->has_instance_call_handler();
+ first->is_hidden_prototype() == second->is_hidden_prototype();
}
+} // namespace
+
bool Map::EquivalentToForTransition(Map* other) {
return CheckEquivalent(this, other);
@@ -9328,30 +10184,6 @@ void JSFunction::AttemptConcurrentOptimization() {
}
-Handle<JSFunction> JSFunction::CloneClosure(Handle<JSFunction> function) {
- Isolate* isolate = function->GetIsolate();
- Handle<Map> map(function->map());
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<Context> context(function->context());
- Handle<JSFunction> clone =
- isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
-
- if (shared->bound()) {
- clone->set_function_bindings(function->function_bindings());
- }
-
- // In typical case, __proto__ of ``function`` is the default Function
- // prototype, which means that SetPrototype below is a no-op.
- // In rare cases when that is not true, we mutate the clone's __proto__.
- Handle<Object> original_prototype(map->prototype(), isolate);
- if (*original_prototype != clone->map()->prototype()) {
- JSObject::SetPrototype(clone, original_prototype, false).Assert();
- }
-
- return clone;
-}
-
-
void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared, Handle<Code> code) {
Isolate* isolate = shared->GetIsolate();
@@ -9364,47 +10196,60 @@ void SharedFunctionInfo::AddSharedCodeToOptimizedCodeMap(
void SharedFunctionInfo::AddToOptimizedCodeMap(
- Handle<SharedFunctionInfo> shared,
- Handle<Context> native_context,
- Handle<Code> code,
- Handle<FixedArray> literals,
+ Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
+ Handle<HeapObject> code, Handle<LiteralsArray> literals,
BailoutId osr_ast_id) {
Isolate* isolate = shared->GetIsolate();
- DCHECK(!shared->SearchOptimizedCodeMap(*native_context, osr_ast_id).code);
- DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
+ DCHECK(*code == isolate->heap()->undefined_value() ||
+ !shared->SearchOptimizedCodeMap(*native_context, osr_ast_id).code);
+ DCHECK(*code == isolate->heap()->undefined_value() ||
+ Code::cast(*code)->kind() == Code::OPTIMIZED_FUNCTION);
DCHECK(native_context->IsNativeContext());
STATIC_ASSERT(kEntryLength == 4);
Handle<FixedArray> new_code_map;
Handle<Object> value(shared->optimized_code_map(), isolate);
- int old_length;
+ int entry;
if (value->IsSmi()) {
// No optimized code map.
DCHECK_EQ(0, Smi::cast(*value)->value());
new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
- old_length = kEntriesStart;
+ entry = kEntriesStart;
} else {
- // Copy old optimized code map and append one new entry.
Handle<FixedArray> old_code_map = Handle<FixedArray>::cast(value);
+ entry = shared->SearchOptimizedCodeMapEntry(*native_context, osr_ast_id);
+ if (entry > kSharedCodeIndex) {
+      // Found an existing context-specific entry; it must not contain any code.
+      DCHECK_EQ(isolate->heap()->undefined_value(),
+                old_code_map->get(entry + kCachedCodeOffset));
+      // Just set the code and literals on the entry.
+ old_code_map->set(entry + kCachedCodeOffset, *code);
+ old_code_map->set(entry + kLiteralsOffset, *literals);
+ return;
+ }
+
+ // Copy old optimized code map and append one new entry.
new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
old_code_map, kEntryLength, TENURED);
- old_length = old_code_map->length();
+ int old_length = old_code_map->length();
// Zap the old map to avoid any stale entries. Note that this is required
// for correctness because entries are being treated weakly by the GC.
MemsetPointer(old_code_map->data_start(), isolate->heap()->the_hole_value(),
old_length);
+ entry = old_length;
}
- new_code_map->set(old_length + kContextOffset, *native_context);
- new_code_map->set(old_length + kCachedCodeOffset, *code);
- new_code_map->set(old_length + kLiteralsOffset, *literals);
- new_code_map->set(old_length + kOsrAstIdOffset,
- Smi::FromInt(osr_ast_id.ToInt()));
+ new_code_map->set(entry + kContextOffset, *native_context);
+ new_code_map->set(entry + kCachedCodeOffset, *code);
+ new_code_map->set(entry + kLiteralsOffset, *literals);
+ new_code_map->set(entry + kOsrAstIdOffset, Smi::FromInt(osr_ast_id.ToInt()));
#ifdef DEBUG
for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
DCHECK(new_code_map->get(i + kContextOffset)->IsNativeContext());
- DCHECK(new_code_map->get(i + kCachedCodeOffset)->IsCode());
- DCHECK(Code::cast(new_code_map->get(i + kCachedCodeOffset))->kind() ==
- Code::OPTIMIZED_FUNCTION);
+ Object* code = new_code_map->get(i + kCachedCodeOffset);
+ if (code != isolate->heap()->undefined_value()) {
+ DCHECK(code->IsCode());
+ DCHECK(Code::cast(code)->kind() == Code::OPTIMIZED_FUNCTION);
+ }
DCHECK(new_code_map->get(i + kLiteralsOffset)->IsFixedArray());
DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
}
@@ -9433,37 +10278,43 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
DisallowHeapAllocation no_gc;
if (optimized_code_map()->IsSmi()) return;
+ Heap* heap = GetHeap();
FixedArray* code_map = FixedArray::cast(optimized_code_map());
int dst = kEntriesStart;
int length = code_map->length();
for (int src = kEntriesStart; src < length; src += kEntryLength) {
DCHECK(code_map->get(src)->IsNativeContext());
- if (Code::cast(code_map->get(src + kCachedCodeOffset)) == optimized_code) {
- // Evict the src entry by not copying it to the dst entry.
+ if (code_map->get(src + kCachedCodeOffset) == optimized_code) {
+ BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
if (FLAG_trace_opt) {
PrintF("[evicting entry from optimizing code map (%s) for ", reason);
ShortPrint();
- BailoutId osr(Smi::cast(code_map->get(src + kOsrAstIdOffset))->value());
if (osr.IsNone()) {
PrintF("]\n");
} else {
PrintF(" (osr ast id %d)]\n", osr.ToInt());
}
}
- } else {
- // Keep the src entry by copying it to the dst entry.
- if (dst != src) {
- code_map->set(dst + kContextOffset,
- code_map->get(src + kContextOffset));
- code_map->set(dst + kCachedCodeOffset,
- code_map->get(src + kCachedCodeOffset));
- code_map->set(dst + kLiteralsOffset,
- code_map->get(src + kLiteralsOffset));
- code_map->set(dst + kOsrAstIdOffset,
- code_map->get(src + kOsrAstIdOffset));
+ if (!osr.IsNone()) {
+ // Evict the src entry by not copying it to the dst entry.
+ continue;
}
- dst += kEntryLength;
+      // For a non-OSR entry, just clear the code so that the literals can
+      // still be shared.
+ code_map->set_undefined(src + kCachedCodeOffset);
+ }
+
+ // Keep the src entry by copying it to the dst entry.
+ if (dst != src) {
+ code_map->set(dst + kContextOffset, code_map->get(src + kContextOffset));
+ code_map->set(dst + kCachedCodeOffset,
+ code_map->get(src + kCachedCodeOffset));
+ code_map->set(dst + kLiteralsOffset,
+ code_map->get(src + kLiteralsOffset));
+ code_map->set(dst + kOsrAstIdOffset,
+ code_map->get(src + kOsrAstIdOffset));
}
+ dst += kEntryLength;
}
if (code_map->get(kSharedCodeIndex) == optimized_code) {
// Evict context-independent code as well.
@@ -9476,8 +10327,8 @@ void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
}
if (dst != length) {
// Always trim even when array is cleared because of heap verifier.
- GetHeap()->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
- length - dst);
+ heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(code_map,
+ length - dst);
if (code_map->length() == kEntriesStart &&
code_map->get(kSharedCodeIndex)->IsUndefined()) {
ClearOptimizedCodeMap();
@@ -9515,7 +10366,7 @@ static void ShrinkInstanceSize(Map* map, void* data) {
map->set_instance_size(map->instance_size() - slack * kPointerSize);
// Visitor id might depend on the instance size, recalculate it.
- map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
+ map->set_visitor_id(Heap::GetStaticVisitorIdForMap(map));
}
@@ -9663,7 +10514,7 @@ bool JSObject::UnregisterPrototypeUser(Handle<Map> user, Isolate* isolate) {
if (slot == PrototypeInfo::UNREGISTERED) return false;
if (prototype->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, prototype);
- prototype = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ prototype = PrototypeIterator::GetCurrent<JSObject>(iter);
}
DCHECK(prototype->map()->is_prototype_map());
Object* maybe_proto_info = prototype->map()->prototype_info();
@@ -9699,16 +10550,10 @@ static void InvalidatePrototypeChainsInternal(Map* map) {
cell->set_value(Smi::FromInt(Map::kPrototypeChainInvalid));
}
- Object* maybe_array = proto_info->prototype_users();
- if (!maybe_array->IsWeakFixedArray()) return;
-
- WeakFixedArray* users = WeakFixedArray::cast(maybe_array);
- for (int i = 0; i < users->Length(); ++i) {
- Object* maybe_user = users->Get(i);
- if (maybe_user->IsSmi()) continue;
-
- // For now, only maps register themselves as users.
- Map* user = Map::cast(maybe_user);
+ WeakFixedArray::Iterator iterator(proto_info->prototype_users());
+ // For now, only maps register themselves as users.
+ Map* user;
+ while ((user = iterator.Next<Map>())) {
// Walk the prototype chain (backwards, towards leaf objects) if necessary.
InvalidatePrototypeChainsInternal(user);
}
@@ -9721,7 +10566,7 @@ void JSObject::InvalidatePrototypeChains(Map* map) {
DisallowHeapAllocation no_gc;
if (map->IsJSGlobalProxyMap()) {
PrototypeIterator iter(map);
- map = JSObject::cast(iter.GetCurrent())->map();
+ map = iter.GetCurrent<JSObject>()->map();
}
InvalidatePrototypeChainsInternal(map);
}
@@ -9761,7 +10606,7 @@ Handle<Cell> Map::GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
Handle<JSObject> prototype = Handle<JSObject>::cast(maybe_prototype);
if (prototype->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, prototype);
- prototype = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ prototype = PrototypeIterator::GetCurrent<JSObject>(iter);
}
// Ensure the prototype is registered with its own prototypes so its cell
// will be invalidated when necessary.
@@ -9860,6 +10705,9 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
function->set_prototype_or_initial_map(*value);
} else {
Handle<Map> new_map = Map::Copy(initial_map, "SetInstancePrototype");
+ if (function->map()->is_strong()) {
+ new_map->set_is_strong();
+ }
JSFunction::SetInitialMap(function, new_map, value);
// If the function is used as the global Array function, cache the
@@ -9897,7 +10745,7 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
void JSFunction::SetPrototype(Handle<JSFunction> function,
Handle<Object> value) {
- DCHECK(function->should_have_prototype());
+ DCHECK(function->IsConstructor());
Handle<Object> construct_prototype = value;
// If the value is not a JSReceiver, store the value in the map's
@@ -9984,6 +10832,9 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
in_object_properties = function->shared()->CalculateInObjectProperties();
}
Handle<Map> map = isolate->factory()->NewMap(instance_type, instance_size);
+ if (function->map()->is_strong()) {
+ map->set_is_strong();
+ }
// Fetch or allocate prototype.
Handle<Object> prototype;
@@ -9997,7 +10848,8 @@ void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
DCHECK(map->has_fast_object_elements());
// Finally link initial map and constructor function.
- JSFunction::SetInitialMap(function, map, Handle<JSReceiver>::cast(prototype));
+ DCHECK(prototype->IsJSReceiver());
+ JSFunction::SetInitialMap(function, map, prototype);
if (!function->shared()->is_generator()) {
function->StartInobjectSlackTracking();
@@ -10108,8 +10960,8 @@ int Script::GetColumnNumber(Handle<Script> script, int code_pos) {
DisallowHeapAllocation no_allocation;
FixedArray* line_ends_array = FixedArray::cast(script->line_ends());
- line_number = line_number - script->line_offset()->value();
- if (line_number == 0) return code_pos + script->column_offset()->value();
+ line_number = line_number - script->line_offset();
+ if (line_number == 0) return code_pos + script->column_offset();
int prev_line_end_pos =
Smi::cast(line_ends_array->get(line_number - 1))->value();
return code_pos - (prev_line_end_pos + 1);
@@ -10124,7 +10976,7 @@ int Script::GetLineNumberWithArray(int code_pos) {
if (line_ends_len == 0) return -1;
if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos) {
- return line_offset()->value();
+ return line_offset();
}
int left = 0;
@@ -10136,7 +10988,7 @@ int Script::GetLineNumberWithArray(int code_pos) {
left += half;
}
}
- return right + line_offset()->value();
+ return right + line_offset();
}
@@ -10212,22 +11064,48 @@ Handle<JSObject> Script::GetWrapper(Handle<Script> script) {
MaybeHandle<SharedFunctionInfo> Script::FindSharedFunctionInfo(
FunctionLiteral* fun) {
- if (shared_function_infos()->IsWeakFixedArray()) {
- WeakFixedArray* array = WeakFixedArray::cast(shared_function_infos());
- for (int i = 0; i < array->Length(); i++) {
- Object* obj = array->Get(i);
- if (!obj->IsSharedFunctionInfo()) continue;
- SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
- if (fun->function_token_position() == shared->function_token_position() &&
- fun->start_position() == shared->start_position()) {
- return Handle<SharedFunctionInfo>(shared);
- }
+ WeakFixedArray::Iterator iterator(shared_function_infos());
+ SharedFunctionInfo* shared;
+ while ((shared = iterator.Next<SharedFunctionInfo>())) {
+ if (fun->function_token_position() == shared->function_token_position() &&
+ fun->start_position() == shared->start_position()) {
+ return Handle<SharedFunctionInfo>(shared);
}
}
return MaybeHandle<SharedFunctionInfo>();
}
+Script::Iterator::Iterator(Isolate* isolate)
+ : iterator_(isolate->heap()->script_list()) {}
+
+
+Script* Script::Iterator::Next() { return iterator_.Next<Script>(); }
+
+
+SharedFunctionInfo::Iterator::Iterator(Isolate* isolate)
+ : script_iterator_(isolate), sfi_iterator_(NULL) {
+ NextScript();
+}
+
+
+bool SharedFunctionInfo::Iterator::NextScript() {
+ Script* script = script_iterator_.Next();
+ if (script == NULL) return false;
+ sfi_iterator_.Reset(script->shared_function_infos());
+ return true;
+}
+
+
+SharedFunctionInfo* SharedFunctionInfo::Iterator::Next() {
+ do {
+ SharedFunctionInfo* next = sfi_iterator_.Next<SharedFunctionInfo>();
+ if (next != NULL) return next;
+ } while (NextScript());
+ return NULL;
+}
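+
+// Usage sketch: visit every SharedFunctionInfo reachable from the script
+// list (illustrative):
+//
+//   SharedFunctionInfo::Iterator it(isolate);
+//   while (SharedFunctionInfo* info = it.Next()) { /* visit |info| */ }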
+
+
void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
Handle<Object> script_object) {
if (shared->script() == *script_object) return;
@@ -10245,10 +11123,11 @@ void SharedFunctionInfo::SetScript(Handle<SharedFunctionInfo> shared,
Handle<Script> script = Handle<Script>::cast(script_object);
Handle<Object> list(script->shared_function_infos(), shared->GetIsolate());
#ifdef DEBUG
- if (list->IsWeakFixedArray()) {
- Handle<WeakFixedArray> array = Handle<WeakFixedArray>::cast(list);
- for (int i = 0; i < array->Length(); ++i) {
- DCHECK(array->Get(i) != *shared);
+ {
+ WeakFixedArray::Iterator iterator(*list);
+ SharedFunctionInfo* next;
+ while ((next = iterator.Next<SharedFunctionInfo>())) {
+ DCHECK_NE(next, *shared);
}
}
#endif // DEBUG
@@ -10471,8 +11350,8 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
}
-CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
- Context* native_context, BailoutId osr_ast_id) {
+int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context,
+ BailoutId osr_ast_id) {
DisallowHeapAllocation no_gc;
DCHECK(native_context->IsNativeContext());
Object* value = optimized_code_map();
@@ -10483,21 +11362,41 @@ CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
for (int i = kEntriesStart; i < length; i += kEntryLength) {
if (optimized_code_map->get(i + kContextOffset) == native_context &&
optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
- return {Code::cast(optimized_code_map->get(i + kCachedCodeOffset)),
- FixedArray::cast(optimized_code_map->get(i + kLiteralsOffset))};
+ return i;
}
}
Object* shared_code = optimized_code_map->get(kSharedCodeIndex);
if (shared_code->IsCode() && osr_ast_id.IsNone()) {
- return {Code::cast(shared_code), nullptr};
+ return kSharedCodeIndex;
}
- if (FLAG_trace_opt) {
- PrintF("[didn't find optimized code in optimized code map for ");
- ShortPrint();
- PrintF("]\n");
+ }
+ return -1;
+}
+
+
+CodeAndLiterals SharedFunctionInfo::SearchOptimizedCodeMap(
+ Context* native_context, BailoutId osr_ast_id) {
+ CodeAndLiterals result = {nullptr, nullptr};
+ int entry = SearchOptimizedCodeMapEntry(native_context, osr_ast_id);
+ if (entry != kNotFound) {
+ FixedArray* code_map = FixedArray::cast(optimized_code_map());
+ if (entry == kSharedCodeIndex) {
+ result = {Code::cast(code_map->get(kSharedCodeIndex)), nullptr};
+ } else {
+ DCHECK_LE(entry + kEntryLength, code_map->length());
+ Object* code = code_map->get(entry + kCachedCodeOffset);
+ result = {code->IsUndefined() ? nullptr : Code::cast(code),
+ LiteralsArray::cast(code_map->get(entry + kLiteralsOffset))};
}
}
- return {nullptr, nullptr};
+ if (FLAG_trace_opt && !optimized_code_map()->IsSmi() &&
+ result.code == nullptr) {
+ PrintF("[didn't find optimized code in optimized code map for ");
+ ShortPrint();
+ PrintF("]\n");
+ }
+ return result;
}
@@ -10605,7 +11504,7 @@ void Code::Relocate(intptr_t delta) {
for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
it.rinfo()->apply(delta);
}
- CpuFeatures::FlushICache(instruction_start(), instruction_size());
+ Assembler::FlushICache(GetIsolate(), instruction_start(), instruction_size());
}
@@ -10659,7 +11558,7 @@ void Code::CopyFrom(const CodeDesc& desc) {
it.rinfo()->apply(delta);
}
}
- CpuFeatures::FlushICache(instruction_start(), instruction_size());
+ Assembler::FlushICache(GetIsolate(), instruction_start(), instruction_size());
}
@@ -11439,7 +12338,7 @@ void Code::PrintExtraICState(std::ostream& os, // NOLINT
void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "kind = " << Kind2String(kind()) << "\n";
if (IsCodeStubOrIC()) {
- const char* n = CodeStub::MajorName(CodeStub::GetMajorKey(this), true);
+ const char* n = CodeStub::MajorName(CodeStub::GetMajorKey(this));
os << "major_key = " << (n == NULL ? "null" : n) << "\n";
}
if (is_inline_cache_stub()) {
@@ -11584,6 +12483,7 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
void BytecodeArray::Disassemble(std::ostream& os) {
+ os << "Parameter count " << parameter_count() << "\n";
os << "Frame size " << frame_size() << "\n";
Vector<char> buf = Vector<char>::New(50);
@@ -11597,9 +12497,12 @@ void BytecodeArray::Disassemble(std::ostream& os) {
SNPrintF(buf, "%p", bytecode_start);
os << buf.start() << " : ";
- interpreter::Bytecodes::Decode(os, bytecode_start);
+ interpreter::Bytecodes::Decode(os, bytecode_start, parameter_count());
os << "\n";
}
+
+ os << "Constant pool (size = " << constant_pool()->length() << ")\n";
+ constant_pool()->Print();
}
@@ -12061,7 +12964,7 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
for (PrototypeIterator iter(isolate, *value,
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
- if (JSReceiver::cast(iter.GetCurrent()) == *object) {
+ if (iter.GetCurrent<JSReceiver>() == *object) {
// Cycle detected.
THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kCyclicProto),
Object);
@@ -12077,8 +12980,7 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
// hidden and set the new prototype on that object.
PrototypeIterator iter(isolate, real_receiver);
while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
- real_receiver =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ real_receiver = PrototypeIterator::GetCurrent<JSObject>(iter);
iter.Advance();
if (!real_receiver->map()->is_extensible()) {
THROW_NEW_ERROR(
@@ -12107,7 +13009,7 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
// If the prototype chain didn't previously have element callbacks, then
// KeyedStoreICs need to be cleared to ensure any that involve this
// map go generic.
- object->GetHeap()->ClearAllICsByKind(Code::KEYED_STORE_IC);
+ object->GetHeap()->ClearAllKeyedStoreICs();
}
heap->ClearInstanceofCache();
@@ -12277,7 +13179,7 @@ MaybeHandle<Object> JSObject::AddDataElement(Handle<JSObject> object,
to = GetHoleyElementsKind(to);
kind = GetHoleyElementsKind(kind);
}
- to = IsMoreGeneralElementsKindTransition(kind, to) ? to : kind;
+ to = GetMoreGeneralElementsKind(kind, to);
ElementsAccessor* accessor = ElementsAccessor::ForKind(to);
accessor->Add(object, index, value, attributes, new_capacity);
@@ -13834,7 +14736,7 @@ Handle<Object> JSObject::PrepareElementsForSort(Handle<JSObject> object,
Handle<FixedArrayBase> elements_base(object->elements());
uint32_t elements_length = static_cast<uint32_t>(elements_base->length());
if (limit > elements_length) {
- limit = elements_length ;
+ limit = elements_length;
}
if (limit == 0) {
return handle(Smi::FromInt(0), isolate);
@@ -13950,12 +14852,6 @@ size_t JSTypedArray::element_size() {
}
-void FixedArray::SetValue(uint32_t index, Object* value) { set(index, value); }
-
-
-void FixedDoubleArray::SetValue(uint32_t index, Object* value) {
- set(index, value->Number());
-}
void GlobalObject::InvalidatePropertyCell(Handle<GlobalObject> global,
Handle<Name> name) {
DCHECK(!global->HasFastProperties());
@@ -14106,7 +15002,7 @@ void StringTable::EnsureCapacityForDeserialization(Isolate* isolate,
// We need a key instance for the virtual hash function.
InternalizedStringKey dummy_key(Handle<String>::null());
table = StringTable::EnsureCapacity(table, expected, &dummy_key);
- isolate->factory()->set_string_table(table);
+ isolate->heap()->SetRootStringTable(*table);
}
@@ -14140,7 +15036,7 @@ Handle<String> StringTable::LookupKey(Isolate* isolate, HashTableKey* key) {
table->set(EntryToIndex(entry), *string);
table->ElementAdded();
- isolate->factory()->set_string_table(table);
+ isolate->heap()->SetRootStringTable(*table);
return Handle<String>::cast(string);
}
@@ -14521,7 +15417,7 @@ void SeededNumberDictionary::UpdateMaxNumberKey(uint32_t key,
if (key > kRequiresSlowElementsLimit) {
if (used_as_prototype) {
// TODO(verwaest): Remove this hack.
- GetHeap()->ClearAllICsByKind(Code::KEYED_STORE_IC);
+ GetHeap()->ClearAllKeyedStoreICs();
}
set_requires_slow_elements();
return;
@@ -14870,49 +15766,6 @@ void WeakHashTable::AddEntry(int entry, Handle<WeakCell> key_cell,
}
-#ifdef DEBUG
-Object* WeakValueHashTable::LookupWeak(Handle<Object> key) {
- Object* value = Lookup(key);
- if (value->IsWeakCell() && !WeakCell::cast(value)->cleared()) {
- value = WeakCell::cast(value)->value();
- }
- return value;
-}
-#endif // DEBUG
-
-
-Handle<WeakValueHashTable> WeakValueHashTable::PutWeak(
- Handle<WeakValueHashTable> table, Handle<Object> key,
- Handle<HeapObject> value) {
- Handle<WeakCell> cell = value->GetIsolate()->factory()->NewWeakCell(value);
- return Handle<WeakValueHashTable>::cast(
- Put(Handle<ObjectHashTable>::cast(table), key, cell));
-}
-
-
-Handle<FixedArray> WeakValueHashTable::GetWeakValues(
- Handle<WeakValueHashTable> table) {
- Isolate* isolate = table->GetIsolate();
- uint32_t capacity = table->Capacity();
- Handle<FixedArray> results = isolate->factory()->NewFixedArray(capacity);
- int length = 0;
- for (uint32_t i = 0; i < capacity; i++) {
- uint32_t key_index = table->EntryToIndex(i);
- Object* key = table->get(key_index);
- if (!table->IsKey(key)) continue;
- uint32_t value_index = table->EntryToValueIndex(i);
- WeakCell* value_cell = WeakCell::cast(table->get(value_index));
- if (value_cell->cleared()) {
- table->RemoveEntry(i);
- } else {
- results->set(length++, value_cell->value());
- }
- }
- results->Shrink(length);
- return results;
-}
-
-
template<class Derived, class Iterator, int entrysize>
Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Allocate(
Isolate* isolate, int capacity, PretenureFlag pretenure) {
@@ -14985,6 +15838,48 @@ Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Clear(
return new_table;
}
+template <class Derived, class Iterator, int entrysize>
+bool OrderedHashTable<Derived, Iterator, entrysize>::HasKey(
+ Handle<Derived> table, Handle<Object> key) {
+ int entry = table->KeyToFirstEntry(*key);
+ // Walk the chain in the bucket to find the key.
+ while (entry != kNotFound) {
+ Object* candidate_key = table->KeyAt(entry);
+ if (candidate_key->SameValueZero(*key)) return true;
+ entry = table->NextChainEntry(entry);
+ }
+ return false;
+}
+
+
+Handle<OrderedHashSet> OrderedHashSet::Add(Handle<OrderedHashSet> table,
+ Handle<Object> key) {
+ int hash = Object::GetOrCreateHash(table->GetIsolate(), key)->value();
+ int entry = table->HashToEntry(hash);
+ // Walk the chain of the bucket and try finding the key.
+ while (entry != kNotFound) {
+ Object* candidate_key = table->KeyAt(entry);
+    // Do not add if the key is already present.
+ if (candidate_key->SameValueZero(*key)) return table;
+ entry = table->NextChainEntry(entry);
+ }
+
+ table = OrderedHashSet::EnsureGrowable(table);
+ // Read the existing bucket values.
+ int bucket = table->HashToBucket(hash);
+ int previous_entry = table->HashToEntry(hash);
+ int nof = table->NumberOfElements();
+ // Insert a new entry at the end,
+ int new_entry = nof + table->NumberOfDeletedElements();
+ int new_index = table->EntryToIndex(new_entry);
+ table->set(new_index, *key);
+ table->set(new_index + kChainOffset, Smi::FromInt(previous_entry));
+ // and point the bucket to the new entry.
+ table->set(kHashTableStartIndex + bucket, Smi::FromInt(new_entry));
+ table->SetNumberOfElements(nof + 1);
+ return table;
+}
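+
+// Insertion sketch (illustrative): if a key hashes to bucket |b| whose chain
+// currently starts at entry e1, the new key is written to the next free
+// entry e2, e2's chain slot records e1 as its successor, and bucket |b| is
+// repointed at e2, so the chain becomes b -> e2 -> e1.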
+
template<class Derived, class Iterator, int entrysize>
Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Rehash(
@@ -15047,6 +15942,9 @@ template Handle<OrderedHashSet>
OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::Clear(
Handle<OrderedHashSet> table);
+template bool OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::HasKey(
+ Handle<OrderedHashSet> table, Handle<Object> key);
+
template Handle<OrderedHashMap>
OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::Allocate(
@@ -15064,6 +15962,9 @@ template Handle<OrderedHashMap>
OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::Clear(
Handle<OrderedHashMap> table);
+template bool OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::HasKey(
+ Handle<OrderedHashMap> table, Handle<Object> key);
+
template<class Derived, class TableType>
void OrderedHashTableIterator<Derived, TableType>::Transition() {
@@ -15169,6 +16070,75 @@ template void
OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::Transition();
+void JSSet::Initialize(Handle<JSSet> set, Isolate* isolate) {
+ Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet();
+ set->set_table(*table);
+}
+
+
+void JSSet::Clear(Handle<JSSet> set) {
+ Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
+ table = OrderedHashSet::Clear(table);
+ set->set_table(*table);
+}
+
+
+void JSMap::Initialize(Handle<JSMap> map, Isolate* isolate) {
+ Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap();
+ map->set_table(*table);
+}
+
+
+void JSMap::Clear(Handle<JSMap> map) {
+ Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
+ table = OrderedHashMap::Clear(table);
+ map->set_table(*table);
+}
+
+
+void JSWeakCollection::Initialize(Handle<JSWeakCollection> weak_collection,
+ Isolate* isolate) {
+ DCHECK_EQ(0, weak_collection->map()->GetInObjectProperties());
+ Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 0);
+ weak_collection->set_table(*table);
+}
+
+
+void JSWeakCollection::Set(Handle<JSWeakCollection> weak_collection,
+ Handle<Object> key, Handle<Object> value,
+ int32_t hash) {
+ DCHECK(key->IsJSReceiver() || key->IsSymbol());
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
+ DCHECK(table->IsKey(*key));
+ Handle<ObjectHashTable> new_table =
+ ObjectHashTable::Put(table, key, value, hash);
+ weak_collection->set_table(*new_table);
+ if (*table != *new_table) {
+ // Zap the old table since we didn't record slots for its elements.
+ table->FillWithHoles(0, table->length());
+ }
+}
+
+
+bool JSWeakCollection::Delete(Handle<JSWeakCollection> weak_collection,
+ Handle<Object> key, int32_t hash) {
+ DCHECK(key->IsJSReceiver() || key->IsSymbol());
+ Handle<ObjectHashTable> table(
+ ObjectHashTable::cast(weak_collection->table()));
+ DCHECK(table->IsKey(*key));
+ bool was_present = false;
+ Handle<ObjectHashTable> new_table =
+ ObjectHashTable::Remove(table, key, &was_present, hash);
+ weak_collection->set_table(*new_table);
+ if (*table != *new_table) {
+ // Zap the old table since we didn't record slots for its elements.
+ table->FillWithHoles(0, table->length());
+ }
+ return was_present;
+}
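+
+// Usage sketch for the two operations above (illustrative; |hash| is the
+// key's identity hash):
+//
+//   JSWeakCollection::Set(weak_map, key, value, hash);  // insert or update
+//   bool removed = JSWeakCollection::Delete(weak_map, key, hash);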
+
+
// Check if there is a break point at this code position.
bool DebugInfo::HasBreakPoint(int code_position) {
// Get the break point info object for this code position.
@@ -15249,10 +16219,9 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
// Allocate new BreakPointInfo object and set the break point.
Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
isolate->factory()->NewStruct(BREAK_POINT_INFO_TYPE));
- new_break_point_info->set_code_position(Smi::FromInt(code_position));
- new_break_point_info->set_source_position(Smi::FromInt(source_position));
- new_break_point_info->
- set_statement_position(Smi::FromInt(statement_position));
+ new_break_point_info->set_code_position(code_position);
+ new_break_point_info->set_source_position(source_position);
+ new_break_point_info->set_statement_position(statement_position);
new_break_point_info->set_break_point_objects(
isolate->heap()->undefined_value());
BreakPointInfo::SetBreakPoint(new_break_point_info, break_point_object);
@@ -15314,7 +16283,7 @@ int DebugInfo::GetBreakPointInfoIndex(int code_position) {
if (!break_points()->get(i)->IsUndefined()) {
BreakPointInfo* break_point_info =
BreakPointInfo::cast(break_points()->get(i));
- if (break_point_info->code_position()->value() == code_position) {
+ if (break_point_info->code_position() == code_position) {
return i;
}
}
@@ -15536,6 +16505,27 @@ void JSDate::SetValue(Object* value, bool is_value_nan) {
}
+// static
+MaybeHandle<Object> JSDate::ToPrimitive(Handle<JSReceiver> receiver,
+ Handle<Object> hint) {
+ Isolate* const isolate = receiver->GetIsolate();
+ if (hint->IsString()) {
+ Handle<String> hint_string = Handle<String>::cast(hint);
+ if (hint_string->Equals(isolate->heap()->number_string())) {
+ return JSReceiver::OrdinaryToPrimitive(receiver,
+ OrdinaryToPrimitiveHint::kNumber);
+ }
+ if (hint_string->Equals(isolate->heap()->default_string()) ||
+ hint_string->Equals(isolate->heap()->string_string())) {
+ return JSReceiver::OrdinaryToPrimitive(receiver,
+ OrdinaryToPrimitiveHint::kString);
+ }
+ }
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kInvalidHint, hint),
+ Object);
+}
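+
+// Summary of the hint handling above:
+//   "number"             -> OrdinaryToPrimitive(receiver, kNumber)
+//   "string" / "default" -> OrdinaryToPrimitive(receiver, kString)
+//   anything else        -> TypeError (kInvalidHint)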
+
+
void JSDate::SetCachedFields(int64_t local_time_ms, DateCache* date_cache) {
int days = DateCache::DaysFromTime(local_time_ms);
int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
@@ -15565,6 +16555,61 @@ void JSArrayBuffer::Neuter() {
}
+void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
+ bool is_external, void* data, size_t allocated_length,
+ SharedFlag shared) {
+ DCHECK(array_buffer->GetInternalFieldCount() ==
+ v8::ArrayBuffer::kInternalFieldCount);
+ for (int i = 0; i < v8::ArrayBuffer::kInternalFieldCount; i++) {
+ array_buffer->SetInternalField(i, Smi::FromInt(0));
+ }
+ array_buffer->set_bit_field(0);
+ array_buffer->set_is_external(is_external);
+ array_buffer->set_is_neuterable(shared == SharedFlag::kNotShared);
+ array_buffer->set_is_shared(shared == SharedFlag::kShared);
+
+ Handle<Object> byte_length =
+ isolate->factory()->NewNumberFromSize(allocated_length);
+ CHECK(byte_length->IsSmi() || byte_length->IsHeapNumber());
+ array_buffer->set_byte_length(*byte_length);
+  // Initialize the backing store last, to avoid having to handle
+  // |JSArrayBuffers| that are currently being constructed in the
+  // |ArrayBufferTracker|. The registration method below handles the case of
+  // registering a buffer that has already been promoted.
+ array_buffer->set_backing_store(data);
+
+ if (data && !is_external) {
+ isolate->heap()->RegisterNewArrayBuffer(*array_buffer);
+ }
+}
+
+
+bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
+ Isolate* isolate,
+ size_t allocated_length,
+ bool initialize, SharedFlag shared) {
+ void* data;
+ CHECK(isolate->array_buffer_allocator() != NULL);
+ // Prevent creating array buffers when serializing.
+ DCHECK(!isolate->serializer_enabled());
+ if (allocated_length != 0) {
+ if (initialize) {
+ data = isolate->array_buffer_allocator()->Allocate(allocated_length);
+ } else {
+ data = isolate->array_buffer_allocator()->AllocateUninitialized(
+ allocated_length);
+ }
+ if (data == NULL) return false;
+ } else {
+ data = NULL;
+ }
+
+ JSArrayBuffer::Setup(array_buffer, isolate, false, data, allocated_length,
+ shared);
+ return true;
+}
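+
+// Usage sketch (illustrative; |buffer| obtained elsewhere):
+//
+//   if (!JSArrayBuffer::SetupAllocatingData(buffer, isolate, 1024,
+//                                           true /* initialize */)) {
+//     // Failed to allocate 1024 zero-initialized bytes.
+//   }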
+
+
Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
Handle<JSTypedArray> typed_array) {
@@ -15581,11 +16626,16 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
void* backing_store =
isolate->array_buffer_allocator()->AllocateUninitialized(
fixed_typed_array->DataSize());
- buffer->set_backing_store(backing_store);
buffer->set_is_external(false);
- isolate->heap()->RegisterNewArrayBuffer(isolate->heap()->InNewSpace(*buffer),
- backing_store,
- fixed_typed_array->DataSize());
+ DCHECK(buffer->byte_length()->IsSmi() ||
+ buffer->byte_length()->IsHeapNumber());
+ DCHECK(NumberToInt32(buffer->byte_length()) == fixed_typed_array->DataSize());
+  // Initialize the backing store last, to avoid having to handle
+  // |JSArrayBuffers| that are currently being constructed in the
+  // |ArrayBufferTracker|. The registration method below handles the case of
+  // registering a buffer that has already been promoted.
+ buffer->set_backing_store(backing_store);
+ isolate->heap()->RegisterNewArrayBuffer(*buffer);
memcpy(buffer->backing_store(),
fixed_typed_array->DataPtr(),
fixed_typed_array->DataSize());
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 9e6a068c4c..225a7db42e 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -19,7 +19,7 @@
#include "src/flags.h"
#include "src/list.h"
#include "src/property-details.h"
-#include "src/unicode-inl.h"
+#include "src/unicode.h"
#include "src/unicode-decoder.h"
#include "src/zone.h"
@@ -76,6 +76,7 @@
// - BytecodeArray
// - FixedArray
// - DescriptorArray
+// - LiteralsArray
// - HashTable
// - Dictionary
// - StringTable
@@ -115,10 +116,13 @@
// - Simd128Value
// - Float32x4
// - Int32x4
+// - Uint32x4
// - Bool32x4
// - Int16x8
+// - Uint16x8
// - Bool16x8
// - Int8x16
+// - Uint8x16
// - Bool8x16
// - Cell
// - PropertyCell
@@ -155,24 +159,26 @@ namespace internal {
enum KeyedAccessStoreMode {
STANDARD_STORE,
- STORE_TRANSITION_SMI_TO_OBJECT,
- STORE_TRANSITION_SMI_TO_DOUBLE,
- STORE_TRANSITION_DOUBLE_TO_OBJECT,
- STORE_TRANSITION_HOLEY_SMI_TO_OBJECT,
- STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE,
- STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
+ STORE_TRANSITION_TO_OBJECT,
+ STORE_TRANSITION_TO_DOUBLE,
STORE_AND_GROW_NO_TRANSITION,
- STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE,
- STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT,
- STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE,
- STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT,
+ STORE_AND_GROW_TRANSITION_TO_OBJECT,
+ STORE_AND_GROW_TRANSITION_TO_DOUBLE,
STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
STORE_NO_TRANSITION_HANDLE_COW
};
+// Valid hints for the abstract operation ToPrimitive,
+// implemented according to ES6, section 7.1.1.
+enum class ToPrimitiveHint { kDefault, kNumber, kString };
+
+
+// Valid hints for the abstract operation OrdinaryToPrimitive,
+// implemented according to ES6, section 7.1.1.
+enum class OrdinaryToPrimitiveHint { kNumber, kString };
+
+
enum TypeofMode { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
@@ -195,34 +201,11 @@ enum ExternalArrayType {
};
-static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
- STANDARD_STORE;
-STATIC_ASSERT(STANDARD_STORE == 0);
-STATIC_ASSERT(kGrowICDelta ==
- STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT -
- STORE_TRANSITION_SMI_TO_OBJECT);
-STATIC_ASSERT(kGrowICDelta ==
- STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE -
- STORE_TRANSITION_SMI_TO_DOUBLE);
-STATIC_ASSERT(kGrowICDelta ==
- STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT -
- STORE_TRANSITION_DOUBLE_TO_OBJECT);
-
-
-static inline KeyedAccessStoreMode GetGrowStoreMode(
- KeyedAccessStoreMode store_mode) {
- if (store_mode < STORE_AND_GROW_NO_TRANSITION) {
- store_mode = static_cast<KeyedAccessStoreMode>(
- static_cast<int>(store_mode) + kGrowICDelta);
- }
- return store_mode;
-}
-
-
static inline bool IsTransitionStoreMode(KeyedAccessStoreMode store_mode) {
- return store_mode > STANDARD_STORE &&
- store_mode <= STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT &&
- store_mode != STORE_AND_GROW_NO_TRANSITION;
+ return store_mode == STORE_TRANSITION_TO_OBJECT ||
+ store_mode == STORE_TRANSITION_TO_DOUBLE ||
+ store_mode == STORE_AND_GROW_TRANSITION_TO_OBJECT ||
+ store_mode == STORE_AND_GROW_TRANSITION_TO_DOUBLE;
}
@@ -240,7 +223,7 @@ static inline KeyedAccessStoreMode GetNonTransitioningStoreMode(
static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) {
return store_mode >= STORE_AND_GROW_NO_TRANSITION &&
- store_mode <= STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
+ store_mode <= STORE_AND_GROW_TRANSITION_TO_DOUBLE;
}
@@ -422,6 +405,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
V(BOX_TYPE) \
V(PROTOTYPE_INFO_TYPE) \
+ V(SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE) \
\
V(FIXED_ARRAY_TYPE) \
V(FIXED_DOUBLE_ARRAY_TYPE) \
@@ -448,6 +432,7 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(JS_MAP_TYPE) \
V(JS_SET_ITERATOR_TYPE) \
V(JS_MAP_ITERATOR_TYPE) \
+ V(JS_ITERATOR_RESULT_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_WEAK_SET_TYPE) \
V(JS_REGEXP_TYPE) \
@@ -538,7 +523,10 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
V(DEBUG_INFO, DebugInfo, debug_info) \
V(BREAK_POINT_INFO, BreakPointInfo, break_point_info) \
- V(PROTOTYPE_INFO, PrototypeInfo, prototype_info)
+ V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
+ V(SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION, \
+ SloppyBlockWithEvalContextExtension, \
+ sloppy_block_with_eval_context_extension)
// We use the full 8 bits of the instance_type field to encode heap object
// instance types. The high-order bit (bit 7) is set if the object is not a
@@ -716,6 +704,7 @@ enum InstanceType {
WEAK_CELL_TYPE,
PROPERTY_CELL_TYPE,
PROTOTYPE_INFO_TYPE,
+ SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE,
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
@@ -742,6 +731,7 @@ enum InstanceType {
JS_MAP_TYPE,
JS_SET_ITERATOR_TYPE,
JS_MAP_ITERATOR_TYPE,
+ JS_ITERATOR_RESULT_TYPE,
JS_WEAK_MAP_TYPE,
JS_WEAK_SET_TYPE,
JS_REGEXP_TYPE,
@@ -811,6 +801,7 @@ enum FixedArraySubInstanceType {
};
+// TODO(bmeurer): Remove this in favor of the ComparisonResult below.
enum CompareResult {
LESS = -1,
EQUAL = 0,
@@ -820,9 +811,23 @@ enum CompareResult {
};
-#define DECL_BOOLEAN_ACCESSORS(name) \
- inline bool name() const; \
- inline void set_##name(bool value); \
+// Result of an abstract relational comparison of x and y, implemented according
+// to ES6 section 7.2.11 Abstract Relational Comparison.
+enum class ComparisonResult {
+ kLessThan, // x < y
+ kEqual, // x = y
+ kGreaterThan, // x > y
+  kUndefined     // at least one of x or y was undefined or NaN
+};
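+
+// For example, an abstract relational comparison yields:
+//   1 < 2     -> kLessThan
+//   "a" < "a" -> kEqual
+//   NaN < 1   -> kUndefined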
+
+
+#define DECL_BOOLEAN_ACCESSORS(name) \
+ inline bool name() const; \
+ inline void set_##name(bool value);
+
+#define DECL_INT_ACCESSORS(name) \
+ inline int name() const; \
+ inline void set_##name(int value);
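+
+// For instance, DECL_INT_ACCESSORS(code_position) declares:
+//   inline int code_position() const;
+//   inline void set_code_position(int value);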
#define DECL_ACCESSORS(name, type) \
@@ -848,6 +853,7 @@ class FunctionLiteral;
class GlobalObject;
class JSBuiltinsObject;
class LayoutDescriptor;
+class LiteralsArray;
class LookupIterator;
class ObjectHashTable;
class ObjectVisitor;
@@ -892,10 +898,13 @@ template <class C> inline bool Is(Object* obj);
V(Simd128Value) \
V(Float32x4) \
V(Int32x4) \
+ V(Uint32x4) \
V(Bool32x4) \
V(Int16x8) \
+ V(Uint16x8) \
V(Bool16x8) \
V(Int8x16) \
+ V(Uint8x16) \
V(Bool8x16) \
V(Name) \
V(UniqueName) \
@@ -933,6 +942,7 @@ template <class C> inline bool Is(Object* obj);
V(Map) \
V(DescriptorArray) \
V(TransitionArray) \
+ V(LiteralsArray) \
V(TypeFeedbackVector) \
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
@@ -967,6 +977,7 @@ template <class C> inline bool Is(Object* obj);
V(JSMap) \
V(JSSetIterator) \
V(JSMapIterator) \
+ V(JSIteratorResult) \
V(JSWeakCollection) \
V(JSWeakMap) \
V(JSWeakSet) \
@@ -991,7 +1002,6 @@ template <class C> inline bool Is(Object* obj);
V(WeakCell) \
V(ObjectHashTable) \
V(WeakHashTable) \
- V(WeakValueHashTable) \
V(OrderedHashTable)
// Object is the abstract superclass for all classes in the
@@ -1027,8 +1037,13 @@ class Object {
STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
#undef DECLARE_STRUCT_PREDICATE
+ // ES6, section 7.2.3 IsCallable.
+ INLINE(bool IsCallable() const);
+
+ // ES6, section 7.2.4 IsConstructor.
+ INLINE(bool IsConstructor() const);
+
INLINE(bool IsSpecObject()) const;
- INLINE(bool IsSpecFunction()) const;
INLINE(bool IsTemplateInfo()) const;
INLINE(bool IsNameDictionary() const);
INLINE(bool IsGlobalDictionary() const);
@@ -1036,7 +1051,6 @@ class Object {
INLINE(bool IsUnseededNumberDictionary() const);
INLINE(bool IsOrderedHashSet() const);
INLINE(bool IsOrderedHashMap() const);
- bool IsCallable() const;
static bool IsPromise(Handle<Object> object);
// Oddball testing.
@@ -1053,7 +1067,7 @@ class Object {
INLINE(bool IsFiller() const);
// Extract the number.
- inline double Number();
+ inline double Number() const;
INLINE(bool IsNaN() const);
INLINE(bool IsMinusZero() const);
bool ToInt32(int32_t* value);
@@ -1088,6 +1102,13 @@ class Object {
bool BooleanValue(); // ECMA-262 9.2.
+ // ES6 section 7.2.11 Abstract Relational Comparison
+ MUST_USE_RESULT static Maybe<ComparisonResult> Compare(
+ Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
+
+ // ES6 section 7.2.12 Abstract Equality Comparison
+ MUST_USE_RESULT static Maybe<bool> Equals(Handle<Object> x, Handle<Object> y);
+
// ES6 section 7.2.13 Strict Equality Comparison
bool StrictEquals(Object* that);
@@ -1095,9 +1116,97 @@ class Object {
// native_context is used when creating wrapper object.
static inline MaybeHandle<JSReceiver> ToObject(Isolate* isolate,
Handle<Object> object);
- static MaybeHandle<JSReceiver> ToObject(Isolate* isolate,
- Handle<Object> object,
- Handle<Context> context);
+ MUST_USE_RESULT static MaybeHandle<JSReceiver> ToObject(
+ Isolate* isolate, Handle<Object> object, Handle<Context> context);
+
+ // ES6 section 7.1.14 ToPropertyKey
+ MUST_USE_RESULT static MaybeHandle<Name> ToName(Isolate* isolate,
+ Handle<Object> input);
+
+ // ES6 section 7.1.1 ToPrimitive
+ MUST_USE_RESULT static inline MaybeHandle<Object> ToPrimitive(
+ Handle<Object> input, ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
+
+ // ES6 section 7.1.3 ToNumber
+ MUST_USE_RESULT static MaybeHandle<Object> ToNumber(Handle<Object> input);
+
+ // ES6 section 7.1.4 ToInteger
+ MUST_USE_RESULT static MaybeHandle<Object> ToInteger(Isolate* isolate,
+ Handle<Object> input);
+
+ // ES6 section 7.1.5 ToInt32
+ MUST_USE_RESULT static MaybeHandle<Object> ToInt32(Isolate* isolate,
+ Handle<Object> input);
+
+ // ES6 section 7.1.6 ToUint32
+ MUST_USE_RESULT static MaybeHandle<Object> ToUint32(Isolate* isolate,
+ Handle<Object> input);
+
+ // ES6 section 7.1.12 ToString
+ MUST_USE_RESULT static MaybeHandle<String> ToString(Isolate* isolate,
+ Handle<Object> input);
+
+ // ES6 section 7.1.15 ToLength
+ MUST_USE_RESULT static MaybeHandle<Object> ToLength(Isolate* isolate,
+ Handle<Object> input);
+
+ // ES6 section 7.3.9 GetMethod
+ MUST_USE_RESULT static MaybeHandle<Object> GetMethod(
+ Handle<JSReceiver> receiver, Handle<Name> name);
+
+ // ES6 section 12.5.6 The typeof Operator
+ static Handle<String> TypeOf(Isolate* isolate, Handle<Object> object);
+
+ // ES6 section 12.6 Multiplicative Operators
+ MUST_USE_RESULT static MaybeHandle<Object> Multiply(
+ Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
+ Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static MaybeHandle<Object> Divide(
+ Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
+ Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static MaybeHandle<Object> Modulus(
+ Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
+ Strength strength = Strength::WEAK);
+
+ // ES6 section 12.7 Additive Operators
+ MUST_USE_RESULT static MaybeHandle<Object> Add(
+ Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
+ Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static MaybeHandle<Object> Subtract(
+ Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
+ Strength strength = Strength::WEAK);
+
+ // ES6 section 12.8 Bitwise Shift Operators
+ MUST_USE_RESULT static MaybeHandle<Object> ShiftLeft(
+ Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
+ Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static MaybeHandle<Object> ShiftRight(
+ Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
+ Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static MaybeHandle<Object> ShiftRightLogical(
+ Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
+ Strength strength = Strength::WEAK);
+
+ // ES6 section 12.9 Relational Operators
+ MUST_USE_RESULT static inline Maybe<bool> GreaterThan(
+ Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static inline Maybe<bool> GreaterThanOrEqual(
+ Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static inline Maybe<bool> LessThan(
+ Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static inline Maybe<bool> LessThanOrEqual(
+ Handle<Object> x, Handle<Object> y, Strength strength = Strength::WEAK);
+
+ // ES6 section 12.11 Binary Bitwise Operators
+ MUST_USE_RESULT static MaybeHandle<Object> BitwiseAnd(
+ Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
+ Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static MaybeHandle<Object> BitwiseOr(
+ Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
+ Strength strength = Strength::WEAK);
+ MUST_USE_RESULT static MaybeHandle<Object> BitwiseXor(
+ Isolate* isolate, Handle<Object> lhs, Handle<Object> rhs,
+ Strength strength = Strength::WEAK);
MUST_USE_RESULT static MaybeHandle<Object> GetProperty(
LookupIterator* it, LanguageMode language_mode = SLOPPY);
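The block above centralizes ES6 abstract operations as static helpers on Object; each returns a MaybeHandle so exceptions propagate cleanly, and the binary operators default to Strength::WEAK so existing call sites keep sloppy semantics. A minimal standalone sketch of the ToInteger step (ES6 7.1.4) over plain doubles, assuming ToNumber has already succeeded; the real helper works on handles and an Isolate:

    #include <cmath>

    // Sketch only: mirrors the spec steps of ES6 7.1.4 ToInteger once the
    // input has been coerced to a double. NaN maps to +0; zeros and
    // infinities pass through; everything else truncates toward zero.
    double ToIntegerSketch(double number) {
      if (std::isnan(number)) return 0.0;
      if (number == 0.0 || std::isinf(number)) return number;
      return std::trunc(number);
    }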
@@ -1596,8 +1705,11 @@ class Simd128Value : public HeapObject {
DECLARE_PRINTER(Simd128Value)
DECLARE_VERIFIER(Simd128Value)
+ static Handle<String> ToString(Handle<Simd128Value> input);
+
// Equality operations.
inline bool Equals(Simd128Value* that);
+ static inline bool Equals(Handle<Simd128Value> one, Handle<Simd128Value> two);
// Checks that another instance is bit-wise equal.
bool BitwiseEquals(const Simd128Value* other) const;
@@ -1619,10 +1731,13 @@ class Simd128Value : public HeapObject {
#define SIMD128_TYPES(V) \
V(FLOAT32X4, Float32x4, float32x4, 4, float) \
V(INT32X4, Int32x4, int32x4, 4, int32_t) \
+ V(UINT32X4, Uint32x4, uint32x4, 4, uint32_t) \
V(BOOL32X4, Bool32x4, bool32x4, 4, bool) \
V(INT16X8, Int16x8, int16x8, 8, int16_t) \
+ V(UINT16X8, Uint16x8, uint16x8, 8, uint16_t) \
V(BOOL16X8, Bool16x8, bool16x8, 8, bool) \
V(INT8X16, Int8x16, int8x16, 16, int8_t) \
+ V(UINT8X16, Uint8x16, uint8x16, 16, uint8_t) \
V(BOOL8X16, Bool8x16, bool8x16, 16, bool)
#define SIMD128_VALUE_CLASS(TYPE, Type, type, lane_count, lane_type) \
@@ -1635,6 +1750,8 @@ class Simd128Value : public HeapObject {
\
DECLARE_PRINTER(Type) \
\
+ static Handle<String> ToString(Handle<Type> input); \
+ \
inline bool Equals(Type* that); \
\
private: \
@@ -1664,6 +1781,13 @@ class JSReceiver: public HeapObject {
public:
DECLARE_CAST(JSReceiver)
+ // ES6 section 7.1.1 ToPrimitive
+ MUST_USE_RESULT static MaybeHandle<Object> ToPrimitive(
+ Handle<JSReceiver> receiver,
+ ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
+ MUST_USE_RESULT static MaybeHandle<Object> OrdinaryToPrimitive(
+ Handle<JSReceiver> receiver, OrdinaryToPrimitiveHint hint);
+
// Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
MUST_USE_RESULT static inline Maybe<bool> HasProperty(
Handle<JSReceiver> object, Handle<Name> name);
@@ -2369,7 +2493,6 @@ class FixedArray: public FixedArrayBase {
public:
// Setter and getter for elements.
inline Object* get(int index) const;
- void SetValue(uint32_t index, Object* value);
static inline Handle<Object> get(Handle<FixedArray> array, int index);
// Setter that uses write barrier.
inline void set(int index, Object* value);
@@ -2398,17 +2521,6 @@ class FixedArray: public FixedArrayBase {
enum KeyFilter { ALL_KEYS, NON_SYMBOL_KEYS };
- // Add the elements of a JSArray to this FixedArray.
- MUST_USE_RESULT static MaybeHandle<FixedArray> AddKeysFromArrayLike(
- Handle<FixedArray> content, Handle<JSObject> array,
- KeyFilter filter = ALL_KEYS);
-
- // Computes the union of keys and return the result.
- // Used for implementing "for (n in object) { }"
- MUST_USE_RESULT static MaybeHandle<FixedArray> UnionOfKeys(
- Handle<FixedArray> first,
- Handle<FixedArray> second);
-
// Copy a sub array from the receiver to dest.
void CopyTo(int pos, FixedArray* dest, int dest_pos, int len);
@@ -2481,8 +2593,6 @@ class FixedDoubleArray: public FixedArrayBase {
inline double get_scalar(int index);
inline uint64_t get_representation(int index);
static inline Handle<Object> get(Handle<FixedDoubleArray> array, int index);
- // This accessor has to get a Number as |value|.
- void SetValue(uint32_t index, Object* value);
inline void set(int index, double value);
inline void set_the_hole(int index);
@@ -2547,6 +2657,24 @@ class WeakFixedArray : public FixedArray {
inline bool IsEmptySlot(int index) const;
static Object* Empty() { return Smi::FromInt(0); }
+ class Iterator {
+ public:
+ explicit Iterator(Object* maybe_array) : list_(NULL) { Reset(maybe_array); }
+ void Reset(Object* maybe_array);
+
+ template <class T>
+ inline T* Next();
+
+ private:
+ int index_;
+ WeakFixedArray* list_;
+#ifdef DEBUG
+ int last_used_index_;
+ DisallowHeapAllocation no_gc_;
+#endif // DEBUG
+ DISALLOW_COPY_AND_ASSIGN(Iterator);
+ };
+
DECLARE_CAST(WeakFixedArray)
private:
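A hedged usage sketch for the new WeakFixedArray::Iterator, assuming Next<T>() yields NULL once the live slots are exhausted (slots cleared by GC being skipped); Script::Iterator and SharedFunctionInfo::Iterator further down are built on this pattern:

    // Walk every live SharedFunctionInfo in a weak list; NULL ends the loop.
    WeakFixedArray::Iterator it(maybe_array);
    while (SharedFunctionInfo* info = it.Next<SharedFunctionInfo>()) {
      // ... visit |info|; entries cleared by GC never show up here ...
    }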
@@ -3571,6 +3699,9 @@ class OrderedHashTable: public FixedArray {
  // existing iterators can be updated.
static Handle<Derived> Clear(Handle<Derived> table);
+  // Returns true if the OrderedHashTable contains the key.
+ static bool HasKey(Handle<Derived> table, Handle<Object> key);
+
int NumberOfElements() {
return Smi::cast(get(kNumberOfElementsIndex))->value();
}
@@ -3590,6 +3721,26 @@ class OrderedHashTable: public FixedArray {
return kHashTableStartIndex + NumberOfBuckets() + (entry * kEntrySize);
}
+ int HashToBucket(int hash) { return hash & (NumberOfBuckets() - 1); }
+
+ int HashToEntry(int hash) {
+ int bucket = HashToBucket(hash);
+ Object* entry = this->get(kHashTableStartIndex + bucket);
+ return Smi::cast(entry)->value();
+ }
+
+ int KeyToFirstEntry(Object* key) {
+ Object* hash = key->GetHash();
+    // If the object does not have an identity hash, it was never used as a key.
+ if (hash->IsUndefined()) return kNotFound;
+ return HashToEntry(Smi::cast(hash)->value());
+ }
+
+ int NextChainEntry(int entry) {
+ Object* next_entry = get(EntryToIndex(entry) + kChainOffset);
+ return Smi::cast(next_entry)->value();
+ }
+
Object* KeyAt(int entry) { return get(EntryToIndex(entry)); }
bool IsObsolete() {
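The new inline helpers expose the table's closed-hashing layout: a power-of-two bucket array starting at kHashTableStartIndex, with a per-entry chain link at kChainOffset, which makes HashToBucket a simple mask. A sketch of a lookup built from them, assuming chains terminate in kNotFound and a SameValueZero-style key comparison (the real HasKey lives in objects.cc):

    // Sketch only: bucket -> first entry -> chain walk, as HasKey would do it.
    template <class Table>
    bool HasKeySketch(Table* table, Object* key) {
      for (int entry = table->KeyToFirstEntry(key); entry != Table::kNotFound;
           entry = table->NextChainEntry(entry)) {
        if (table->KeyAt(entry)->SameValueZero(key)) return true;
      }
      return false;
    }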
@@ -3636,7 +3787,7 @@ class OrderedHashTable: public FixedArray {
// optimize that case.
static const int kClearedTableSentinel = -1;
- private:
+ protected:
static Handle<Derived> Rehash(Handle<Derived> table, int new_capacity);
void SetNumberOfBuckets(int num) {
@@ -3678,6 +3829,9 @@ class OrderedHashSet: public OrderedHashTable<
OrderedHashSet, JSSetIterator, 1> {
public:
DECLARE_CAST(OrderedHashSet)
+
+ static Handle<OrderedHashSet> Add(Handle<OrderedHashSet> table,
+ Handle<Object> value);
};
@@ -3742,26 +3896,6 @@ class WeakHashTable: public HashTable<WeakHashTable,
};
-class WeakValueHashTable : public ObjectHashTable {
- public:
- DECLARE_CAST(WeakValueHashTable)
-
-#ifdef DEBUG
- // Looks up the value associated with the given key. The hole value is
- // returned in case the key is not present.
- Object* LookupWeak(Handle<Object> key);
-#endif // DEBUG
-
- // Adds (or overwrites) the value associated with the given key. Mapping a
- // key to the hole value causes removal of the whole entry.
- MUST_USE_RESULT static Handle<WeakValueHashTable> PutWeak(
- Handle<WeakValueHashTable> table, Handle<Object> key,
- Handle<HeapObject> value);
-
- static Handle<FixedArray> GetWeakValues(Handle<WeakValueHashTable> table);
-};
-
-
// ScopeInfo represents information about different scopes of a source
// program and the allocation of the scope's variables. Scope information
// is stored in a compressed form in ScopeInfo objects and is used
@@ -3782,6 +3916,9 @@ class ScopeInfo : public FixedArray {
// Return the language mode of this scope.
LanguageMode language_mode();
+ // True if this scope is a (var) declaration scope.
+ bool is_declaration_scope();
+
// Does this scope make a sloppy eval call?
bool CallsSloppyEval() { return CallsEval() && is_sloppy(language_mode()); }
@@ -3869,16 +4006,25 @@ class ScopeInfo : public FixedArray {
// string.
int StackSlotIndex(String* name);
- // Lookup support for serialized scope info. Returns the
- // context slot index for a given slot name if the slot is present; otherwise
+ // Lookup support for serialized scope info. Returns the local context slot
+ // index for a given slot name if the slot is present; otherwise
// returns a value < 0. The name must be an internalized string.
// If the slot is present and mode != NULL, sets *mode to the corresponding
// mode for that variable.
static int ContextSlotIndex(Handle<ScopeInfo> scope_info, Handle<String> name,
- VariableMode* mode, VariableLocation* location,
- InitializationFlag* init_flag,
+ VariableMode* mode, InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
+ // Similar to ContextSlotIndex() but this method searches only among
+ // global slots of the serialized scope info. Returns the context slot index
+ // for a given slot name if the slot is present; otherwise returns a
+ // value < 0. The name must be an internalized string. If the slot is present
+ // and mode != NULL, sets *mode to the corresponding mode for that variable.
+ static int ContextGlobalSlotIndex(Handle<ScopeInfo> scope_info,
+ Handle<String> name, VariableMode* mode,
+ InitializationFlag* init_flag,
+ MaybeAssignedFlag* maybe_assigned_flag);
+
// Lookup the name of a certain context slot by its index.
String* ContextSlotName(int slot_index);
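A hedged call-site sketch for the split lookup: local context slots are tried first, then the new global slots, with the same out-parameters in both calls (names must be internalized strings):

    VariableMode mode;
    InitializationFlag init_flag;
    MaybeAssignedFlag maybe_assigned;
    int slot = ScopeInfo::ContextSlotIndex(scope_info, name, &mode, &init_flag,
                                           &maybe_assigned);
    if (slot < 0) {  // not a local slot; try the global slots
      slot = ScopeInfo::ContextGlobalSlotIndex(scope_info, name, &mode,
                                               &init_flag, &maybe_assigned);
    }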
@@ -4005,8 +4151,11 @@ class ScopeInfo : public FixedArray {
STATIC_ASSERT(LANGUAGE_END == 3);
class LanguageModeField
: public BitField<LanguageMode, CallsEvalField::kNext, 2> {};
+ class DeclarationScopeField
+ : public BitField<bool, LanguageModeField::kNext, 1> {};
class ReceiverVariableField
- : public BitField<VariableAllocationInfo, LanguageModeField::kNext, 2> {};
+ : public BitField<VariableAllocationInfo, DeclarationScopeField::kNext,
+ 2> {};
class FunctionVariableField
: public BitField<VariableAllocationInfo, ReceiverVariableField::kNext,
2> {};
@@ -4126,14 +4275,25 @@ class BytecodeArray : public FixedArrayBase {
// Returns data start address.
inline Address GetFirstBytecodeAddress();
- // Accessors for frame size and the number of locals
+ // Accessors for frame size.
inline int frame_size() const;
- inline void set_frame_size(int value);
+ inline void set_frame_size(int frame_size);
+
+ // Accessor for register count (derived from frame_size).
+ inline int register_count() const;
+
+ // Accessors for parameter count (including implicit 'this' receiver).
+ inline int parameter_count() const;
+ inline void set_parameter_count(int number_of_parameters);
+
+ // Accessors for the constant pool.
+ DECL_ACCESSORS(constant_pool, FixedArray)
DECLARE_CAST(BytecodeArray)
// Dispatched behavior.
inline int BytecodeArraySize();
+ inline void BytecodeArrayIterateBody(ObjectVisitor* v);
DECLARE_PRINTER(BytecodeArray)
DECLARE_VERIFIER(BytecodeArray)
@@ -4142,7 +4302,9 @@ class BytecodeArray : public FixedArrayBase {
// Layout description.
static const int kFrameSizeOffset = FixedArrayBase::kHeaderSize;
- static const int kHeaderSize = kFrameSizeOffset + kIntSize;
+ static const int kParameterSizeOffset = kFrameSizeOffset + kIntSize;
+ static const int kConstantPoolOffset = kParameterSizeOffset + kIntSize;
+ static const int kHeaderSize = kConstantPoolOffset + kPointerSize;
static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
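The header grows by one int (parameter count) and one pointer (constant pool). As standalone arithmetic, assuming a 64-bit target where kIntSize is 4, kPointerSize is 8, and FixedArrayBase::kHeaderSize is 16 (these concrete values are assumptions, not taken from this diff):

    constexpr int kIntSize = 4, kPointerSize = 8;
    constexpr int kFrameSizeOffset = 16;                                  // assumed
    constexpr int kParameterSizeOffset = kFrameSizeOffset + kIntSize;     // 20
    constexpr int kConstantPoolOffset = kParameterSizeOffset + kIntSize;  // 24
    constexpr int kHeaderSize = kConstantPoolOffset + kPointerSize;       // 32
    static_assert(kHeaderSize == 32, "header grew by one int and one pointer");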
@@ -4267,7 +4429,7 @@ class FixedTypedArray: public FixedTypedArrayBase {
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
- void SetValue(uint32_t index, Object* value);
+ inline void SetValue(uint32_t index, Object* value);
DECLARE_PRINTER(FixedTypedArray)
DECLARE_VERIFIER(FixedTypedArray)
@@ -4409,6 +4571,40 @@ class DeoptimizationOutputData: public FixedArray {
};
+// A literals array contains the literals for a JSFunction. It also holds
+// the type feedback vector.
+class LiteralsArray : public FixedArray {
+ public:
+ static const int kVectorIndex = 0;
+ static const int kFirstLiteralIndex = 1;
+ static const int kOffsetToFirstLiteral =
+ FixedArray::kHeaderSize + kPointerSize;
+
+ static int OffsetOfLiteralAt(int index) {
+ return SizeFor(index + kFirstLiteralIndex);
+ }
+
+ inline TypeFeedbackVector* feedback_vector() const;
+ inline void set_feedback_vector(TypeFeedbackVector* vector);
+ inline Object* literal(int literal_index) const;
+ inline void set_literal(int literal_index, Object* literal);
+ inline int literals_count() const;
+
+ static Handle<LiteralsArray> New(Isolate* isolate,
+ Handle<TypeFeedbackVector> vector,
+ int number_of_literals,
+ PretenureFlag pretenure);
+
+ DECLARE_CAST(LiteralsArray)
+
+ private:
+ inline Object* get(int index) const;
+ inline void set(int index, Object* value);
+ inline void set(int index, Smi* value);
+ inline void set(int index, Object* value, WriteBarrierMode mode);
+};
+
+
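Slot 0 of a LiteralsArray holds the feedback vector and literals start at index 1, so OffsetOfLiteralAt shifts by kFirstLiteralIndex. A standalone sketch of that arithmetic, assuming the standard FixedArray layout SizeFor(n) == kHeaderSize + n * kPointerSize and 64-bit sizes:

    // Sketch only: literal |i| occupies FixedArray slot i + 1, so the first
    // literal sits one pointer past the header, matching kOffsetToFirstLiteral.
    int OffsetOfLiteralAtSketch(int index) {
      constexpr int kHeaderSize = 16, kPointerSize = 8;  // assumed 64-bit values
      return kHeaderSize + (index + 1 /* kFirstLiteralIndex */) * kPointerSize;
    }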
// HandlerTable is a fixed array containing entries for exception handlers in
// the code object it is associated with. The table comes in two flavors:
// 1) Based on ranges: Used for unoptimized code. Contains one entry per
@@ -4484,7 +4680,8 @@ class Code: public HeapObject {
V(STUB) \
V(HANDLER) \
V(BUILTIN) \
- V(REGEXP)
+ V(REGEXP) \
+ V(WASM_FUNCTION)
#define IC_KIND_LIST(V) \
V(LOAD_IC) \
@@ -5199,7 +5396,7 @@ class Map: public HeapObject {
STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
class DictionaryMap : public BitField<bool, 20, 1> {};
class OwnsDescriptors : public BitField<bool, 21, 1> {};
- class HasInstanceCallHandler : public BitField<bool, 22, 1> {};
+ class IsHiddenPrototype : public BitField<bool, 22, 1> {};
class Deprecated : public BitField<bool, 23, 1> {};
class IsUnstable : public BitField<bool, 24, 1> {};
class IsMigrationTarget : public BitField<bool, 25, 1> {};
@@ -5225,16 +5422,15 @@ class Map: public HeapObject {
inline void set_non_instance_prototype(bool value);
inline bool has_non_instance_prototype();
- // Tells whether function has special prototype property. If not, prototype
- // property will not be created when accessed (will return undefined),
- // and construction from this function will not be allowed.
- inline void set_function_with_prototype(bool value);
- inline bool function_with_prototype();
+ // Tells whether the instance has a [[Construct]] internal method.
+ // This property is implemented according to ES6, section 7.2.4.
+ inline void set_is_constructor(bool value);
+ inline bool is_constructor() const;
// Tells whether the instance with this map should be ignored by the
// Object.getPrototypeOf() function and the __proto__ accessor.
inline void set_is_hidden_prototype();
- inline bool is_hidden_prototype();
+ inline bool is_hidden_prototype() const;
// Records and queries whether the instance has a named interceptor.
inline void set_has_named_interceptor();
@@ -5257,6 +5453,11 @@ class Map: public HeapObject {
inline void set_is_observed();
inline bool is_observed();
+ // Tells whether the instance has a [[Call]] internal method.
+ // This property is implemented according to ES6, section 7.2.3.
+ inline void set_is_callable();
+ inline bool is_callable() const;
+
inline void set_is_strong();
inline bool is_strong();
inline void set_is_extensible(bool value);
@@ -5431,8 +5632,6 @@ class Map: public HeapObject {
inline bool owns_descriptors();
inline void set_owns_descriptors(bool owns_descriptors);
- inline bool has_instance_call_handler();
- inline void set_has_instance_call_handler();
inline void mark_unstable();
inline bool is_stable();
inline void set_migration_target(bool value);
@@ -5583,6 +5782,7 @@ class Map: public HeapObject {
inline bool IsPrimitiveMap();
inline bool IsJSObjectMap();
inline bool IsJSArrayMap();
+ inline bool IsJSFunctionMap();
inline bool IsStringMap();
inline bool IsJSProxyMap();
inline bool IsJSGlobalProxyMap();
@@ -5681,17 +5881,17 @@ class Map: public HeapObject {
// Bit positions for bit field.
static const int kHasNonInstancePrototype = 0;
- static const int kIsHiddenPrototype = 1;
+ static const int kIsCallable = 1;
static const int kHasNamedInterceptor = 2;
static const int kHasIndexedInterceptor = 3;
static const int kIsUndetectable = 4;
static const int kIsObserved = 5;
static const int kIsAccessCheckNeeded = 6;
- class FunctionWithPrototype: public BitField<bool, 7, 1> {};
+ static const int kIsConstructor = 7;
// Bit positions for bit field 2
static const int kIsExtensible = 0;
- static const int kStringWrapperSafeForDefaultValueOf = 1;
+ // Bit 1 is free.
class IsPrototypeMapBits : public BitField<bool, 2, 1> {};
class ElementsKindBits: public BitField<ElementsKind, 3, 5> {};
@@ -5847,6 +6047,10 @@ class PrototypeInfo : public Struct {
inline void set_registry_slot(int slot);
// [validity_cell]: Cell containing the validity bit for prototype chains
// going through this object, or Smi(0) if uninitialized.
+  // When a prototype object changes its map, both its own validity cell
+ // and those of all "downstream" prototypes are invalidated; handlers for a
+ // given receiver embed the currently valid cell for that receiver's prototype
+ // during their compilation and check it on execution.
DECL_ACCESSORS(validity_cell, Object)
// [constructor_name]: User-friendly name of the original constructor.
DECL_ACCESSORS(constructor_name, Object)
@@ -5868,6 +6072,32 @@ class PrototypeInfo : public Struct {
};
+// Pair used to store both a ScopeInfo and an extension object in the extension
+// slot of a block context. Needed in the rare case where a declaration block
+// scope (a "varblock" as used to desugar parameter destructuring) also contains
+// a sloppy direct eval. (In no other case are both needed at the same time.)
+class SloppyBlockWithEvalContextExtension : public Struct {
+ public:
+ // [scope_info]: Scope info.
+ DECL_ACCESSORS(scope_info, ScopeInfo)
+ // [extension]: Extension object.
+ DECL_ACCESSORS(extension, JSObject)
+
+ DECLARE_CAST(SloppyBlockWithEvalContextExtension)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(SloppyBlockWithEvalContextExtension)
+ DECLARE_VERIFIER(SloppyBlockWithEvalContextExtension)
+
+ static const int kScopeInfoOffset = HeapObject::kHeaderSize;
+ static const int kExtensionOffset = kScopeInfoOffset + kPointerSize;
+ static const int kSize = kExtensionOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SloppyBlockWithEvalContextExtension);
+};
+
+
// Script describes a script which has been added to the VM.
class Script: public Struct {
public:
@@ -5897,14 +6127,14 @@ class Script: public Struct {
DECL_ACCESSORS(name, Object)
// [id]: the script id.
- DECL_ACCESSORS(id, Smi)
+ DECL_INT_ACCESSORS(id)
// [line_offset]: script line offset in resource from where it was extracted.
- DECL_ACCESSORS(line_offset, Smi)
+ DECL_INT_ACCESSORS(line_offset)
// [column_offset]: script column offset in resource from where it was
// extracted.
- DECL_ACCESSORS(column_offset, Smi)
+ DECL_INT_ACCESSORS(column_offset)
// [context_data]: context data for the context this script was compiled in.
DECL_ACCESSORS(context_data, Object)
@@ -5913,7 +6143,7 @@ class Script: public Struct {
DECL_ACCESSORS(wrapper, HeapObject)
// [type]: the script type.
- DECL_ACCESSORS(type, Smi)
+ DECL_INT_ACCESSORS(type)
// [line_ends]: FixedArray of line ends positions.
DECL_ACCESSORS(line_ends, Object)
@@ -5924,14 +6154,14 @@ class Script: public Struct {
// [eval_from_instructions_offset]: the instruction offset in the code for the
// function from which eval was called where eval was called.
- DECL_ACCESSORS(eval_from_instructions_offset, Smi)
+ DECL_INT_ACCESSORS(eval_from_instructions_offset)
// [shared_function_infos]: weak fixed array containing all shared
// function infos created from this script.
DECL_ACCESSORS(shared_function_infos, Object)
// [flags]: Holds an exciting bitfield.
- DECL_ACCESSORS(flags, Smi)
+ DECL_INT_ACCESSORS(flags)
// [source_url]: sourceURL from magic comment
DECL_ACCESSORS(source_url, Object)
@@ -5949,6 +6179,11 @@ class Script: public Struct {
inline CompilationState compilation_state();
inline void set_compilation_state(CompilationState state);
+ // [hide_source]: determines whether the script source can be exposed as
+ // function source. Encoded in the 'flags' field.
+ inline bool hide_source();
+ inline void set_hide_source(bool value);
+
// [origin_options]: optional attributes set by the embedder via ScriptOrigin,
// and used by the embedder to make decisions about the script. V8 just passes
// this through. Encoded in the 'flags' field.
@@ -5981,6 +6216,17 @@ class Script: public Struct {
// that matches the function literal. Return empty handle if not found.
MaybeHandle<SharedFunctionInfo> FindSharedFunctionInfo(FunctionLiteral* fun);
+ // Iterate over all script objects on the heap.
+ class Iterator {
+ public:
+ explicit Iterator(Isolate* isolate);
+ Script* Next();
+
+ private:
+ WeakFixedArray::Iterator iterator_;
+ DISALLOW_COPY_AND_ASSIGN(Iterator);
+ };
+
// Dispatched behavior.
DECLARE_PRINTER(Script)
DECLARE_VERIFIER(Script)
@@ -6010,7 +6256,8 @@ class Script: public Struct {
// Bit positions in the flags field.
static const int kCompilationTypeBit = 0;
static const int kCompilationStateBit = 1;
- static const int kOriginOptionsShift = 2;
+ static const int kHideSourceBit = 2;
+ static const int kOriginOptionsShift = 3;
static const int kOriginOptionsSize = 3;
static const int kOriginOptionsMask = ((1 << kOriginOptionsSize) - 1)
<< kOriginOptionsShift;
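Inserting kHideSourceBit at bit 2 pushes the three origin-option bits up to positions 3-5. A minimal sketch of how such a packed 'flags' int could be read and written under that layout (the helper names are illustrative, not V8 API):

    int SetHideSource(int flags, bool hide) {
      const int kHideSourceBit = 2;
      return hide ? (flags | (1 << kHideSourceBit))
                  : (flags & ~(1 << kHideSourceBit));
    }

    int OriginOptions(int flags) {
      const int kOriginOptionsShift = 3, kOriginOptionsSize = 3;
      return (flags >> kOriginOptionsShift) & ((1 << kOriginOptionsSize) - 1);
    }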
@@ -6082,7 +6329,7 @@ enum BuiltinFunctionId {
// that both {code} and {literals} can be NULL to pass search result status.
struct CodeAndLiterals {
Code* code; // Cached optimized code.
- FixedArray* literals; // Cached literals array.
+ LiteralsArray* literals; // Cached literals array.
};
@@ -6102,15 +6349,18 @@ class SharedFunctionInfo: public HeapObject {
DECL_ACCESSORS(optimized_code_map, Object)
// Returns entry from optimized code map for specified context and OSR entry.
- // Note that {code == nullptr} indicates no matching entry has been found,
- // whereas {literals == nullptr} indicates the code is context-independent.
+ // Note that {code == nullptr, literals == nullptr} indicates no matching
+ // entry has been found, whereas {code, literals == nullptr} indicates that
+ // code is context-independent.
CodeAndLiterals SearchOptimizedCodeMap(Context* native_context,
BailoutId osr_ast_id);
// Clear optimized code map.
void ClearOptimizedCodeMap();
- // Removed a specific optimized code object from the optimized code map.
+ // Removes a specific optimized code object from the optimized code map.
+  // In the non-OSR case the code reference is cleared from the cache entry,
+  // but the entry itself is left in the map so its literals stay shared.
void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
// Trims the optimized code map after entries have been removed.
@@ -6121,10 +6371,12 @@ class SharedFunctionInfo: public HeapObject {
Handle<Code> code);
// Add a new entry to the optimized code map for context-dependent code.
+  // |code| is either a code object or an undefined value. In the latter case
+  // the entry just maps the |native_context, osr_ast_id| pair to the
+  // |literals| array.
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
- Handle<Code> code,
- Handle<FixedArray> literals,
+ Handle<HeapObject> code,
+ Handle<LiteralsArray> literals,
BailoutId osr_ast_id);
// Set up the link between shared function info and the script. The shared
@@ -6143,6 +6395,8 @@ class SharedFunctionInfo: public HeapObject {
static const int kEntryLength = 4;
static const int kInitialLength = kEntriesStart + kEntryLength;
+ static const int kNotFound = -1;
+
// [scope_info]: Scope info.
DECL_ACCESSORS(scope_info, ScopeInfo)
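Putting the two comments above together, a caller can presumably distinguish three outcomes of SearchOptimizedCodeMap. This interpretation sketch follows the documented pairs; the literals-only case is an inference from the non-OSR eviction behavior described above:

    CodeAndLiterals pair = shared->SearchOptimizedCodeMap(native_context, osr_id);
    if (pair.code != nullptr) {
      // Hit: cached optimized code (context-independent if literals == nullptr).
    } else if (pair.literals != nullptr) {
      // Code was evicted, but the literals array can still be shared.
    } else {
      // No matching entry at all.
    }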
@@ -6420,6 +6674,9 @@ class SharedFunctionInfo: public HeapObject {
// Tells whether this function should be subject to debugging.
inline bool IsSubjectToDebugging();
+ // Whether this function is defined in native code or extensions.
+ inline bool IsBuiltin();
+
// Check whether or not this function is inlineable.
bool IsInlineable();
@@ -6444,6 +6701,23 @@ class SharedFunctionInfo: public HeapObject {
void ResetForNewContext(int new_ic_age);
+ // Iterate over all shared function infos that are created from a script.
+ // That excludes shared function infos created for API functions and C++
+ // builtins.
+ class Iterator {
+ public:
+ explicit Iterator(Isolate* isolate);
+ SharedFunctionInfo* Next();
+
+ private:
+ bool NextScript();
+
+ Script::Iterator script_iterator_;
+ WeakFixedArray::Iterator sfi_iterator_;
+ DisallowHeapAllocation no_gc_;
+ DISALLOW_COPY_AND_ASSIGN(Iterator);
+ };
+
DECLARE_CAST(SharedFunctionInfo)
// Constants.
@@ -6595,12 +6869,12 @@ class SharedFunctionInfo: public HeapObject {
kAllowLazyCompilation,
kAllowLazyCompilationWithoutContext,
kOptimizationDisabled,
+ kNative,
kStrictModeFunction,
kStrongModeFunction,
kUsesArguments,
kNeedsHomeObject,
kHasDuplicateParameters,
- kNative,
kForceInline,
kBoundFunction,
kIsAnonymous,
@@ -6659,6 +6933,9 @@ class SharedFunctionInfo: public HeapObject {
static const int kNativeBitWithinByte =
(kNative + kCompilerHintsSmiTagSize) % kBitsPerByte;
+ static const int kBoundBitWithinByte =
+ (kBoundFunction + kCompilerHintsSmiTagSize) % kBitsPerByte;
+
#if defined(V8_TARGET_LITTLE_ENDIAN)
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kStrictModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
@@ -6667,6 +6944,9 @@ class SharedFunctionInfo: public HeapObject {
(kStrongModeFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
static const int kNativeByteOffset = kCompilerHintsOffset +
(kNative + kCompilerHintsSmiTagSize) / kBitsPerByte;
+ static const int kBoundByteOffset =
+ kCompilerHintsOffset +
+ (kBoundFunction + kCompilerHintsSmiTagSize) / kBitsPerByte;
#elif defined(V8_TARGET_BIG_ENDIAN)
static const int kStrictModeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
@@ -6677,11 +6957,20 @@ class SharedFunctionInfo: public HeapObject {
static const int kNativeByteOffset = kCompilerHintsOffset +
(kCompilerHintsSize - 1) -
((kNative + kCompilerHintsSmiTagSize) / kBitsPerByte);
+ static const int kBoundByteOffset =
+ kCompilerHintsOffset + (kCompilerHintsSize - 1) -
+ ((kBoundFunction + kCompilerHintsSmiTagSize) / kBitsPerByte);
#else
#error Unknown byte ordering
#endif
private:
+ // Returns entry from optimized code map for specified context and OSR entry.
+ // The result is either kNotFound, kSharedCodeIndex for context-independent
+ // entry or a start index of the context-dependent entry.
+ int SearchOptimizedCodeMapEntry(Context* native_context,
+ BailoutId osr_ast_id);
+
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@@ -6744,17 +7033,6 @@ class JSGeneratorObject: public JSObject {
// Resume mode, for use by runtime functions.
enum ResumeMode { NEXT, THROW };
- // Yielding from a generator returns an object with the following inobject
- // properties. See Context::iterator_result_map() for the map.
- static const int kResultValuePropertyIndex = 0;
- static const int kResultDonePropertyIndex = 1;
- static const int kResultPropertyCount = 2;
-
- static const int kResultValuePropertyOffset = JSObject::kHeaderSize;
- static const int kResultDonePropertyOffset =
- kResultValuePropertyOffset + kPointerSize;
- static const int kResultSize = kResultDonePropertyOffset + kPointerSize;
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
};
@@ -6894,8 +7172,8 @@ class JSFunction: public JSObject {
// arguments. Bound functions never contain literals.
DECL_ACCESSORS(literals_or_bindings, FixedArray)
- inline FixedArray* literals();
- inline void set_literals(FixedArray* literals);
+ inline LiteralsArray* literals();
+ inline void set_literals(LiteralsArray* literals);
inline FixedArray* function_bindings();
inline void set_function_bindings(FixedArray* bindings);
@@ -6920,15 +7198,9 @@ class JSFunction: public JSObject {
static void SetInstancePrototype(Handle<JSFunction> function,
Handle<Object> value);
- // Creates a new closure for the fucntion with the same bindings,
- // bound values, and prototype. An equivalent of spec operations
- // ``CloneMethod`` and ``CloneBoundFunction``.
- static Handle<JSFunction> CloneClosure(Handle<JSFunction> function);
-
// After prototype is removed, it will not be created when accessed, and
// [[Construct]] from this function will not be allowed.
bool RemovePrototype();
- inline bool should_have_prototype();
// Accessor for this function's initial map's [[class]]
// property. This is primarily used by ECMA native functions. This
@@ -7091,27 +7363,14 @@ class JSGlobalObject: public GlobalObject {
// JavaScript.
class JSBuiltinsObject: public GlobalObject {
public:
- // Accessors for the runtime routines written in JavaScript.
- inline Object* javascript_builtin(Builtins::JavaScript id);
- inline void set_javascript_builtin(Builtins::JavaScript id, Object* value);
-
DECLARE_CAST(JSBuiltinsObject)
// Dispatched behavior.
DECLARE_PRINTER(JSBuiltinsObject)
DECLARE_VERIFIER(JSBuiltinsObject)
- // Layout description. The size of the builtins object includes
- // room for two pointers per runtime routine written in javascript
- // (function and code object).
- static const int kJSBuiltinsCount = Builtins::id_count;
- static const int kJSBuiltinsOffset = GlobalObject::kHeaderSize;
- static const int kSize =
- GlobalObject::kHeaderSize + (kJSBuiltinsCount * kPointerSize);
-
- static int OffsetOfFunctionWithId(Builtins::JavaScript id) {
- return kJSBuiltinsOffset + id * kPointerSize;
- }
+ // Layout description.
+ static const int kSize = GlobalObject::kHeaderSize;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSBuiltinsObject);
@@ -7173,6 +7432,9 @@ class JSDate: public JSObject {
void SetValue(Object* value, bool is_value_nan);
+ // ES6 section 20.3.4.45 Date.prototype [ @@toPrimitive ]
+ static MUST_USE_RESULT MaybeHandle<Object> ToPrimitive(
+ Handle<JSReceiver> receiver, Handle<Object> hint);
// Dispatched behavior.
DECLARE_PRINTER(JSDate)
@@ -7716,8 +7978,8 @@ class AllocationSite: public Struct {
// walked in a particular order. So [[1, 2], 1, 2] will have one
// nested_site, but [[1, 2], 3, [4]] will have a list of two.
DECL_ACCESSORS(nested_site, Object)
- DECL_ACCESSORS(pretenure_data, Smi)
- DECL_ACCESSORS(pretenure_create_count, Smi)
+ DECL_INT_ACCESSORS(pretenure_data)
+ DECL_INT_ACCESSORS(pretenure_create_count)
DECL_ACCESSORS(dependent_code, DependentCode)
DECL_ACCESSORS(weak_next, Object)
@@ -8023,6 +8285,10 @@ class Name: public HeapObject {
static inline Handle<Name> Flatten(Handle<Name> name,
PretenureFlag pretenure = NOT_TENURED);
+ // Return a string version of this name that is converted according to the
+ // rules described in ES6 section 9.2.11.
+ MUST_USE_RESULT static MaybeHandle<String> ToFunctionName(Handle<Name> name);
+
DECLARE_CAST(Name)
DECLARE_PRINTER(Name)
@@ -8102,7 +8368,7 @@ class Symbol: public Name {
// [name]: The print name of a symbol, or undefined if none.
DECL_ACCESSORS(name, Object)
- DECL_ACCESSORS(flags, Smi)
+ DECL_INT_ACCESSORS(flags)
// [is_private]: Whether this is a private symbol. Private symbols can only
// be used to designate own properties of objects.
@@ -8286,6 +8552,9 @@ class String: public Name {
// to this method are not efficient unless the string is flat.
INLINE(uint16_t Get(int index));
+ // ES6 section 7.1.3.1 ToNumber Applied to the String Type
+ static Handle<Object> ToNumber(Handle<String> subject);
+
// Flattens the string. Checks first inline to see if it is
// necessary. Does nothing if the string is not a cons string.
// Flattening allocates a sequential string with the same data as
@@ -8313,6 +8582,20 @@ class String: public Name {
// Requires: StringShape(this).IsIndirect() && this->IsFlat()
inline String* GetUnderlying();
+ // String relational comparison, implemented according to ES6 section 7.2.11
+ // Abstract Relational Comparison (step 5): The comparison of Strings uses a
+ // simple lexicographic ordering on sequences of code unit values. There is no
+ // attempt to use the more complex, semantically oriented definitions of
+ // character or string equality and collating order defined in the Unicode
+ // specification. Therefore String values that are canonically equal according
+ // to the Unicode standard could test as unequal. In effect this algorithm
+ // assumes that both Strings are already in normalized form. Also, note that
+ // for strings containing supplementary characters, lexicographic ordering on
+ // sequences of UTF-16 code unit values differs from that on sequences of code
+ // point values.
+ MUST_USE_RESULT static ComparisonResult Compare(Handle<String> x,
+ Handle<String> y);
+
// String equality operations.
inline bool Equals(String* other);
inline static bool Equals(Handle<String> one, Handle<String> two);
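A concrete instance of the caveat in the comment: U+FF21 (fullwidth 'A') is the single code unit 0xFF21, while U+10400 encodes as the surrogate pair 0xD801 0xDC00, so code-unit order and code-point order disagree. A self-contained check of the ordering Compare() specifies:

    #include <cassert>
    #include <string>

    int main() {
      std::u16string supplementary = {0xD801, 0xDC00};  // U+10400
      std::u16string fullwidth_a = {0xFF21};            // U+FF21
      // Lexicographic comparison on UTF-16 code units, per ES6 7.2.11 step 5:
      assert(supplementary < fullwidth_a);  // 0xD801 < 0xFF21
      // By code point values the order reverses: 0x10400 > 0xFF21.
      return 0;
    }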
@@ -8945,6 +9228,9 @@ class Oddball: public HeapObject {
inline byte kind() const;
inline void set_kind(byte kind);
+ // ES6 section 7.1.3 ToNumber for Boolean, Null, Undefined.
+ MUST_USE_RESULT static inline Handle<Object> ToNumber(Handle<Oddball> input);
+
DECLARE_CAST(Oddball)
// Dispatched behavior.
@@ -9194,7 +9480,7 @@ class JSProxy: public JSReceiver {
class JSFunctionProxy: public JSProxy {
public:
// [call_trap]: The call trap.
- DECL_ACCESSORS(call_trap, Object)
+ DECL_ACCESSORS(call_trap, JSReceiver)
// [construct_trap]: The construct trap.
DECL_ACCESSORS(construct_trap, Object)
@@ -9241,6 +9527,9 @@ class JSSet : public JSCollection {
public:
DECLARE_CAST(JSSet)
+ static void Initialize(Handle<JSSet> set, Isolate* isolate);
+ static void Clear(Handle<JSSet> set);
+
// Dispatched behavior.
DECLARE_PRINTER(JSSet)
DECLARE_VERIFIER(JSSet)
@@ -9255,6 +9544,9 @@ class JSMap : public JSCollection {
public:
DECLARE_CAST(JSMap)
+ static void Initialize(Handle<JSMap> map, Isolate* isolate);
+ static void Clear(Handle<JSMap> map);
+
// Dispatched behavior.
DECLARE_PRINTER(JSMap)
DECLARE_VERIFIER(JSMap)
@@ -9370,6 +9662,40 @@ class JSMapIterator: public OrderedHashTableIterator<JSMapIterator,
};
+// ES6 section 25.1.1.3 The IteratorResult Interface
+class JSIteratorResult final : public JSObject {
+ public:
+  // [done]: This is the result status of an iterator next method call. If the
+  // end of the iterator was reached, done is true. If the end was not reached,
+  // done is false and [value] is available.
+ DECL_ACCESSORS(done, Object)
+
+ // [value]: If [done] is false, this is the current iteration element value.
+ // If [done] is true, this is the return value of the iterator, if it supplied
+ // one. If the iterator does not have a return value, value is undefined.
+ // In that case, the value property may be absent from the conforming object
+ // if it does not inherit an explicit value property.
+ DECL_ACCESSORS(value, Object)
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSIteratorResult)
+ DECLARE_VERIFIER(JSIteratorResult)
+
+ DECLARE_CAST(JSIteratorResult)
+
+ static const int kValueOffset = JSObject::kHeaderSize;
+ static const int kDoneOffset = kValueOffset + kPointerSize;
+ static const int kSize = kDoneOffset + kPointerSize;
+
+ // Indices of in-object properties.
+ static const int kValueIndex = 0;
+ static const int kDoneIndex = 1;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSIteratorResult);
+};
+
+
// Base class for both JSWeakMap and JSWeakSet
class JSWeakCollection: public JSObject {
public:
@@ -9379,6 +9705,12 @@ class JSWeakCollection: public JSObject {
// [next]: linked list of encountered weak maps during GC.
DECL_ACCESSORS(next, Object)
+ static void Initialize(Handle<JSWeakCollection> collection, Isolate* isolate);
+ static void Set(Handle<JSWeakCollection> collection, Handle<Object> key,
+ Handle<Object> value, int32_t hash);
+ static bool Delete(Handle<JSWeakCollection> collection, Handle<Object> key,
+ int32_t hash);
+
static const int kTableOffset = JSObject::kHeaderSize;
static const int kNextOffset = kTableOffset + kPointerSize;
static const int kSize = kNextOffset + kPointerSize;
@@ -9447,6 +9779,15 @@ class JSArrayBuffer: public JSObject {
void Neuter();
+ static void Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
+ bool is_external, void* data, size_t allocated_length,
+ SharedFlag shared = SharedFlag::kNotShared);
+
+ static bool SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
+ Isolate* isolate, size_t allocated_length,
+ bool initialize = true,
+ SharedFlag shared = SharedFlag::kNotShared);
+
// Dispatched behavior.
DECLARE_PRINTER(JSArrayBuffer)
DECLARE_VERIFIER(JSArrayBuffer)
@@ -9685,7 +10026,7 @@ class JSRegExpResult: public JSArray {
class AccessorInfo: public Struct {
public:
DECL_ACCESSORS(name, Object)
- DECL_ACCESSORS(flag, Smi)
+ DECL_INT_ACCESSORS(flag)
DECL_ACCESSORS(expected_receiver_type, Object)
inline bool all_can_read();
@@ -9933,7 +10274,7 @@ class FunctionTemplateInfo: public TemplateInfo {
DECL_ACCESSORS(signature, Object)
DECL_ACCESSORS(instance_call_handler, Object)
DECL_ACCESSORS(access_check_info, Object)
- DECL_ACCESSORS(flag, Smi)
+ DECL_INT_ACCESSORS(flag)
inline int length() const;
inline void set_length(int value);
@@ -10094,12 +10435,12 @@ class DebugInfo: public Struct {
class BreakPointInfo: public Struct {
public:
// The position in the code for the break point.
- DECL_ACCESSORS(code_position, Smi)
+ DECL_INT_ACCESSORS(code_position)
// The position in the source for the break position.
- DECL_ACCESSORS(source_position, Smi)
+ DECL_INT_ACCESSORS(source_position)
// The position in the source for the last statement before this break
// position.
- DECL_ACCESSORS(statement_position, Smi)
+ DECL_INT_ACCESSORS(statement_position)
// List of related JavaScript break points.
DECL_ACCESSORS(break_point_objects, Object)
@@ -10144,7 +10485,6 @@ class BreakPointInfo: public Struct {
V(kExternalStringsTable, "external_strings_table", "(External strings)") \
V(kStrongRootList, "strong_root_list", "(Strong roots)") \
V(kSmiRootList, "smi_root_list", "(Smi roots)") \
- V(kInternalizedString, "internalized_string", "(Internal string)") \
V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
V(kTop, "top", "(Isolate)") \
V(kRelocatable, "relocatable", "(Relocatable)") \
@@ -10244,22 +10584,13 @@ class StructBodyDescriptor : public
};
-// BooleanBit is a helper class for setting and getting a bit in an
-// integer or Smi.
+// BooleanBit is a helper class for setting and getting a bit in an integer.
class BooleanBit : public AllStatic {
public:
- static inline bool get(Smi* smi, int bit_position) {
- return get(smi->value(), bit_position);
- }
-
static inline bool get(int value, int bit_position) {
return (value & (1 << bit_position)) != 0;
}
- static inline Smi* set(Smi* smi, int bit_position, bool v) {
- return Smi::FromInt(set(smi->value(), bit_position, v));
- }
-
static inline int set(int value, int bit_position, bool v) {
if (v) {
value |= (1 << bit_position);
@@ -10270,6 +10601,29 @@ class BooleanBit : public AllStatic {
}
};
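With the Smi overloads removed, BooleanBit operates on plain ints only; callers that previously stored Smi-typed flags now go through the new DECL_INT_ACCESSORS fields. A minimal round-trip sketch:

    int flags = 0;
    flags = BooleanBit::set(flags, 3, true);   // BooleanBit::get(flags, 3) == true
    flags = BooleanBit::set(flags, 3, false);  // BooleanBit::get(flags, 3) == false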
+
+class KeyAccumulator final BASE_EMBEDDED {
+ public:
+ explicit KeyAccumulator(Isolate* isolate) : isolate_(isolate), length_(0) {}
+
+ void AddKey(Handle<Object> key, int check_limit);
+ void AddKeys(Handle<FixedArray> array, FixedArray::KeyFilter filter);
+ void AddKeys(Handle<JSObject> array, FixedArray::KeyFilter filter);
+ void PrepareForComparisons(int count);
+ Handle<FixedArray> GetKeys();
+
+ int GetLength() { return length_; }
+
+ private:
+ void EnsureCapacity(int capacity);
+ void Grow();
+
+ Isolate* isolate_;
+ Handle<FixedArray> keys_;
+ Handle<OrderedHashSet> set_;
+ int length_;
+ DISALLOW_COPY_AND_ASSIGN(KeyAccumulator);
+};
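KeyAccumulator takes over for-in key collection from the removed FixedArray::AddKeysFromArrayLike/UnionOfKeys pair, de-duplicating through its OrderedHashSet. A hedged, shape-only sketch; the handles fed in here (own_keys, prototype_keys) are hypothetical placeholders, not names from this patch:

    KeyAccumulator accumulator(isolate);
    accumulator.AddKeys(own_keys, FixedArray::ALL_KEYS);        // hypothetical input
    accumulator.AddKeys(prototype_keys, FixedArray::ALL_KEYS);  // duplicates dropped
    Handle<FixedArray> keys = accumulator.GetKeys();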
} } // namespace v8::internal
#endif // V8_OBJECTS_H_
diff --git a/deps/v8/src/optimizing-compile-dispatcher.cc b/deps/v8/src/optimizing-compile-dispatcher.cc
index 1f98e7bc95..8e3e96ad00 100644
--- a/deps/v8/src/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/optimizing-compile-dispatcher.cc
@@ -4,12 +4,11 @@
#include "src/optimizing-compile-dispatcher.h"
-#include "src/v8.h"
-
#include "src/base/atomicops.h"
#include "src/full-codegen/full-codegen.h"
#include "src/hydrogen.h"
#include "src/isolate.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 2b6467ba3d..1bd163ce49 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -72,7 +72,7 @@ ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
Handle<Script> script(Script::cast(shared->script()));
set_script(script);
- if (!script.is_null() && script->type()->value() == Script::TYPE_NATIVE) {
+ if (!script.is_null() && script->type() == Script::TYPE_NATIVE) {
set_native();
}
}
@@ -86,7 +86,7 @@ ParseInfo::ParseInfo(Zone* zone, Handle<Script> script) : ParseInfo(zone) {
set_unicode_cache(isolate_->unicode_cache());
set_script(script);
- if (script->type()->value() == Script::TYPE_NATIVE) {
+ if (script->type() == Script::TYPE_NATIVE) {
set_native();
}
}
@@ -359,7 +359,7 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
body = new (zone()) ZoneList<Statement*>(call_super ? 2 : 1, zone());
AddAssertIsConstruct(body, pos);
if (call_super) {
- // %_DefaultConstructorCallSuper(new.target, .this_function)
+ // %_DefaultConstructorCallSuper(new.target, %GetPrototype(<this-fun>))
ZoneList<Expression*>* args =
new (zone()) ZoneList<Expression*>(2, zone());
VariableProxy* new_target_proxy = scope_->NewUnresolved(
@@ -369,11 +369,14 @@ FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
VariableProxy* this_function_proxy = scope_->NewUnresolved(
factory(), ast_value_factory()->this_function_string(),
Variable::NORMAL, pos);
- args->Add(this_function_proxy, zone());
+ ZoneList<Expression*>* tmp =
+ new (zone()) ZoneList<Expression*>(1, zone());
+ tmp->Add(this_function_proxy, zone());
+ Expression* get_prototype =
+ factory()->NewCallRuntime(Runtime::kGetPrototype, tmp, pos);
+ args->Add(get_prototype, zone());
CallRuntime* call = factory()->NewCallRuntime(
- ast_value_factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kInlineDefaultConstructorCallSuper),
- args, pos);
+ Runtime::kInlineDefaultConstructorCallSuper, args, pos);
body->Add(factory()->NewReturnStatement(call, pos), zone());
}
@@ -628,7 +631,7 @@ Expression* ParserTraits::BuildUnaryExpression(Expression* expression,
// Desugar '+foo' => 'foo*1'
if (op == Token::ADD) {
return factory->NewBinaryOperation(
- Token::MUL, expression, factory->NewNumberLiteral(1, pos), pos);
+ Token::MUL, expression, factory->NewNumberLiteral(1, pos, true), pos);
}
// The same idea for '-foo' => 'foo*(-1)'.
if (op == Token::SUB) {
@@ -671,9 +674,8 @@ Expression* ParserTraits::NewThrowError(Runtime::FunctionId id,
ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(2, zone);
args->Add(parser_->factory()->NewSmiLiteral(message, pos), zone);
args->Add(parser_->factory()->NewStringLiteral(arg, pos), zone);
- CallRuntime* call_constructor = parser_->factory()->NewCallRuntime(
- parser_->ast_value_factory()->empty_string(), Runtime::FunctionForId(id),
- args, pos);
+ CallRuntime* call_constructor =
+ parser_->factory()->NewCallRuntime(id, args, pos);
return parser_->factory()->NewThrow(call_constructor, pos);
}
@@ -917,7 +919,7 @@ Parser::Parser(ParseInfo* info)
set_allow_harmony_sloppy_let(FLAG_harmony_sloppy_let);
set_allow_harmony_rest_parameters(FLAG_harmony_rest_parameters);
set_allow_harmony_default_parameters(FLAG_harmony_default_parameters);
- set_allow_harmony_spreadcalls(FLAG_harmony_spreadcalls);
+ set_allow_harmony_spread_calls(FLAG_harmony_spread_calls);
set_allow_harmony_destructuring(FLAG_harmony_destructuring);
set_allow_harmony_spread_arrays(FLAG_harmony_spread_arrays);
set_allow_harmony_new_target(FLAG_harmony_new_target);
@@ -1201,9 +1203,8 @@ FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
// BindingIdentifier
ParseFormalParameter(&formals, &formals_classifier, &ok);
if (ok) {
- DeclareFormalParameter(
- formals.scope, formals.at(0), formals.is_simple,
- &formals_classifier);
+ DeclareFormalParameter(formals.scope, formals.at(0),
+ &formals_classifier);
}
}
}
@@ -1279,9 +1280,8 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
Scanner::Location old_super_loc = function_state_->super_location();
Statement* stat = ParseStatementListItem(CHECK_OK);
- if (is_strong(language_mode()) &&
- scope_->is_function_scope() &&
- i::IsConstructor(function_state_->kind())) {
+ if (is_strong(language_mode()) && scope_->is_function_scope() &&
+ IsClassConstructor(function_state_->kind())) {
Scanner::Location this_loc = function_state_->this_location();
Scanner::Location super_loc = function_state_->super_location();
if (this_loc.beg_pos != old_this_loc.beg_pos &&
@@ -1328,13 +1328,34 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
// Strong mode implies strict mode. If there are several "use strict"
// / "use strong" directives, do the strict mode changes only once.
if (is_sloppy(scope_->language_mode())) {
- scope_->SetLanguageMode(static_cast<LanguageMode>(
- scope_->language_mode() | STRICT));
+ scope_->SetLanguageMode(
+ static_cast<LanguageMode>(scope_->language_mode() | STRICT));
}
if (use_strong_found) {
- scope_->SetLanguageMode(static_cast<LanguageMode>(
- scope_->language_mode() | STRONG));
+ scope_->SetLanguageMode(
+ static_cast<LanguageMode>(scope_->language_mode() | STRONG));
+ if (IsClassConstructor(function_state_->kind())) {
+ // "use strong" cannot occur in a class constructor body, to avoid
+ // unintuitive strong class object semantics.
+ ParserTraits::ReportMessageAt(
+ token_loc, MessageTemplate::kStrongConstructorDirective);
+ *ok = false;
+ return nullptr;
+ }
+ }
+ if (!scope_->HasSimpleParameters()) {
+        // On 29/7/2015, TC39 deemed "use strict" directives to be an error
+        // when occurring in the body of a function with a non-simple
+        // parameter list: https://goo.gl/ueA7Ln
+        //
+        // In V8, this also applies to "use strong" directives.
+ const AstRawString* string = literal->raw_value()->AsString();
+ ParserTraits::ReportMessageAt(
+ token_loc, MessageTemplate::kIllegalLanguageModeDirective,
+ string);
+ *ok = false;
+ return nullptr;
}
// Because declarations in strict eval code don't leak into the scope
// of the eval call, it is likely that functions declared in strict
@@ -1392,7 +1413,7 @@ Statement* Parser::ParseStatementListItem(bool* ok) {
case Token::VAR:
return ParseVariableStatement(kStatementListItem, NULL, ok);
case Token::LET:
- if (allow_let()) {
+ if (IsNextLetKeyword()) {
return ParseVariableStatement(kStatementListItem, NULL, ok);
}
break;
@@ -1998,12 +2019,14 @@ VariableProxy* Parser::NewUnresolved(const AstRawString* name,
Variable* Parser::Declare(Declaration* declaration,
DeclarationDescriptor::Kind declaration_kind,
- bool resolve, bool* ok) {
+ bool resolve, bool* ok, Scope* scope) {
VariableProxy* proxy = declaration->proxy();
DCHECK(proxy->raw_name() != NULL);
const AstRawString* name = proxy->raw_name();
VariableMode mode = declaration->mode();
- Scope* declaration_scope = DeclarationScope(mode);
+ if (scope == nullptr) scope = scope_;
+ Scope* declaration_scope =
+ IsLexicalVariableMode(mode) ? scope : scope->DeclarationScope();
Variable* var = NULL;
// If a suitable scope exists, then we can statically declare this
@@ -2235,7 +2258,16 @@ Statement* Parser::ParseFunctionDeclaration(
factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
Declare(declaration, DeclarationDescriptor::NORMAL, true, CHECK_OK);
if (names) names->Add(name, zone());
- return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ EmptyStatement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+ if (is_sloppy(language_mode()) && allow_harmony_sloppy_function() &&
+ !scope_->is_declaration_scope()) {
+ SloppyBlockFunctionStatement* delegate =
+ factory()->NewSloppyBlockFunctionStatement(empty, scope_);
+ scope_->DeclarationScope()->sloppy_block_function_map()->Declare(name,
+ delegate);
+ return delegate;
+ }
+ return empty;
}
@@ -2477,6 +2509,7 @@ void Parser::ParseVariableDeclarations(VariableDeclarationContext var_context,
parsing_result->descriptor.declaration_scope =
DeclarationScope(parsing_result->descriptor.mode);
parsing_result->descriptor.scope = scope_;
+ parsing_result->descriptor.hoist_scope = nullptr;
bool first_declaration = true;
@@ -2608,7 +2641,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
// Fall through.
case Token::SUPER:
if (is_strong(language_mode()) &&
- i::IsConstructor(function_state_->kind())) {
+ IsClassConstructor(function_state_->kind())) {
bool is_this = peek() == Token::THIS;
Expression* expr;
ExpressionClassifier classifier;
@@ -2639,9 +2672,6 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
}
break;
- // TODO(arv): Handle `let [`
- // https://code.google.com/p/v8/issues/detail?id=3847
-
default:
break;
}
@@ -2691,7 +2721,8 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
// Parsed expression statement, followed by semicolon.
// Detect attempts at 'let' declarations in sloppy mode.
- if (peek() == Token::IDENTIFIER && expr->AsVariableProxy() != NULL &&
+ if (!allow_harmony_sloppy_let() && peek() == Token::IDENTIFIER &&
+ expr->AsVariableProxy() != NULL &&
expr->AsVariableProxy()->raw_name() ==
ast_value_factory()->let_string()) {
ReportMessage(MessageTemplate::kSloppyLexical, NULL);
@@ -2817,7 +2848,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
}
} else {
if (is_strong(language_mode()) &&
- i::IsConstructor(function_state_->kind())) {
+ IsClassConstructor(function_state_->kind())) {
int pos = peek_position();
ReportMessageAt(Scanner::Location(pos, pos + 1),
MessageTemplate::kStrongConstructorReturnValue);
@@ -2852,9 +2883,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
new (zone()) ZoneList<Expression*>(1, zone());
is_spec_object_args->Add(factory()->NewVariableProxy(temp), zone());
Expression* is_spec_object_call = factory()->NewCallRuntime(
- ast_value_factory()->is_spec_object_string(),
- Runtime::FunctionForId(Runtime::kInlineIsSpecObject),
- is_spec_object_args, pos);
+ Runtime::kInlineIsSpecObject, is_spec_object_args, pos);
// %_IsSpecObject(temp) ? temp : throw_expression
Expression* is_object_conditional = factory()->NewConditional(
@@ -2964,31 +2993,82 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
}
-SwitchStatement* Parser::ParseSwitchStatement(
- ZoneList<const AstRawString*>* labels, bool* ok) {
+Statement* Parser::ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
+ bool* ok) {
// SwitchStatement ::
// 'switch' '(' Expression ')' '{' CaseClause* '}'
+ // In order to get the CaseClauses to execute in their own lexical scope,
+ // but without requiring downstream code to have special scope handling
+ // code for switch statements, desugar into blocks as follows:
+  //   {  // To group the statements; evaluating Expression here is harmless.
+ // .tag_variable = Expression;
+ // { // To give CaseClauses a scope
+ // switch (.tag_variable) { CaseClause* }
+ // }
+ // }
- SwitchStatement* statement =
- factory()->NewSwitchStatement(labels, peek_position());
- Target target(&this->target_stack_, statement);
+ Block* switch_block =
+ factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
+ int switch_pos = peek_position();
Expect(Token::SWITCH, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
Expression* tag = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- bool default_seen = false;
- ZoneList<CaseClause*>* cases = new(zone()) ZoneList<CaseClause*>(4, zone());
- Expect(Token::LBRACE, CHECK_OK);
- while (peek() != Token::RBRACE) {
- CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
- cases->Add(clause, zone());
+ Variable* tag_variable =
+ scope_->NewTemporary(ast_value_factory()->dot_switch_tag_string());
+ Assignment* tag_assign = factory()->NewAssignment(
+ Token::ASSIGN, factory()->NewVariableProxy(tag_variable), tag,
+ tag->position());
+ Statement* tag_statement =
+ factory()->NewExpressionStatement(tag_assign, RelocInfo::kNoPosition);
+ switch_block->AddStatement(tag_statement, zone());
+
+ // make statement: undefined;
+  // This is needed so the tag isn't returned as the value, in case the case
+  // clauses don't produce a value.
+ switch_block->AddStatement(
+ factory()->NewExpressionStatement(
+ factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ zone());
+
+ Block* cases_block =
+ factory()->NewBlock(NULL, 1, false, RelocInfo::kNoPosition);
+ Scope* cases_scope = NewScope(scope_, BLOCK_SCOPE);
+ cases_scope->SetNonlinear();
+
+ SwitchStatement* switch_statement =
+ factory()->NewSwitchStatement(labels, switch_pos);
+
+ cases_scope->set_start_position(scanner()->location().beg_pos);
+ {
+ BlockState cases_block_state(&scope_, cases_scope);
+ Target target(&this->target_stack_, switch_statement);
+
+ Expression* tag_read = factory()->NewVariableProxy(tag_variable);
+
+ bool default_seen = false;
+ ZoneList<CaseClause*>* cases =
+ new (zone()) ZoneList<CaseClause*>(4, zone());
+ Expect(Token::LBRACE, CHECK_OK);
+ while (peek() != Token::RBRACE) {
+ CaseClause* clause = ParseCaseClause(&default_seen, CHECK_OK);
+ cases->Add(clause, zone());
+ }
+ switch_statement->Initialize(tag_read, cases);
+ cases_block->AddStatement(switch_statement, zone());
}
Expect(Token::RBRACE, CHECK_OK);
- if (statement) statement->Initialize(tag, cases);
- return statement;
+ cases_scope->set_end_position(scanner()->location().end_pos);
+ cases_scope = cases_scope->FinalizeBlockScope();
+ cases_block->set_scope(cases_scope);
+
+ switch_block->AddStatement(cases_block, zone());
+
+ return switch_block;
}
@@ -3164,9 +3244,7 @@ Expression* Parser::BuildIteratorNextResult(Expression* iterator,
new (zone()) ZoneList<Expression*>(1, zone());
is_spec_object_args->Add(left, zone());
Expression* is_spec_object_call = factory()->NewCallRuntime(
- ast_value_factory()->is_spec_object_string(),
- Runtime::FunctionForId(Runtime::kInlineIsSpecObject), is_spec_object_args,
- pos);
+ Runtime::kInlineIsSpecObject, is_spec_object_args, pos);
// %ThrowIteratorResultNotAnObject(result)
Expression* result_proxy_again = factory()->NewVariableProxy(result);
@@ -3174,9 +3252,7 @@ Expression* Parser::BuildIteratorNextResult(Expression* iterator,
new (zone()) ZoneList<Expression*>(1, zone());
throw_arguments->Add(result_proxy_again, zone());
Expression* throw_call = factory()->NewCallRuntime(
- ast_value_factory()->throw_iterator_result_not_an_object_string(),
- Runtime::FunctionForId(Runtime::kThrowIteratorResultNotAnObject),
- throw_arguments, pos);
+ Runtime::kThrowIteratorResultNotAnObject, throw_arguments, pos);
return factory()->NewBinaryOperation(
Token::AND,
@@ -3516,7 +3592,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
DeclarationParsingResult parsing_result;
if (peek() != Token::SEMICOLON) {
if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
- (peek() == Token::LET && allow_let())) {
+ (peek() == Token::LET && IsNextLetKeyword())) {
ParseVariableDeclarations(kForStatement, &parsing_result, CHECK_OK);
is_const = parsing_result.descriptor.mode == CONST;
@@ -3539,7 +3615,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
return nullptr;
}
if (parsing_result.first_initializer_loc.IsValid() &&
- (is_strict(language_mode()) || mode == ForEachStatement::ITERATE)) {
+ (is_strict(language_mode()) || mode == ForEachStatement::ITERATE ||
+ IsLexicalVariableMode(parsing_result.descriptor.mode))) {
if (mode == ForEachStatement::ITERATE) {
ReportMessageAt(parsing_result.first_initializer_loc,
MessageTemplate::kForOfLoopInitializer);
@@ -3724,8 +3801,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// Parsed initializer at this point.
// Detect attempts at 'let' declarations in sloppy mode.
- if (peek() == Token::IDENTIFIER && is_sloppy(language_mode()) &&
- is_let_identifier_expression) {
+ if (!allow_harmony_sloppy_let() && peek() == Token::IDENTIFIER &&
+ is_sloppy(language_mode()) && is_let_identifier_expression) {
ReportMessage(MessageTemplate::kSloppyLexical, NULL);
*ok = false;
return NULL;
@@ -3749,9 +3826,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Statement* next = NULL;
if (peek() != Token::RPAREN) {
- int next_pos = position();
Expression* exp = ParseExpression(true, CHECK_OK);
- next = factory()->NewExpressionStatement(exp, next_pos);
+ next = factory()->NewExpressionStatement(exp, exp->position());
}
Expect(Token::RPAREN, CHECK_OK);
@@ -3855,8 +3931,7 @@ Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
void ParserTraits::ParseArrowFunctionFormalParameters(
ParserFormalParameters* parameters, Expression* expr,
- const Scanner::Location& params_loc,
- Scanner::Location* duplicate_loc, bool* ok) {
+ const Scanner::Location& params_loc, bool* ok) {
if (parameters->Arity() >= Code::kMaxArguments) {
ReportMessageAt(params_loc, MessageTemplate::kMalformedArrowFunParamList);
*ok = false;
@@ -3883,8 +3958,7 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
DCHECK_EQ(binop->op(), Token::COMMA);
Expression* left = binop->left();
Expression* right = binop->right();
- ParseArrowFunctionFormalParameters(parameters, left, params_loc,
- duplicate_loc, ok);
+ ParseArrowFunctionFormalParameters(parameters, left, params_loc, ok);
if (!*ok) return;
// LHS of comma expression should be unparenthesized.
expr = right;
@@ -3897,11 +3971,15 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
if (is_rest) {
expr = expr->AsSpread()->expression();
parameters->has_rest = true;
+ parameters->rest_array_literal_index =
+ parser_->function_state_->NextMaterializedLiteralIndex();
+ ++parameters->materialized_literals_count;
}
if (parameters->is_simple) {
parameters->is_simple = !is_rest && expr->IsVariableProxy();
}
+ Expression* initializer = nullptr;
if (expr->IsVariableProxy()) {
// When the formal parameter was originally seen, it was parsed as a
// VariableProxy and recorded as unresolved in the scope. Here we undo that
@@ -3909,18 +3987,12 @@ void ParserTraits::ParseArrowFunctionFormalParameters(
// patterns; for patterns that happens uniformly in
// PatternRewriter::VisitVariableProxy).
parser_->scope_->RemoveUnresolved(expr->AsVariableProxy());
- }
-
- Expression* initializer = nullptr;
- if (!is_rest && parser_->allow_harmony_default_parameters() &&
- parser_->Check(Token::ASSIGN)) {
- ExpressionClassifier init_classifier;
- initializer =
- parser_->ParseAssignmentExpression(true, &init_classifier, ok);
- if (!*ok) return;
- parser_->ValidateExpression(&init_classifier, ok);
- if (!*ok) return;
- parameters->is_simple = false;
+ } else if (expr->IsAssignment()) {
+ Assignment* assignment = expr->AsAssignment();
+ DCHECK(parser_->allow_harmony_default_parameters());
+ DCHECK(!assignment->is_compound());
+ initializer = assignment->value();
+ expr = assignment->target();
}
AddFormalParameter(parameters, expr, initializer, is_rest);
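
After this change a default parameter arrives at the formal-parameter pass already parsed as the assignment `target = value`, so the pass splits the node instead of re-parsing an initializer after `=` as the deleted branch did. A toy model of the split; the AST shapes are illustrative, not V8's:

    #include <cassert>
    #include <memory>
    #include <string>

    // Minimal stand-in AST: a node is a variable if `name` is set, an
    // assignment if `target`/`value` are set.
    struct Expr {
      std::string name;
      std::unique_ptr<Expr> target;
      std::unique_ptr<Expr> value;
      bool IsAssignment() const { return target != nullptr; }
    };

    struct Parameter {
      Expr* pattern;
      Expr* initializer;  // nullptr when there is no default
    };

    // Models the new else-if branch above: pull the default value out of
    // the already-built assignment and keep its target as the pattern.
    Parameter SplitFormalParameter(Expr* expr) {
      Parameter param{expr, nullptr};
      if (expr->IsAssignment()) {
        param.initializer = expr->value.get();
        param.pattern = expr->target.get();
      }
      return param;
    }

    int main() {
      // Corresponds to the arrow function `(x = 42) => x`.
      Expr assign;
      assign.target = std::make_unique<Expr>();
      assign.target->name = "x";
      assign.value = std::make_unique<Expr>();
      Parameter p = SplitFormalParameter(&assign);
      assert(p.pattern->name == "x");
      assert(p.initializer != nullptr);
    }
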
@@ -3931,15 +4003,18 @@ void ParserTraits::ParseArrowFunctionFormalParameterList(
ParserFormalParameters* parameters, Expression* expr,
const Scanner::Location& params_loc,
Scanner::Location* duplicate_loc, bool* ok) {
- ParseArrowFunctionFormalParameters(parameters, expr, params_loc,
- duplicate_loc, ok);
+ if (expr->IsEmptyParentheses()) return;
+
+ ParseArrowFunctionFormalParameters(parameters, expr, params_loc, ok);
if (!*ok) return;
+ ExpressionClassifier classifier;
+ if (!parameters->is_simple) {
+ classifier.RecordNonSimpleParameter();
+ }
for (int i = 0; i < parameters->Arity(); ++i) {
auto parameter = parameters->at(i);
- ExpressionClassifier classifier;
- DeclareFormalParameter(
- parameters->scope, parameter, parameters->is_simple, &classifier);
+ DeclareFormalParameter(parameters->scope, parameter, &classifier);
if (!duplicate_loc->IsValid()) {
*duplicate_loc = classifier.duplicate_formal_parameter_error().location;
}
@@ -3955,6 +4030,11 @@ void ParserTraits::ReindexLiterals(const ParserFormalParameters& parameters) {
for (const auto p : parameters.params) {
if (p.pattern != nullptr) reindexer.Reindex(p.pattern);
}
+
+ if (parameters.has_rest) {
+ parameters.rest_array_literal_index = reindexer.NextIndex();
+ }
+
DCHECK(reindexer.count() <=
parser_->function_state_->materialized_literal_count());
}
@@ -4021,7 +4101,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Scope* declaration_scope = scope_->DeclarationScope();
Scope* original_declaration_scope = original_scope_->DeclarationScope();
Scope* scope = function_type == FunctionLiteral::DECLARATION &&
- is_sloppy(language_mode) && !allow_harmony_sloppy() &&
+ is_sloppy(language_mode) &&
+ !allow_harmony_sloppy_function() &&
(original_scope_ == original_declaration_scope ||
declaration_scope != original_declaration_scope)
? NewScope(declaration_scope, FUNCTION_SCOPE, kind)
@@ -4123,6 +4204,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
&expected_property_count, /*CHECK_OK*/ ok,
maybe_bookmark);
+ materialized_literal_count += formals.materialized_literals_count +
+ function_state.materialized_literal_count();
+
if (bookmark.HasBeenReset()) {
// Trigger eager (re-)parsing, just below this block.
is_lazily_parsed = false;
@@ -4201,6 +4285,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
CHECK_OK);
}
+ if (is_sloppy(language_mode) && allow_harmony_sloppy_function()) {
+ InsertSloppyBlockFunctionVarBindings(scope, CHECK_OK);
+ }
if (is_strict(language_mode) || allow_harmony_sloppy()) {
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
@@ -4221,11 +4308,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
if (should_be_used_once_hint)
function_literal->set_should_be_used_once_hint();
- if (scope->has_rest_parameter()) {
- // TODO(caitp): enable optimization of functions with rest params
- function_literal->set_dont_optimize_reason(kRestParameter);
- }
-
if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
return function_literal;
}
@@ -4315,12 +4397,9 @@ void Parser::AddAssertIsConstruct(ZoneList<Statement*>* body, int pos) {
ZoneList<Expression*>* arguments =
new (zone()) ZoneList<Expression*>(0, zone());
CallRuntime* construct_check = factory()->NewCallRuntime(
- ast_value_factory()->is_construct_call_string(),
- Runtime::FunctionForId(Runtime::kInlineIsConstructCall), arguments, pos);
+ Runtime::kInlineIsConstructCall, arguments, pos);
CallRuntime* non_callable_error = factory()->NewCallRuntime(
- ast_value_factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kThrowConstructorNonCallableError),
- arguments, pos);
+ Runtime::kThrowConstructorNonCallableError, arguments, pos);
IfStatement* if_statement = factory()->NewIfStatement(
factory()->NewUnaryOperation(Token::NOT, construct_check, pos),
factory()->NewReturnStatement(non_callable_error, pos),
@@ -4363,13 +4442,12 @@ Block* Parser::BuildParameterInitializationBlock(
factory()->NewBlock(NULL, 1, true, RelocInfo::kNoPosition);
for (int i = 0; i < parameters.params.length(); ++i) {
auto parameter = parameters.params[i];
- // TODO(caitp,rossberg): Remove special handling for rest once desugared.
- if (parameter.is_rest) break;
DeclarationDescriptor descriptor;
descriptor.declaration_kind = DeclarationDescriptor::PARAMETER;
descriptor.parser = this;
descriptor.declaration_scope = scope_;
descriptor.scope = scope_;
+ descriptor.hoist_scope = nullptr;
descriptor.mode = LET;
descriptor.is_const = false;
descriptor.needs_init = true;
@@ -4380,6 +4458,7 @@ Block* Parser::BuildParameterInitializationBlock(
factory()->NewVariableProxy(parameters.scope->parameter(i));
if (parameter.initializer != nullptr) {
// IS_UNDEFINED($param) ? initializer : $param
+ DCHECK(!parameter.is_rest);
auto condition = factory()->NewCompareOperation(
Token::EQ_STRICT,
factory()->NewVariableProxy(parameters.scope->parameter(i)),
@@ -4389,11 +4468,113 @@ Block* Parser::BuildParameterInitializationBlock(
condition, parameter.initializer, initial_value,
RelocInfo::kNoPosition);
descriptor.initialization_pos = parameter.initializer->position();
+ } else if (parameter.is_rest) {
+ // $rest = [];
+ // for (var $argument_index = $rest_index;
+ // $argument_index < %_ArgumentsLength();
+ // ++$argument_index) {
+ // %AppendElement($rest, %_Arguments($argument_index));
+ // }
+ // let <param> = $rest;
+ DCHECK(parameter.pattern->IsVariableProxy());
+ DCHECK_EQ(i, parameters.params.length() - 1);
+
+ int pos = parameter.pattern->position();
+ Variable* temp_var = parameters.scope->parameter(i);
+ auto empty_values = new (zone()) ZoneList<Expression*>(0, zone());
+ auto empty_array = factory()->NewArrayLiteral(
+ empty_values, parameters.rest_array_literal_index,
+ is_strong(language_mode()), RelocInfo::kNoPosition);
+
+ auto init_array = factory()->NewAssignment(
+ Token::INIT_VAR, factory()->NewVariableProxy(temp_var), empty_array,
+ RelocInfo::kNoPosition);
+
+ auto loop = factory()->NewForStatement(NULL, RelocInfo::kNoPosition);
+
+ auto argument_index =
+ parameters.scope->NewTemporary(ast_value_factory()->empty_string());
+ auto init = factory()->NewExpressionStatement(
+ factory()->NewAssignment(
+ Token::INIT_VAR, factory()->NewVariableProxy(argument_index),
+ factory()->NewSmiLiteral(i, RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+
+ auto empty_arguments = new (zone()) ZoneList<Expression*>(0, zone());
+
+ // $argument_index < arguments.length
+ auto cond = factory()->NewCompareOperation(
+ Token::LT, factory()->NewVariableProxy(argument_index),
+ factory()->NewCallRuntime(Runtime::kInlineArgumentsLength,
+ empty_arguments, RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+
+ // ++$argument_index
+ auto next = factory()->NewExpressionStatement(
+ factory()->NewCountOperation(
+ Token::INC, true, factory()->NewVariableProxy(argument_index),
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+
+ // %_Arguments($argument_index)
+ auto arguments_args = new (zone()) ZoneList<Expression*>(1, zone());
+ arguments_args->Add(factory()->NewVariableProxy(argument_index), zone());
+
+ // %AppendElement($rest, %_Arguments($argument_index))
+ auto append_element_args = new (zone()) ZoneList<Expression*>(2, zone());
+
+ append_element_args->Add(factory()->NewVariableProxy(temp_var), zone());
+ append_element_args->Add(
+ factory()->NewCallRuntime(Runtime::kInlineArguments, arguments_args,
+ RelocInfo::kNoPosition),
+ zone());
+
+ auto body = factory()->NewExpressionStatement(
+ factory()->NewCallRuntime(Runtime::kAppendElement,
+ append_element_args,
+ RelocInfo::kNoPosition),
+ RelocInfo::kNoPosition);
+
+ loop->Initialize(init, cond, next, body);
+
+ init_block->AddStatement(
+ factory()->NewExpressionStatement(init_array, RelocInfo::kNoPosition),
+ zone());
+
+ init_block->AddStatement(loop, zone());
+
+ descriptor.initialization_pos = pos;
+ }
+
+ Scope* param_scope = scope_;
+ Block* param_block = init_block;
+ if (!parameter.is_simple() && scope_->calls_sloppy_eval()) {
+ param_scope = NewScope(scope_, BLOCK_SCOPE);
+ param_scope->set_is_declaration_scope();
+ param_scope->set_start_position(parameter.pattern->position());
+ param_scope->set_end_position(RelocInfo::kNoPosition);
+ param_scope->RecordEvalCall();
+ param_block = factory()->NewBlock(NULL, 8, true, RelocInfo::kNoPosition);
+ param_block->set_scope(param_scope);
+ descriptor.hoist_scope = scope_;
+ }
+
+ {
+ BlockState block_state(&scope_, param_scope);
+ DeclarationParsingResult::Declaration decl(
+ parameter.pattern, parameter.pattern->position(), initial_value);
+ PatternRewriter::DeclareAndInitializeVariables(param_block, &descriptor,
+ &decl, nullptr, CHECK_OK);
+ }
+
+ if (!parameter.is_simple() && scope_->calls_sloppy_eval()) {
+ param_scope = param_scope->FinalizeBlockScope();
+ if (param_scope != nullptr) {
+ CheckConflictingVarDeclarations(param_scope, CHECK_OK);
+ }
+ init_block->AddStatement(param_block, zone());
}
- DeclarationParsingResult::Declaration decl(
- parameter.pattern, parameter.pattern->position(), initial_value);
- PatternRewriter::DeclareAndInitializeVariables(init_block, &descriptor,
- &decl, nullptr, CHECK_OK);
}
return init_block;
}
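
The new is_rest branch desugars a rest parameter into exactly the loop its comment shows: initialize an empty array, then append %_Arguments($argument_index) for every index from the rest position up to %_ArgumentsLength(). The same logic as a standalone sketch, with plain vectors standing in for the arguments object and the array literal:

    #include <cassert>
    #include <vector>

    // Models the generated loop for a rest parameter that is the
    // `rest_index`-th formal; names are illustrative.
    std::vector<int> CollectRest(const std::vector<int>& arguments,
                                 size_t rest_index) {
      std::vector<int> rest;  // $rest = [];
      for (size_t i = rest_index; i < arguments.size(); ++i) {
        rest.push_back(arguments[i]);  // %AppendElement($rest, %_Arguments(i))
      }
      return rest;
    }

    int main() {
      // function f(a, ...r) {} called as f(1, 2, 3) gives r == [2, 3].
      std::vector<int> rest = CollectRest({1, 2, 3}, 1);
      assert((rest == std::vector<int>{2, 3}));
    }
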
@@ -4422,12 +4603,12 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
// For concise constructors, check that they are constructed,
// not called.
- if (i::IsConstructor(kind)) {
+ if (IsClassConstructor(kind)) {
AddAssertIsConstruct(result, pos);
}
ZoneList<Statement*>* body = result;
- Scope* inner_scope = nullptr;
+ Scope* inner_scope = scope_;
Block* inner_block = nullptr;
if (!parameters.is_simple) {
inner_scope = NewScope(scope_, BLOCK_SCOPE);
@@ -4439,16 +4620,14 @@ ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
}
{
- BlockState block_state(&scope_, inner_scope ? inner_scope : scope_);
+ BlockState block_state(&scope_, inner_scope);
// For generators, allocate and yield an iterator on function entry.
if (IsGeneratorFunction(kind)) {
ZoneList<Expression*>* arguments =
new(zone()) ZoneList<Expression*>(0, zone());
CallRuntime* allocation = factory()->NewCallRuntime(
- ast_value_factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject), arguments,
- pos);
+ Runtime::kCreateJSGeneratorObject, arguments, pos);
VariableProxy* init_proxy = factory()->NewVariableProxy(
function_state_->generator_object_variable());
Assignment* assignment = factory()->NewAssignment(
@@ -4558,7 +4737,7 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
SET_ALLOW(harmony_sloppy_let);
SET_ALLOW(harmony_rest_parameters);
SET_ALLOW(harmony_default_parameters);
- SET_ALLOW(harmony_spreadcalls);
+ SET_ALLOW(harmony_spread_calls);
SET_ALLOW(harmony_destructuring);
SET_ALLOW(harmony_spread_arrays);
SET_ALLOW(harmony_new_target);
@@ -4566,7 +4745,8 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
#undef SET_ALLOW
}
PreParser::PreParseResult result = reusable_preparser_->PreParseLazyFunction(
- language_mode(), function_state_->kind(), logger, bookmark);
+ language_mode(), function_state_->kind(), scope_->has_simple_parameters(),
+ logger, bookmark);
if (pre_parse_timer_ != NULL) {
pre_parse_timer_->Stop();
}
@@ -4706,40 +4886,45 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) {
const Runtime::Function* function = Runtime::FunctionForName(name->string());
- // Check for built-in IS_VAR macro.
- if (function != NULL &&
- function->intrinsic_type == Runtime::RUNTIME &&
- function->function_id == Runtime::kIS_VAR) {
- // %IS_VAR(x) evaluates to x if x is a variable,
- // leads to a parse error otherwise. Could be implemented as an
- // inline function %_IS_VAR(x) to eliminate this special case.
- if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
- return args->at(0);
- } else {
- ReportMessage(MessageTemplate::kNotIsvar);
+ if (function != NULL) {
+ // Check for possible name clash.
+ DCHECK_EQ(Context::kNotFound,
+ Context::IntrinsicIndexForName(name->string()));
+ // Check for built-in IS_VAR macro.
+ if (function->function_id == Runtime::kIS_VAR) {
+ DCHECK_EQ(Runtime::RUNTIME, function->intrinsic_type);
+ // %IS_VAR(x) evaluates to x if x is a variable; otherwise it
+ // leads to a parse error. Could be implemented as an
+ // inline function %_IS_VAR(x) to eliminate this special case.
+ if (args->length() == 1 && args->at(0)->AsVariableProxy() != NULL) {
+ return args->at(0);
+ } else {
+ ReportMessage(MessageTemplate::kNotIsvar);
+ *ok = false;
+ return NULL;
+ }
+ }
+
+ // Check that the expected number of arguments are being passed.
+ if (function->nargs != -1 && function->nargs != args->length()) {
+ ReportMessage(MessageTemplate::kIllegalAccess);
*ok = false;
return NULL;
}
- }
- // Check that the expected number of arguments are being passed.
- if (function != NULL &&
- function->nargs != -1 &&
- function->nargs != args->length()) {
- ReportMessage(MessageTemplate::kIllegalAccess);
- *ok = false;
- return NULL;
+ return factory()->NewCallRuntime(function, args, pos);
}
- // Check that the function is defined if it's an inline runtime call.
- if (function == NULL && name->FirstCharacter() == '_') {
+ int context_index = Context::IntrinsicIndexForName(name->string());
+
+ // Check that the function is defined.
+ if (context_index == Context::kNotFound) {
ParserTraits::ReportMessage(MessageTemplate::kNotDefined, name);
*ok = false;
return NULL;
}
- // We have a valid intrinsics call or a call to a builtin.
- return factory()->NewCallRuntime(name, function, args, pos);
+ return factory()->NewCallRuntime(context_index, args, pos);
}
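
ParseV8Intrinsic now resolves a %Name(...) reference in two steps: the runtime-function table first, with an arity check, and then the native-context intrinsics; a name found in neither is reported as not defined, where the old code only checked names starting with '_'. A toy model of the lookup order; the table contents and the result enum are illustrative:

    #include <cassert>
    #include <map>
    #include <string>

    struct RuntimeFunction {
      int nargs;  // -1 means any arity is accepted
    };

    enum class Lookup { kRuntimeCall, kContextIntrinsic, kArityError,
                        kNotDefined };

    // Mirrors the control flow above: runtime table, then context
    // intrinsics, then a "not defined" parse error.
    Lookup ResolveIntrinsic(
        const std::string& name, int argc,
        const std::map<std::string, RuntimeFunction>& runtime,
        const std::map<std::string, int>& context_intrinsics) {
      auto it = runtime.find(name);
      if (it != runtime.end()) {
        if (it->second.nargs != -1 && it->second.nargs != argc)
          return Lookup::kArityError;
        return Lookup::kRuntimeCall;
      }
      if (context_intrinsics.count(name) == 0) return Lookup::kNotDefined;
      return Lookup::kContextIntrinsic;
    }

    int main() {
      std::map<std::string, RuntimeFunction> runtime{{"GetPrototype", {1}}};
      std::map<std::string, int> ctx{{"reflect_apply", 0}};
      assert(ResolveIntrinsic("GetPrototype", 1, runtime, ctx) ==
             Lookup::kRuntimeCall);
      assert(ResolveIntrinsic("GetPrototype", 2, runtime, ctx) ==
             Lookup::kArityError);
      assert(ResolveIntrinsic("reflect_apply", 3, runtime, ctx) ==
             Lookup::kContextIntrinsic);
      assert(ResolveIntrinsic("nope", 0, runtime, ctx) ==
             Lookup::kNotDefined);
    }
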
@@ -4765,6 +4950,41 @@ void Parser::CheckConflictingVarDeclarations(Scope* scope, bool* ok) {
}
+void Parser::InsertSloppyBlockFunctionVarBindings(Scope* scope, bool* ok) {
+ // For each variable which is used as a function declaration in a sloppy
+ // block,
+ DCHECK(scope->is_declaration_scope());
+ SloppyBlockFunctionMap* map = scope->sloppy_block_function_map();
+ for (ZoneHashMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
+ AstRawString* name = static_cast<AstRawString*>(p->key);
+ // If the variable wouldn't conflict with a lexical declaration,
+ Variable* var = scope->LookupLocal(name);
+ if (var == nullptr || !IsLexicalVariableMode(var->mode())) {
+ // Declare a var-style binding for the function in the outer scope
+ VariableProxy* proxy = scope->NewUnresolved(factory(), name);
+ Declaration* declaration = factory()->NewVariableDeclaration(
+ proxy, VAR, scope, RelocInfo::kNoPosition);
+ Declare(declaration, DeclarationDescriptor::NORMAL, true, ok, scope);
+ DCHECK(*ok);  // Based on the preceding check, this should not fail.
+ if (!*ok) return;
+
+ // Write in assignments to var for each block-scoped function declaration
+ auto delegates = static_cast<SloppyBlockFunctionMap::Vector*>(p->value);
+ for (SloppyBlockFunctionStatement* delegate : *delegates) {
+ // Read from the local lexical scope and write to the function scope
+ VariableProxy* to = scope->NewUnresolved(factory(), name);
+ VariableProxy* from = delegate->scope()->NewUnresolved(factory(), name);
+ Expression* assignment = factory()->NewAssignment(
+ Token::ASSIGN, to, from, RelocInfo::kNoPosition);
+ Statement* statement = factory()->NewExpressionStatement(
+ assignment, RelocInfo::kNoPosition);
+ delegate->set_statement(statement);
+ }
+ }
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Parser support
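
InsertSloppyBlockFunctionVarBindings implements ES2015 Annex B 3.3: a function declared inside a sloppy-mode block also gets a function-scope var binding, populated by an assignment at each block declaration site, unless a lexical binding of the same name would conflict. A standalone sketch of that decision; the scope representation is an illustrative stand-in:

    #include <cassert>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    struct FunctionScope {
      std::set<std::string> lexical_names;   // let/const/class bindings
      std::set<std::string> var_names;       // hoisted var bindings
      std::vector<std::string> assignments;  // one per declaration site
    };

    // For each block-scoped function name (with its number of declaration
    // sites), declare a var and synthesize the per-site assignments,
    // skipping names shadowed by a lexical declaration.
    void HoistSloppyBlockFunctions(
        const std::map<std::string, int>& block_functions,
        FunctionScope* scope) {
      for (const auto& entry : block_functions) {
        const std::string& name = entry.first;
        if (scope->lexical_names.count(name) > 0) continue;
        scope->var_names.insert(name);
        for (int i = 0; i < entry.second; ++i) {
          scope->assignments.push_back(name + " = <block-local " + name + ">");
        }
      }
    }

    int main() {
      FunctionScope scope;
      scope.lexical_names.insert("g");  // `let g` blocks the hoist for g
      HoistSloppyBlockFunctions({{"f", 2}, {"g", 1}}, &scope);
      assert(scope.var_names.count("f") == 1);
      assert(scope.var_names.count("g") == 0);
      assert(scope.assignments.size() == 2);
    }
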
@@ -5881,9 +6101,8 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
ZoneList<Expression*>* args =
new (zone()) ZoneList<Expression*>(1, zone());
args->Add(sub, zone());
- Expression* middle = factory()->NewCallRuntime(
- ast_value_factory()->to_string_string(), NULL, args,
- sub->position());
+ Expression* middle = factory()->NewCallRuntime(Runtime::kInlineToString,
+ args, sub->position());
expr = factory()->NewBinaryOperation(
Token::ADD, factory()->NewBinaryOperation(
@@ -5915,7 +6134,7 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
this->CheckPossibleEvalCall(tag, scope_);
Expression* call_site = factory()->NewCallRuntime(
- ast_value_factory()->get_template_callsite_string(), NULL, args, start);
+ Context::GET_TEMPLATE_CALL_SITE_INDEX, args, start);
// Call TagFn
ZoneList<Expression*>* call_args =
@@ -5971,10 +6190,9 @@ ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
ZoneList<Expression*>* spread_list =
new (zone()) ZoneList<Expression*>(0, zone());
spread_list->Add(list->at(0)->AsSpread()->expression(), zone());
- args->Add(
- factory()->NewCallRuntime(ast_value_factory()->spread_iterable_string(),
- NULL, spread_list, RelocInfo::kNoPosition),
- zone());
+ args->Add(factory()->NewCallRuntime(Context::SPREAD_ITERABLE_INDEX,
+ spread_list, RelocInfo::kNoPosition),
+ zone());
return args;
} else {
// Spread-call with multiple arguments produces array literals for each
@@ -6008,16 +6226,14 @@ ZoneList<v8::internal::Expression*>* Parser::PrepareSpreadArguments(
ZoneList<v8::internal::Expression*>* spread_list =
new (zone()) ZoneList<v8::internal::Expression*>(1, zone());
spread_list->Add(list->at(i++)->AsSpread()->expression(), zone());
- args->Add(factory()->NewCallRuntime(
- ast_value_factory()->spread_iterable_string(), NULL,
- spread_list, RelocInfo::kNoPosition),
+ args->Add(factory()->NewCallRuntime(Context::SPREAD_ITERABLE_INDEX,
+ spread_list, RelocInfo::kNoPosition),
zone());
}
list = new (zone()) ZoneList<v8::internal::Expression*>(1, zone());
- list->Add(factory()->NewCallRuntime(
- ast_value_factory()->spread_arguments_string(), NULL, args,
- RelocInfo::kNoPosition),
+ list->Add(factory()->NewCallRuntime(Context::SPREAD_ARGUMENTS_INDEX, args,
+ RelocInfo::kNoPosition),
zone());
return list;
}
@@ -6030,16 +6246,15 @@ Expression* Parser::SpreadCall(Expression* function,
int pos) {
if (function->IsSuperCallReference()) {
// Super calls
- // %ReflectConstruct(%GetPrototype(<this-function>), args, new.target))
+ // %reflect_construct(%GetPrototype(<this-function>), args, new.target)
ZoneList<Expression*>* tmp = new (zone()) ZoneList<Expression*>(1, zone());
tmp->Add(function->AsSuperCallReference()->this_function_var(), zone());
- Expression* get_prototype = factory()->NewCallRuntime(
- ast_value_factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kGetPrototype), tmp, pos);
+ Expression* get_prototype =
+ factory()->NewCallRuntime(Runtime::kGetPrototype, tmp, pos);
args->InsertAt(0, get_prototype, zone());
args->Add(function->AsSuperCallReference()->new_target_var(), zone());
- return factory()->NewCallRuntime(
- ast_value_factory()->reflect_construct_string(), NULL, args, pos);
+ return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args,
+ pos);
} else {
if (function->IsProperty()) {
// Method calls
@@ -6067,8 +6282,7 @@ Expression* Parser::SpreadCall(Expression* function,
args->InsertAt(1, factory()->NewUndefinedLiteral(RelocInfo::kNoPosition),
zone());
}
- return factory()->NewCallRuntime(
- ast_value_factory()->reflect_apply_string(), NULL, args, pos);
+ return factory()->NewCallRuntime(Context::REFLECT_APPLY_INDEX, args, pos);
}
}
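
Taken together, PrepareSpreadArguments and SpreadCall desugar a call containing spreads into one flattened argument array (runs of plain arguments interleaved with %spread_iterable results) that is dispatched through %reflect_apply, or %reflect_construct for super and `new`. A toy model of the flattening step; the Arg shape is illustrative:

    #include <cassert>
    #include <vector>

    // A plain argument carries one value; a spread carries every element
    // of its (already iterated) iterable.
    struct Arg {
      bool is_spread;
      std::vector<int> values;
    };

    // Models %spread_arguments over the pieces: concatenate in order.
    std::vector<int> FlattenSpreadArguments(const std::vector<Arg>& args) {
      std::vector<int> flat;
      for (const Arg& a : args) {
        flat.insert(flat.end(), a.values.begin(), a.values.end());
      }
      return flat;
    }

    int main() {
      // Corresponds to f(1, ...[2, 3], 4).
      std::vector<int> flat = FlattenSpreadArguments(
          {{false, {1}}, {true, {2, 3}}, {false, {4}}});
      assert((flat == std::vector<int>{1, 2, 3, 4}));
    }
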
@@ -6078,8 +6292,7 @@ Expression* Parser::SpreadCallNew(Expression* function,
int pos) {
args->InsertAt(0, function, zone());
- return factory()->NewCallRuntime(
- ast_value_factory()->reflect_construct_string(), NULL, args, pos);
+ return factory()->NewCallRuntime(Context::REFLECT_CONSTRUCT_INDEX, args, pos);
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index a0be1dfe7e..cf4cdad66b 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -546,6 +546,9 @@ struct ParserFormalParameters : FormalParametersBase {
Expression* pattern;
Expression* initializer;
bool is_rest;
+ bool is_simple() const {
+ return pattern->IsVariableProxy() && initializer == nullptr && !is_rest;
+ }
};
explicit ParserFormalParameters(Scope* scope)
@@ -783,11 +786,11 @@ class ParserTraits {
Expression* initializer, bool is_rest);
V8_INLINE void DeclareFormalParameter(
Scope* scope, const ParserFormalParameters::Parameter& parameter,
- bool is_simple, ExpressionClassifier* classifier);
- void ParseArrowFunctionFormalParameters(
- ParserFormalParameters* parameters, Expression* params,
- const Scanner::Location& params_loc,
- Scanner::Location* duplicate_loc, bool* ok);
+ ExpressionClassifier* classifier);
+ void ParseArrowFunctionFormalParameters(ParserFormalParameters* parameters,
+ Expression* params,
+ const Scanner::Location& params_loc,
+ bool* ok);
void ParseArrowFunctionFormalParameterList(
ParserFormalParameters* parameters, Expression* params,
const Scanner::Location& params_loc,
@@ -974,6 +977,7 @@ class Parser : public ParserBase<ParserTraits> {
Parser* parser;
Scope* declaration_scope;
Scope* scope;
+ Scope* hoist_scope;
VariableMode mode;
bool is_const;
bool needs_init;
@@ -1069,8 +1073,8 @@ class Parser : public ParserBase<ParserTraits> {
Statement* ParseWithStatement(ZoneList<const AstRawString*>* labels,
bool* ok);
CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
- SwitchStatement* ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
- bool* ok);
+ Statement* ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
+ bool* ok);
DoWhileStatement* ParseDoWhileStatement(ZoneList<const AstRawString*>* labels,
bool* ok);
WhileStatement* ParseWhileStatement(ZoneList<const AstRawString*>* labels,
@@ -1130,11 +1134,14 @@ class Parser : public ParserBase<ParserTraits> {
// hoisted over such a scope.
void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
+ // Implement sloppy block-scoped functions, ES2015 Annex B 3.3
+ void InsertSloppyBlockFunctionVarBindings(Scope* scope, bool* ok);
+
// Parser support
VariableProxy* NewUnresolved(const AstRawString* name, VariableMode mode);
Variable* Declare(Declaration* declaration,
DeclarationDescriptor::Kind declaration_kind, bool resolve,
- bool* ok);
+ bool* ok, Scope* declaration_scope = nullptr);
bool TargetStackContainsLabel(const AstRawString* label);
BreakableStatement* LookupBreakTarget(const AstRawString* label, bool* ok);
@@ -1316,7 +1323,8 @@ Expression* ParserTraits::SpreadCallNew(
void ParserTraits::AddFormalParameter(
ParserFormalParameters* parameters,
Expression* pattern, Expression* initializer, bool is_rest) {
- bool is_simple = pattern->IsVariableProxy() && initializer == nullptr;
+ bool is_simple =
+ !is_rest && pattern->IsVariableProxy() && initializer == nullptr;
DCHECK(parser_->allow_harmony_destructuring() ||
parser_->allow_harmony_rest_parameters() ||
parser_->allow_harmony_default_parameters() || is_simple);
@@ -1331,14 +1339,15 @@ void ParserTraits::AddFormalParameter(
void ParserTraits::DeclareFormalParameter(
Scope* scope, const ParserFormalParameters::Parameter& parameter,
- bool is_simple, ExpressionClassifier* classifier) {
+ ExpressionClassifier* classifier) {
bool is_duplicate = false;
- // TODO(caitp): Remove special handling for rest once desugaring is in.
- auto name = is_simple || parameter.is_rest
- ? parameter.name : parser_->ast_value_factory()->empty_string();
- auto mode = is_simple || parameter.is_rest ? VAR : TEMPORARY;
- Variable* var =
- scope->DeclareParameter(name, mode, parameter.is_rest, &is_duplicate);
+ bool is_simple = classifier->is_simple_parameter_list();
+ auto name = parameter.name;
+ auto mode = is_simple ? VAR : TEMPORARY;
+ if (!is_simple) scope->SetHasNonSimpleParameters();
+ bool is_optional = parameter.initializer != nullptr;
+ Variable* var = scope->DeclareParameter(
+ name, mode, is_optional, parameter.is_rest, &is_duplicate);
if (is_duplicate) {
classifier->RecordDuplicateFormalParameterError(
parser_->scanner()->location());
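
The reworked DeclareFormalParameter keys the binding mode off the parameter list as a whole: once any formal has a default, a pattern, or a rest marker, every formal is declared TEMPORARY and re-bound by the parameter initialization block, while a fully simple list keeps plain VAR bindings. A sketch of that rule; the types are illustrative:

    #include <cassert>
    #include <vector>

    enum class Mode { VAR, TEMPORARY };

    struct Formal {
      bool has_initializer;
      bool is_pattern;
      bool is_rest;
    };

    // The list is simple only if every formal is a bare name; the chosen
    // mode then applies uniformly to all formals.
    Mode FormalBindingMode(const std::vector<Formal>& formals) {
      for (const Formal& f : formals) {
        if (f.has_initializer || f.is_pattern || f.is_rest)
          return Mode::TEMPORARY;
      }
      return Mode::VAR;
    }

    int main() {
      // (a, b) stays simple; (a, b = 1) makes every formal a temporary.
      assert(FormalBindingMode({{false, false, false},
                                {false, false, false}}) == Mode::VAR);
      assert(FormalBindingMode({{false, false, false},
                                {true, false, false}}) == Mode::TEMPORARY);
    }
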
diff --git a/deps/v8/src/pattern-rewriter.cc b/deps/v8/src/pattern-rewriter.cc
index 10702d65ce..e4c602aa48 100644
--- a/deps/v8/src/pattern-rewriter.cc
+++ b/deps/v8/src/pattern-rewriter.cc
@@ -54,7 +54,8 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
proxy, descriptor_->mode, descriptor_->scope,
descriptor_->declaration_pos);
Variable* var = parser->Declare(declaration, descriptor_->declaration_kind,
- descriptor_->mode != VAR, ok_);
+ descriptor_->mode != VAR, ok_,
+ descriptor_->hoist_scope);
if (!*ok_) return;
DCHECK_NOT_NULL(var);
DCHECK(!proxy->is_resolved() || proxy->var() == var);
@@ -141,10 +142,9 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
// and add it to the initialization statement block.
// Note that the function does different things depending on
// the number of arguments (1 or 2).
- initialize = factory()->NewCallRuntime(
- ast_value_factory()->initialize_const_global_string(),
- Runtime::FunctionForId(Runtime::kInitializeConstGlobal), arguments,
- descriptor_->initialization_pos);
+ initialize =
+ factory()->NewCallRuntime(Runtime::kInitializeConstGlobal, arguments,
+ descriptor_->initialization_pos);
} else {
// Add language mode.
// We may want to pass singleton to avoid Literal allocations.
@@ -162,10 +162,9 @@ void Parser::PatternRewriter::VisitVariableProxy(VariableProxy* pattern) {
value = NULL; // zap the value to avoid the unnecessary assignment
// Construct the call to Runtime_InitializeVarGlobal
// and add it to the initialization statement block.
- initialize = factory()->NewCallRuntime(
- ast_value_factory()->initialize_var_global_string(),
- Runtime::FunctionForId(Runtime::kInitializeVarGlobal), arguments,
- descriptor_->declaration_pos);
+ initialize =
+ factory()->NewCallRuntime(Runtime::kInitializeVarGlobal, arguments,
+ descriptor_->declaration_pos);
} else {
initialize = NULL;
}
@@ -311,7 +310,7 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
if (spread != nullptr) {
// array = [];
- // if (!done) $concatIterableToArray(array, iterator);
+ // if (!done) %concat_iterable_to_array(array, iterator);
auto empty_exprs = new (zone()) ZoneList<Expression*>(0, zone());
auto array = CreateTempVar(factory()->NewArrayLiteral(
empty_exprs,
@@ -323,9 +322,9 @@ void Parser::PatternRewriter::VisitArrayLiteral(ArrayLiteral* node) {
auto arguments = new (zone()) ZoneList<Expression*>(2, zone());
arguments->Add(factory()->NewVariableProxy(array), zone());
arguments->Add(factory()->NewVariableProxy(iterator), zone());
- auto spread_into_array_call = factory()->NewCallRuntime(
- ast_value_factory()->concat_iterable_to_array_string(), nullptr,
- arguments, RelocInfo::kNoPosition);
+ auto spread_into_array_call =
+ factory()->NewCallRuntime(Context::CONCAT_ITERABLE_TO_ARRAY_INDEX,
+ arguments, RelocInfo::kNoPosition);
auto if_statement = factory()->NewIfStatement(
factory()->NewUnaryOperation(Token::NOT,
@@ -363,7 +362,12 @@ void Parser::PatternRewriter::VisitAssignment(Assignment* node) {
void Parser::PatternRewriter::VisitSpread(Spread* node) {
- // TODO(dslomov): implement.
+ UNREACHABLE();
+}
+
+
+void Parser::PatternRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
+ UNREACHABLE();
}
@@ -391,6 +395,7 @@ NOT_A_PATTERN(CountOperation)
NOT_A_PATTERN(DebuggerStatement)
NOT_A_PATTERN(DoWhileStatement)
NOT_A_PATTERN(EmptyStatement)
+NOT_A_PATTERN(SloppyBlockFunctionStatement)
NOT_A_PATTERN(ExportDeclaration)
NOT_A_PATTERN(ExpressionStatement)
NOT_A_PATTERN(ForInStatement)
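
The pattern rewriter's spread handling matches its updated comment: after the fixed elements of `let [a, ...rest] = iterable` are consumed, any remaining iterator values are drained into a fresh array unless the iterator already reported done. A standalone sketch with a vector-backed toy iterator:

    #include <cassert>
    #include <vector>

    // Toy iterator over a vector; stands in for the JS iterator protocol.
    struct ToyIterator {
      const std::vector<int>* backing;
      size_t pos = 0;
      bool done() const { return pos >= backing->size(); }
      int next() { return (*backing)[pos++]; }
    };

    // Models %concat_iterable_to_array(array, iterator): start from an
    // empty array literal and append until the iterator is exhausted.
    std::vector<int> ConcatIterableToArray(ToyIterator* it) {
      std::vector<int> array;
      while (!it->done()) array.push_back(it->next());
      return array;
    }

    int main() {
      // let [a, ...rest] = [1, 2, 3]; gives a == 1, rest == [2, 3].
      std::vector<int> source{1, 2, 3};
      ToyIterator it{&source};
      int a = it.next();
      std::vector<int> rest = ConcatIterableToArray(&it);
      assert(a == 1);
      assert((rest == std::vector<int>{2, 3}));
    }
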
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 35968fc682..b1e2825751 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -698,7 +698,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
*(p + 3) = instr4;
*(p + 4) = instr5;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(p, 5 * kInstrSize);
+ Assembler::FlushICacheWithoutIsolate(p, 5 * kInstrSize);
}
#else
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
@@ -713,7 +713,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
*p = instr1;
*(p + 1) = instr2;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(p, 2 * kInstrSize);
+ Assembler::FlushICacheWithoutIsolate(p, 2 * kInstrSize);
}
#endif
return;
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 542968d8e7..6bbb53c4ba 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -34,6 +34,8 @@
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.
+#include "src/ppc/assembler-ppc.h"
+
#if V8_TARGET_ARCH_PPC
#include "src/base/bits.h"
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
index 6ecfcea2f6..e08c865e4e 100644
--- a/deps/v8/src/ppc/builtins-ppc.cc
+++ b/deps/v8/src/ppc/builtins-ppc.cc
@@ -23,12 +23,19 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
// -- r3 : number of arguments excluding receiver
// -- r4 : called function (only guaranteed when
// extra_args requires it)
- // -- cp : context
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument (argc == r0)
// -- sp[4 * argc] : receiver
// -----------------------------------
+ __ AssertFunction(r4);
+
+ // Make sure we operate in the context of the called function (for example
+ // ConstructStubs implemented in C++ will be run in the context of the caller
+ // instead of the callee, due to the way that [[Construct]] is defined for
+ // ordinary functions).
+ // TODO(bmeurer): Can we make this more robust?
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
@@ -132,7 +139,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
@@ -140,122 +148,132 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1, r5, r6);
-
- Register function = r4;
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r5);
- __ cmp(function, r5);
- __ Assert(eq, kUnexpectedStringFunction);
- }
- // Load the first arguments in r3 and get rid of the rest.
+ // 1. Load the first argument into r3 and get rid of the rest (including the
+ // receiver).
Label no_arguments;
- __ cmpi(r3, Operand::Zero());
- __ beq(&no_arguments);
- // First args = sp[(argc - 1) * 4].
- __ subi(r3, r3, Operand(1));
- __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
- __ add(sp, sp, r3);
- __ LoadP(r3, MemOperand(sp));
- // sp now point to args[0], drop args[0] + receiver.
- __ Drop(2);
-
- Register argument = r5;
- Label not_cached, argument_is_string;
- __ LookupNumberStringCache(r3, // Input.
- argument, // Result.
- r6, // Scratch.
- r7, // Scratch.
- r8, // Scratch.
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1, r6, r7);
- __ bind(&argument_is_string);
+ {
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_arguments);
+ __ subi(r3, r3, Operand(1));
+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+ __ LoadPUX(r3, MemOperand(sp, r3));
+ __ Drop(2);
+ }
- // ----------- S t a t e -------------
- // -- r5 : argument converted to string
- // -- r4 : constructor function
- // -- lr : return address
- // -----------------------------------
+ // 2a. At least one argument: return r3 if it's a string, otherwise
+ // dispatch to the appropriate conversion.
+ Label to_string, symbol_descriptive_string;
+ {
+ __ JumpIfSmi(r3, &to_string);
+ STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+ __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
+ __ bgt(&to_string);
+ __ beq(&symbol_descriptive_string);
+ __ Ret();
+ }
- Label gc_required;
- __ Allocate(JSValue::kSize,
- r3, // Result.
- r6, // Scratch.
- r7, // Scratch.
- &gc_required, TAG_OBJECT);
+ // 2b. No arguments, return the empty string (and pop the receiver).
+ __ bind(&no_arguments);
+ {
+ __ LoadRoot(r3, Heap::kempty_stringRootIndex);
+ __ Ret(1);
+ }
- // Initialising the String Object.
- Register map = r6;
- __ LoadGlobalFunctionInitialMap(function, map, r7);
- if (FLAG_debug_code) {
- __ lbz(r7, FieldMemOperand(map, Map::kInstanceSizeOffset));
- __ cmpi(r7, Operand(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
- __ lbz(r7, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
- __ cmpi(r7, Operand::Zero());
- __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
+ // 3a. Convert r3 to a string.
+ __ bind(&to_string);
+ {
+ ToStringStub stub(masm->isolate());
+ __ TailCallStub(&stub);
}
- __ StoreP(map, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ // 3b. Convert symbol in r3 to a string.
+ __ bind(&symbol_descriptive_string);
+ {
+ __ Push(r3);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ }
+}
- __ StoreP(argument, FieldMemOperand(r3, JSValue::kValueOffset), r0);
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : number of arguments
+ // -- r4 : constructor function
+ // -- lr : return address
+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+ // -- sp[argc * 4] : receiver
+ // -----------------------------------
- __ Ret();
+ // 1. Load the first argument into r3 and get rid of the rest (including the
+ // receiver).
+ {
+ Label no_arguments, done;
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&no_arguments);
+ __ subi(r3, r3, Operand(1));
+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+ __ LoadPUX(r3, MemOperand(sp, r3));
+ __ Drop(2);
+ __ b(&done);
+ __ bind(&no_arguments);
+ __ LoadRoot(r3, Heap::kempty_stringRootIndex);
+ __ Drop(1);
+ __ bind(&done);
+ }
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- __ JumpIfSmi(r3, &convert_argument);
-
- // Is it a String?
- __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ lbz(r6, FieldMemOperand(r5, Map::kInstanceTypeOffset));
- STATIC_ASSERT(kNotStringTag != 0);
- __ andi(r0, r6, Operand(kIsNotStringMask));
- __ bne(&convert_argument, cr0);
- __ mr(argument, r3);
- __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7);
- __ b(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into r5.
- __ bind(&convert_argument);
- __ push(function); // Preserve the function.
- __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7);
+ // 2. Make sure r3 is a string.
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ push(r3);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+ Label convert, done_convert;
+ __ JumpIfSmi(r3, &convert);
+ __ CompareObjectType(r3, r5, r5, FIRST_NONSTRING_TYPE);
+ __ blt(&done_convert);
+ __ bind(&convert);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ ToStringStub stub(masm->isolate());
+ __ push(r4);
+ __ CallStub(&stub);
+ __ pop(r4);
+ }
+ __ bind(&done_convert);
}
- __ pop(function);
- __ mr(argument, r3);
- __ b(&argument_is_string);
- // Load the empty string into r5, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ LoadRoot(argument, Heap::kempty_stringRootIndex);
- __ Drop(1);
- __ b(&argument_is_string);
-
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1, r6, r7);
+ // 3. Allocate a JSValue wrapper for the string.
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ push(argument);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ // ----------- S t a t e -------------
+ // -- r3 : the first argument
+ // -- r4 : constructor function
+ // -- lr : return address
+ // -----------------------------------
+
+ Label allocate, done_allocate;
+ __ mr(r5, r3);
+ __ Allocate(JSValue::kSize, r3, r6, r7, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Initialize the JSValue in r3.
+ __ LoadGlobalFunctionInitialMap(r4, r6, r7);
+ __ StoreP(r6, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fallback to the runtime to allocate in new space.
+ __ bind(&allocate);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ LoadSmiLiteral(r6, Smi::FromInt(JSValue::kSize));
+ __ Push(r4, r5, r6);
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ Pop(r4, r5);
+ }
+ __ b(&done_allocate);
}
- __ Ret();
}
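
The split above separates String(x), which returns a primitive string and routes symbols to %SymbolDescriptiveString, from `new String(x)`, which performs the same conversion and then wraps the result in a JSValue. A toy model of the two entry points, with std::to_string standing in for ToString and symbols left out:

    #include <cassert>
    #include <string>

    // Stands in for the JSValue wrapper; `value` plays the role of the
    // JSValue::kValueOffset slot.
    struct StringWrapper {
      std::string value;
    };

    // Generate_StringConstructor: String() is "" and String(x) is
    // ToString(x), returned as a primitive.
    std::string StringCall(int argc, int first_arg) {
      if (argc == 0) return "";
      return std::to_string(first_arg);
    }

    // Generate_StringConstructor_ConstructStub: same conversion, then a
    // wrapper object is allocated around the result.
    StringWrapper StringConstruct(int argc, int first_arg) {
      return StringWrapper{StringCall(argc, first_arg)};
    }

    int main() {
      assert(StringCall(0, 0) == "");                // String()
      assert(StringCall(1, 42) == "42");             // String(42)
      assert(StringConstruct(1, 42).value == "42");  // new String(42)
    }
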
@@ -306,8 +324,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_memento) {
+ bool is_api_function) {
// ----------- S t a t e -------------
// -- r3 : number of arguments
// -- r4 : constructor function
@@ -317,9 +334,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- sp[...]: constructor arguments
// -----------------------------------
- // Should never create mementos for api functions.
- DCHECK(!is_api_function || !create_memento);
-
Isolate* isolate = masm->isolate();
// Enter a construct frame.
@@ -391,9 +405,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r5: initial map
Label rt_call_reload_new_target;
__ lbz(r6, FieldMemOperand(r5, Map::kInstanceSizeOffset));
- if (create_memento) {
- __ addi(r6, r6, Operand(AllocationMemento::kSize / kPointerSize));
- }
__ Allocate(r6, r7, r8, r9, &rt_call_reload_new_target, SIZE_IN_WORDS);
@@ -401,7 +412,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// initial map and properties and elements are set to empty fixed array.
// r4: constructor function
// r5: initial map
- // r6: object size (including memento if create_memento)
+ // r6: object size
// r7: JSObject (not tagged)
__ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
__ mr(r8, r7);
@@ -416,7 +427,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Fill all the in-object properties with the appropriate filler.
// r4: constructor function
// r5: initial map
- // r6: object size (in words, including memento if create_memento)
+ // r6: object size
// r7: JSObject (not tagged)
// r8: First in-object property of JSObject (not tagged)
// r9: End of object
@@ -458,24 +469,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&no_inobject_slack_tracking);
}
- if (create_memento) {
- __ subi(r3, r9, Operand(AllocationMemento::kSize));
- __ InitializeFieldsWithFiller(r8, r3, r10);
-
- // Fill in memento fields.
- // r8: points to the allocated but uninitialized memento.
- __ LoadRoot(r10, Heap::kAllocationMementoMapRootIndex);
- __ StoreP(r10, MemOperand(r8, AllocationMemento::kMapOffset));
- // Load the AllocationSite
- __ LoadP(r10, MemOperand(sp, 3 * kPointerSize));
- __ AssertUndefinedOrAllocationSite(r10, r3);
- __ StoreP(r10,
- MemOperand(r8, AllocationMemento::kAllocationSiteOffset));
- __ addi(r8, r8, Operand(AllocationMemento::kAllocationSiteOffset +
- kPointerSize));
- } else {
- __ InitializeFieldsWithFiller(r8, r9, r10);
- }
+ __ InitializeFieldsWithFiller(r8, r9, r10);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on.
@@ -494,44 +488,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r4: constructor function
// r6: original constructor
__ bind(&rt_call);
- if (create_memento) {
- // Get the cell or allocation site.
- __ LoadP(r5, MemOperand(sp, 3 * kPointerSize));
- __ Push(r5, r4, r6);
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ Push(r4, r6);
- __ CallRuntime(Runtime::kNewObject, 2);
- }
+ __ Push(r4, r6);
+ __ CallRuntime(Runtime::kNewObject, 2);
__ mr(r7, r3);
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- Label count_incremented;
- if (create_memento) {
- __ b(&count_incremented);
- }
-
// Receiver for constructor call allocated.
// r7: JSObject
__ bind(&allocated);
- if (create_memento) {
- __ LoadP(r5, MemOperand(sp, 3 * kPointerSize));
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ cmp(r5, r8);
- __ beq(&count_incremented);
- // r5 is an AllocationSite. We are creating a memento from it, so we
- // need to increment the memento create count.
- __ LoadP(
- r6, FieldMemOperand(r5, AllocationSite::kPretenureCreateCountOffset));
- __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
- __ StoreP(
- r6, FieldMemOperand(r5, AllocationSite::kPretenureCreateCountOffset),
- r0);
- __ bind(&count_incremented);
- }
-
// Restore the parameters.
__ Pop(r4, ip);
@@ -633,12 +597,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true);
}
@@ -732,8 +696,7 @@ enum IsTagged { kArgcIsSmiTagged, kArgcIsUntaggedInt };
// Clobbers r5; preserves all other registers.
-static void Generate_CheckStackOverflow(MacroAssembler* masm,
- const int calleeOffset, Register argc,
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
IsTagged argc_is_tagged) {
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -754,12 +717,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ bgt(&okay); // Signed comparison.
// Out of stack space.
- __ LoadP(r4, MemOperand(fp, calleeOffset));
- if (argc_is_tagged == kArgcIsUntaggedInt) {
- __ SmiTag(argc);
- }
- __ Push(r4, argc);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&okay);
}
@@ -768,7 +726,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
bool is_construct) {
// Called from Generate_JS_Entry
- // r3: code entry
+ // r3: new.target
// r4: function
// r5: receiver
// r6: argc
@@ -783,22 +741,20 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
- // Set up the context from the function argument.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ // Set up the context (we need to use the caller context from the isolate).
+ ExternalReference context_address(Isolate::kContextAddress,
+ masm->isolate());
+ __ mov(cp, Operand(context_address));
+ __ LoadP(cp, MemOperand(cp));
__ InitializeRootRegister();
// Push the function and the receiver onto the stack.
- __ push(r4);
- __ push(r5);
+ __ Push(r4, r5);
// Check if we have enough stack space to push all arguments.
- // The function is the first thing that was pushed above after entering
- // the internal frame.
- const int kFunctionOffset =
- InternalFrameConstants::kCodeOffset - kPointerSize;
// Clobbers r5.
- Generate_CheckStackOverflow(masm, kFunctionOffset, r6, kArgcIsUntaggedInt);
+ Generate_CheckStackOverflow(masm, r6, kArgcIsUntaggedInt);
// Copy arguments to the stack in a loop.
// r4: function
@@ -818,6 +774,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ cmp(r7, r5);
__ bne(&loop);
+ // Set up new.target and argc.
+ __ mr(r7, r3);
+ __ mr(r3, r6);
+ __ mr(r6, r7);
+
// Initialize all JavaScript callee-saved registers, since they will be seen
// by the garbage collector as part of handlers.
__ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
@@ -826,17 +787,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mr(r16, r7);
__ mr(r17, r7);
- // Invoke the code and pass argc as r3.
- __ mr(r3, r6);
- if (is_construct) {
- // No type feedback cell is available
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(r3);
- __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
- }
+ // Invoke the code.
+ Handle<Code> builtin = is_construct
+ ? masm->isolate()->builtins()->Construct()
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
+
// Exit the JS frame and remove the parameters (except function), and
// return.
}
@@ -908,7 +864,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ LoadRoot(r0, Heap::kRealStackLimitRootIndex);
__ cmpl(r6, r0);
__ bge(&ok);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -991,8 +947,11 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- // Drop receiver + arguments.
- __ Drop(1); // TODO(rmcilroy): Get number of arguments from BytecodeArray.
+
+ // Drop receiver + arguments and return.
+ __ lwz(r0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kParameterSizeOffset));
+ __ add(sp, sp, r0);
__ blr();
}
@@ -1260,6 +1219,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
}
+// static
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r3: actual number of arguments
@@ -1267,201 +1227,41 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
Label done;
__ cmpi(r3, Operand::Zero());
__ bne(&done);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ push(r5);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
__ addi(r3, r3, Operand(1));
__ bind(&done);
}
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- // r3: actual number of arguments
- Label slow, non_function;
- __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
- __ add(r4, sp, r4);
- __ LoadP(r4, MemOperand(r4));
- __ JumpIfSmi(r4, &non_function);
- __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
- __ bne(&slow);
-
- // 3a. Patch the first argument if necessary when calling a function.
- // r3: actual number of arguments
- // r4: function
- Label shift_arguments;
- __ li(r7, Operand::Zero()); // indicate regular JS_FUNCTION
- {
- Label convert_to_object, use_global_proxy, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(r6,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kStrictModeFunction,
-#else
- SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
-#endif
- r0);
- __ bne(&shift_arguments, cr0);
-
- // Do not transform the receiver for native (Compilerhints already in r6).
- __ TestBit(r6,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kNative,
-#else
- SharedFunctionInfo::kNative + kSmiTagSize,
-#endif
- r0);
- __ bne(&shift_arguments, cr0);
-
- // Compute the receiver in sloppy mode.
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ add(r5, sp, ip);
- __ LoadP(r5, MemOperand(r5, -kPointerSize));
- // r3: actual number of arguments
- // r4: function
- // r5: first argument
- __ JumpIfSmi(r5, &convert_to_object);
-
- __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
- __ cmp(r5, r6);
- __ beq(&use_global_proxy);
- __ LoadRoot(r6, Heap::kNullValueRootIndex);
- __ cmp(r5, r6);
- __ beq(&use_global_proxy);
-
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r5, r6, r6, FIRST_SPEC_OBJECT_TYPE);
- __ bge(&shift_arguments);
-
- __ bind(&convert_to_object);
-
- {
- // Enter an internal frame in order to preserve argument count.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(r3);
- __ Push(r3);
- __ mr(r3, r5);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mr(r5, r3);
-
- __ pop(r3);
- __ SmiUntag(r3);
-
- // Exit the internal frame.
- }
-
- // Restore the function to r4, and the flag to r7.
- __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
- __ add(r7, sp, r7);
- __ LoadP(r4, MemOperand(r7));
- __ li(r7, Operand::Zero());
- __ b(&patch_receiver);
-
- __ bind(&use_global_proxy);
- __ LoadP(r5, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
-
- __ bind(&patch_receiver);
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ add(r6, sp, ip);
- __ StoreP(r5, MemOperand(r6, -kPointerSize));
-
- __ b(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ li(r7, Operand(1, RelocInfo::NONE32)); // indicate function proxy
- __ cmpi(r5, Operand(JS_FUNCTION_PROXY_TYPE));
- __ beq(&shift_arguments);
- __ bind(&non_function);
- __ li(r7, Operand(2, RelocInfo::NONE32)); // indicate non-function
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
+ // 2. Get the callable to call (passed as receiver) from the stack.
// r3: actual number of arguments
- // r4: function
- // r7: call type (0: JS function, 1: function proxy, 2: non-function)
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ add(r5, sp, ip);
- __ StoreP(r4, MemOperand(r5, -kPointerSize));
+ __ ShiftLeftImm(r5, r3, Operand(kPointerSizeLog2));
+ __ LoadPX(r4, MemOperand(sp, r5));
- // 4. Shift arguments and return address one slot down on the stack
+ // 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
// r3: actual number of arguments
- // r4: function
- // r7: call type (0: JS function, 1: function proxy, 2: non-function)
- __ bind(&shift_arguments);
+ // r4: callable
{
Label loop;
// Calculate the copy start address (destination). Copy end address is sp.
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ add(r5, sp, ip);
+ __ add(r5, sp, r5);
+
+ __ mtctr(r3);
__ bind(&loop);
__ LoadP(ip, MemOperand(r5, -kPointerSize));
__ StoreP(ip, MemOperand(r5));
__ subi(r5, r5, Operand(kPointerSize));
- __ cmp(r5, sp);
- __ bne(&loop);
+ __ bdnz(&loop);
// Adjust the actual number of arguments and remove the top element
// (which is a copy of the last argument).
__ subi(r3, r3, Operand(1));
__ pop();
}
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- // r3: actual number of arguments
- // r4: function
- // r7: call type (0: JS function, 1: function proxy, 2: non-function)
- {
- Label function, non_proxy;
- __ cmpi(r7, Operand::Zero());
- __ beq(&function);
- // Expected number of arguments is 0 for CALL_NON_FUNCTION.
- __ li(r5, Operand::Zero());
- __ cmpi(r7, Operand(1));
- __ bne(&non_proxy);
-
- __ push(r4); // re-add proxy object as additional argument
- __ addi(r3, r3, Operand(1));
- __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register ip without checking arguments.
- // r3: actual number of arguments
- // r4: function
- __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
- r5, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
-#if !V8_TARGET_ARCH_PPC64
- __ SmiUntag(r5);
-#endif
- __ cmp(r5, r3); // Check formal and actual parameter counts.
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET, ne);
-
- __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
- ParameterCount expected(0);
- __ InvokeCode(ip, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
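
The rewritten Generate_FunctionCall drops the proxy and non-function special cases: it loads the callable from the receiver slot, shifts every argument one slot down with a counted mtctr/bdnz loop, and tail-calls the generic Call builtin. A toy model of the shift; index 0 stands in for sp and higher indices are deeper stack slots:

    #include <cassert>
    #include <vector>

    // Layout on entry: stack[0] == arg[argc-1], ..., stack[argc-1] ==
    // arg[0], stack[argc] == receiver (the callable itself). The loop
    // moves each slot one position deeper, the duplicated top slot is
    // popped, and arg[0] becomes the new receiver.
    int ShiftArgumentsDown(std::vector<int>* stack, int argc) {
      for (int i = argc; i >= 1; --i) {
        (*stack)[i] = (*stack)[i - 1];
      }
      stack->erase(stack->begin());  // drop the duplicate of the last arg
      return argc - 1;
    }

    int main() {
      // f.call(thisArg, 10): slots are [10, thisArg, f], argc == 2.
      std::vector<int> stack{10, /*thisArg*/ 77, /*callable f*/ 99};
      int argc = ShiftArgumentsDown(&stack, 2);
      assert(argc == 1);
      assert((stack == std::vector<int>{10, 77}));  // thisArg now receiver
    }
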
@@ -1530,114 +1330,32 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ push(r4);
__ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
- __ push(r3);
- __ LoadP(r3, MemOperand(fp, kArgumentsOffset)); // get the args array
- __ push(r3);
+ __ LoadP(r4, MemOperand(fp, kArgumentsOffset)); // get the args array
+ __ Push(r3, r4);
if (targetIsArgument) {
- __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
}
- Generate_CheckStackOverflow(masm, kFunctionOffset, r3, kArgcIsSmiTagged);
+ Generate_CheckStackOverflow(masm, r3, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
const int kLimitOffset = kVectorOffset - (1 * kPointerSize);
__ li(r4, Operand::Zero());
- __ Push(r3, r4); // limit and initial index.
-
- // Get the receiver.
- __ LoadP(r3, MemOperand(fp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ LoadP(r4, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
- __ bne(&push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
- // Load the shared function info while the function is still in r4.
- __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_proxy;
- __ lwz(r5, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
- __ TestBit(r5,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kStrictModeFunction,
-#else
- SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
-#endif
- r0);
- __ bne(&push_receiver, cr0);
-
- // Do not transform the receiver for strict mode functions.
- __ TestBit(r5,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kNative,
-#else
- SharedFunctionInfo::kNative + kSmiTagSize,
-#endif
- r0);
- __ bne(&push_receiver, cr0);
-
- // Compute the receiver in sloppy mode.
- __ JumpIfSmi(r3, &call_to_object);
- __ LoadRoot(r4, Heap::kNullValueRootIndex);
- __ cmp(r3, r4);
- __ beq(&use_global_proxy);
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ cmp(r3, r4);
- __ beq(&use_global_proxy);
-
- // Check if the receiver is already a JavaScript object.
- // r3: receiver
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
- __ bge(&push_receiver);
-
- // Convert the receiver to a regular object.
- // r3: receiver
- __ bind(&call_to_object);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ b(&push_receiver);
-
- __ bind(&use_global_proxy);
- __ LoadP(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ LoadP(r3, FieldMemOperand(r3, GlobalObject::kGlobalProxyOffset));
-
- // Push the receiver.
- // r3: receiver
- __ bind(&push_receiver);
- __ push(r3);
+ __ LoadP(r5, MemOperand(fp, kReceiverOffset));
+ __ Push(r3, r4, r5); // limit, initial index and receiver.
// Copy all arguments from the array to the stack.
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
- // Call the function.
- Label call_proxy;
- ParameterCount actual(r3);
+ // Call the callable.
+ // TODO(bmeurer): This should be a tail call according to ES6.
__ LoadP(r4, MemOperand(fp, kFunctionOffset));
- __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
- __ bne(&call_proxy);
- __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
-
- __ LeaveFrame(StackFrame::INTERNAL, kStackSize * kPointerSize);
- __ blr();
-
- // Call the function proxy.
- __ bind(&call_proxy);
- __ push(r4); // add function proxy as last argument
- __ addi(r3, r3, Operand(1));
- __ li(r5, Operand::Zero());
- __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
- __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
// Tear down the internal frame and remove function, receiver and args.
}
@@ -1680,9 +1398,10 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
__ push(r3);
__ LoadP(r3, MemOperand(fp, kNewTargetOffset)); // get the new.target
__ push(r3);
- __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
- Generate_CheckStackOverflow(masm, kFunctionOffset, r3, kArgcIsSmiTagged);
+ Generate_CheckStackOverflow(masm, r3, kArgcIsSmiTagged);
// Push current limit and index.
const int kIndexOffset = kVectorOffset - (2 * kPointerSize);
@@ -1779,6 +1498,253 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+
+ Label convert, convert_global_proxy, convert_to_object, done_convert;
+ __ AssertFunction(r4);
+ // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
+ // slot is "classConstructor".
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+ SharedFunctionInfo::kStrictModeByteOffset);
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ __ lbz(r6, FieldMemOperand(r5, SharedFunctionInfo::kNativeByteOffset));
+ __ andi(r0, r6, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+ (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ bne(&done_convert, cr0);
+ {
+ __ ShiftLeftImm(r6, r3, Operand(kPointerSizeLog2));
+ __ LoadPX(r6, MemOperand(sp, r6));
+
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the function to call (checked to be a JSFunction)
+ // -- r5 : the shared function info.
+ // -- r6 : the receiver
+ // -- cp : the function context.
+ // -----------------------------------
+
+ Label convert_receiver;
+ __ JumpIfSmi(r6, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r6, r7, r7, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&done_convert);
+ __ JumpIfRoot(r6, Heap::kUndefinedValueRootIndex, &convert_global_proxy);
+ __ JumpIfNotRoot(r6, Heap::kNullValueRootIndex, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(r6);
+ }
+ __ b(&convert_receiver);
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(r3);
+ __ Push(r3, r4);
+ __ mr(r3, r6);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mr(r6, r3);
+ __ Pop(r3, r4);
+ __ SmiUntag(r3);
+ }
+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
+ __ StorePX(r6, MemOperand(sp, r7));
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the function to call (checked to be a JSFunction)
+ // -- r5 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ __ LoadWordArith(
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_PPC64
+ __ SmiUntag(r5);
+#endif
+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+ ParameterCount actual(r3);
+ ParameterCount expected(r5);
+ __ InvokeCode(r6, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+}
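
The receiver fix-up above is the dense part of this builtin. As a reading aid, here is a rough C++ rendering of the same logic; every name in it is invented for illustration and none of it is V8's real API:

struct Object;
bool IsStrictOrNative(Object* fn);   // reads the SharedFunctionInfo hint bits
bool IsUndefinedOrNull(Object* o);
bool IsJSReceiver(Object* o);        // the FIRST_JS_RECEIVER_TYPE check
Object* GlobalProxyOf(Object* fn);   // taken from the function's context
Object* ToObject(Object* o);         // may allocate, hence the frame scope

// Sketch of the receiver conversion in Generate_CallFunction.
Object* ConvertReceiver(Object* fn, Object* receiver) {
  if (IsStrictOrNative(fn)) return receiver;    // no conversion needed
  if (IsJSReceiver(receiver)) return receiver;  // already a JS object
  if (IsUndefinedOrNull(receiver)) return GlobalProxyOf(fn);
  return ToObject(receiver);                    // box the primitive
}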
+
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the target to call (can be any Object).
+ // -----------------------------------
+
+ Label non_callable, non_function, non_smi;
+ __ JumpIfSmi(r4, &non_callable);
+ __ bind(&non_smi);
+ __ CompareObjectType(r4, r7, r8, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET,
+ eq);
+ __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ bne(&non_function);
+
+ // 1. Call to function proxy.
+ // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
+ __ LoadP(r4, FieldMemOperand(r4, JSFunctionProxy::kCallTrapOffset));
+ __ AssertNotSmi(r4);
+ __ b(&non_smi);
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not, we raise an exception).
+ __ bind(&non_function);
+ // Check if target has a [[Call]] internal method.
+ __ lbz(r7, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r7, Map::kIsCallable, r0);
+ __ beq(&non_callable, cr0);
+ // Overwrite the original receiver with the (original) target.
+ __ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
+ __ StorePX(r4, MemOperand(sp, r8));
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, r4);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
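
Generate_Construct further down follows the same dispatch shape, so one sketch covers both. This is a hypothetical C++ outline of the dispatch, not real V8 code; all helper names are made up:

struct Object;
bool IsSmi(Object* o);
bool IsJSFunction(Object* o);
bool IsFunctionProxy(Object* o);
bool IsCallable(Object* o);               // the Map::kIsCallable bit
Object* CallTrap(Object* proxy);          // JSFunctionProxy::kCallTrapOffset
Object* CallAsFunctionDelegate();         // from the native context
void CallFunction(Object* fn, int argc);  // the CallFunction builtin
[[noreturn]] void ThrowCalledNonCallable();

// Sketch of Builtins::Generate_Call (receiver_slot is sp + argc words).
void Call(Object* target, int argc, Object** receiver_slot) {
  while (true) {
    if (IsSmi(target)) ThrowCalledNonCallable();
    if (IsJSFunction(target)) return CallFunction(target, argc);
    if (IsFunctionProxy(target)) { target = CallTrap(target); continue; }
    if (!IsCallable(target)) ThrowCalledNonCallable();
    *receiver_slot = target;  // the callable object becomes the receiver
    return CallFunction(CallAsFunctionDelegate(), argc);
  }
}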
+
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the constructor to call (checked to be a JSFunction)
+ // -- r6 : the original constructor (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(r4);
+ __ AssertFunction(r6);
+
+ // The calling convention for function-specific ConstructStubs requires
+ // r5 to contain either an AllocationSite or undefined.
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
+ __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ JumpToJSEntry(ip);
+}
+
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the constructor to call (checked to be a JSFunctionProxy)
+ // -- r6 : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
+ __ LoadP(r4, FieldMemOperand(r4, JSFunctionProxy::kConstructTrapOffset));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r4 : the constructor to call (can be any Object)
+ // -- r6 : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // Check if target has a [[Construct]] internal method.
+ Label non_constructor;
+ __ JumpIfSmi(r4, &non_constructor);
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ lbz(r5, FieldMemOperand(r7, Map::kBitFieldOffset));
+ __ TestBit(r5, Map::kIsConstructor, r0);
+ __ beq(&non_constructor, cr0);
+
+ // Dispatch based on instance type.
+ __ CompareInstanceType(r7, r8, JS_FUNCTION_TYPE);
+ __ Jump(masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET, eq);
+ __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ Jump(masm->isolate()->builtins()->ConstructProxy(), RelocInfo::CODE_TARGET,
+ eq);
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ ShiftLeftImm(r8, r3, Operand(kPointerSizeLog2));
+ __ StorePX(r4, MemOperand(sp, r8));
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, r4);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ Push(r4);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
+
+
+// static
+void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r5 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- r4 : the target to call (can be any Object).
+ // -----------------------------------
+
+ // Calculate number of arguments (add one for receiver).
+ __ addi(r6, r3, Operand(1));
+
+ // Push the arguments.
+ Label loop;
+ __ addi(r5, r5, Operand(kPointerSize)); // Bias up for LoadPU
+ __ mtctr(r6);
+ __ bind(&loop);
+ __ LoadPU(r6, MemOperand(r5, -kPointerSize));
+ __ push(r6);
+ __ bdnz(&loop);
+
+ // Call the target.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
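
Because the JS stack grows downward, "consecutive above this" in the state comment appears to mean lower addresses, which is why the loop reads with a -kPointerSize pre-decrement. A minimal sketch of the loop, with invented helper names:

void PushOntoJSStack(intptr_t value);
void JumpToCall();  // stands in for the tail jump to the Call builtin

// Sketch of Generate_PushArgsAndCall: push receiver + argc arguments,
// reading the interpreter's register file downward from first_arg.
void PushArgsAndCall(intptr_t* first_arg, int argc) {
  for (int i = 0; i <= argc; ++i)    // argc + 1 words, receiver included
    PushOntoJSStack(first_arg[-i]);  // mirrors LoadPU with -kPointerSize
  JumpToCall();
}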
+
+
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r3 : actual number of arguments
@@ -1801,7 +1767,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&enough);
EnterArgumentsAdaptorFrame(masm);
- // Calculate copy start address into r3 and copy end address into r5.
+ // Calculate copy start address into r3 and copy end address into r6.
// r3: actual number of arguments as a smi
// r4: function
// r5: expected number of arguments
@@ -1810,20 +1776,21 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ add(r3, r3, fp);
// adjust for return address and receiver
__ addi(r3, r3, Operand(2 * kPointerSize));
- __ ShiftLeftImm(r5, r5, Operand(kPointerSizeLog2));
- __ sub(r5, r3, r5);
+ __ ShiftLeftImm(r6, r5, Operand(kPointerSizeLog2));
+ __ sub(r6, r3, r6);
// Copy the arguments (including the receiver) to the new stack frame.
// r3: copy start address
// r4: function
- // r5: copy end address
+ // r5: expected number of arguments
+ // r6: copy end address
// ip: code entry to call
Label copy;
__ bind(&copy);
__ LoadP(r0, MemOperand(r3, 0));
__ push(r0);
- __ cmp(r3, r5); // Compare before moving to next argument.
+ __ cmp(r3, r6); // Compare before moving to next argument.
__ subi(r3, r3, Operand(kPointerSize));
__ bne(&copy);
@@ -1893,21 +1860,24 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// r5: expected number of arguments
// ip: code entry to call
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ ShiftLeftImm(r5, r5, Operand(kPointerSizeLog2));
- __ sub(r5, fp, r5);
+ __ ShiftLeftImm(r6, r5, Operand(kPointerSizeLog2));
+ __ sub(r6, fp, r6);
// Adjust for frame.
- __ subi(r5, r5, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+ __ subi(r6, r6, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
2 * kPointerSize));
Label fill;
__ bind(&fill);
__ push(r0);
- __ cmp(sp, r5);
+ __ cmp(sp, r6);
__ bne(&fill);
}
// Call the entry point.
__ bind(&invoke);
+ __ mr(r3, r5);
+ // r3 : expected number of arguments
+ // r4 : function (passed through to callee)
__ CallJSEntry(ip);
// Store offset of return address for deoptimizer.
@@ -1928,7 +1898,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bkpt(0);
}
}
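
The hunks above mostly shuffle the adaptor's register usage: the copy-end pointer moves from r5 to r6 so that r5 can keep the expected argument count, which is now also copied into r3 before the call. As a coarse model of what the trampoline does overall, with invented helper names:

struct Object;
Object* CallerFrameArg(int i);  // the receiver is i == 0
Object* Undefined();
void Push(Object* o);
void EnterAdaptorFrame();
void CallJSEntry(int argc_for_callee);

// Sketch of the arguments adaptor: reconcile actual vs. expected counts.
void ArgumentsAdaptor(int actual, int expected) {
  EnterAdaptorFrame();
  int copied = actual < expected ? actual : expected;
  for (int i = 0; i <= copied; ++i) Push(CallerFrameArg(i));  // + receiver
  for (int i = actual; i < expected; ++i) Push(Undefined());  // pad shortfall
  CallJSEntry(expected);  // r3 now carries the expected count
}
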
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 435ac47c00..290159a3e7 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -12,6 +12,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
+#include "src/ppc/code-stubs-ppc.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
@@ -706,29 +707,25 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(lhs, rhs);
// Figure out which native to call and setup the arguments.
- if (cc == eq && strict()) {
- __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
+ if (cc == eq) {
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
+ 1);
} else {
- Builtins::JavaScript native;
- if (cc == eq) {
- native = Builtins::EQUALS;
+ int ncr; // NaN compare result
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
} else {
- native =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc == lt || cc == le) {
- ncr = GREATER;
- } else {
- DCHECK(cc == gt || cc == ge); // remaining cases
- ncr = LESS;
- }
- __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
- __ push(r3);
+ DCHECK(cc == gt || cc == ge); // remaining cases
+ ncr = LESS;
}
+ __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
+ __ push(r3);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ InvokeBuiltin(native, JUMP_FUNCTION);
+ __ TailCallRuntime(
+ is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
+ 1);
}
__ bind(&miss);
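
The "ncr" value pushed above is the result Runtime::kCompare should report when either operand is NaN. Picking GREATER for lt/le and LESS for gt/ge is what makes every ordered comparison against NaN come out false. A sketch of the idea, where Compare stands in for the runtime call and the enum mirrors V8's -1/0/+1 convention:

enum { LESS = -1, EQUAL = 0, GREATER = 1 };
int Compare(double lhs, double rhs, int ncr);  // returns ncr on NaN

// lt/le push GREATER: a NaN operand yields +1, so "result < 0" is false.
bool LessThan(double lhs, double rhs) {
  return Compare(lhs, rhs, /*ncr=*/GREATER) < 0;
}

// gt/ge push LESS: a NaN operand yields -1, so "result > 0" is false.
bool GreaterThan(double lhs, double rhs) {
  return Compare(lhs, rhs, /*ncr=*/LESS) > 0;
}
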
@@ -1366,216 +1363,115 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
-// Uses registers r3 to r7.
-// Expected input (depending on whether args are in registers or on the stack):
-// * object: r3 or at sp + 1 * kPointerSize.
-// * function: r4 or at sp.
-//
-// An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed in r8.
-// (See LCodeGen::DoInstanceOfKnownGlobal)
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub:
- const Register object = r3; // Object (lhs).
- Register map = r6; // Map of the object.
- const Register function = r4; // Function (rhs).
- const Register prototype = r7; // Prototype of the function.
- // The map_check_delta was stored in r8
- // The bool_load_delta was stored in r9
- // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
- const Register map_check_delta = r8;
- const Register bool_load_delta = r9;
- const Register inline_site = r10;
- const Register scratch = r5;
- Register scratch3 = no_reg;
- Label slow, loop, is_instance, is_not_instance, not_js_object;
-
- if (!HasArgsInRegisters()) {
- __ LoadP(object, MemOperand(sp, 1 * kPointerSize));
- __ LoadP(function, MemOperand(sp, 0));
- }
-
- // Check that the left hand is a JS object and load map.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
- Label miss;
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ bne(&miss);
- __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
- __ bne(&miss);
- __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&miss);
- }
-
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+ Register const object = r4; // Object (lhs).
+ Register const function = r3; // Function (rhs).
+ Register const object_map = r5; // Map of {object}.
+ Register const function_map = r6; // Map of {function}.
+ Register const function_prototype = r7; // Prototype of {function}.
+ Register const scratch = r8;
+
+ DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+ // Check if {object} is a smi.
+ Label object_is_smi;
+ __ JumpIfSmi(object, &object_is_smi);
+
+ // Lookup the {function} and the {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Label fast_case, slow_case;
+ __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ bne(&fast_case);
+ __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+ __ bne(&fast_case);
+ __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+ // If {object} is a smi, we can safely return false if {function} is a JS
+ // function; otherwise we have to miss to the runtime and throw an exception.
+ __ bind(&object_is_smi);
+ __ JumpIfSmi(function, &slow_case);
+ __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
+ __ bne(&slow_case);
+ __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ Ret();
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- } else {
- DCHECK(HasArgsInRegisters());
- // Patch the (relocated) inlined map check.
-
- const Register offset = map_check_delta;
- __ mflr(inline_site);
- __ sub(inline_site, inline_site, offset);
- // Get the map location in offset and patch it.
- __ GetRelocatedValue(inline_site, offset, scratch);
- __ StoreP(map, FieldMemOperand(offset, Cell::kValueOffset), r0);
-
- __ mr(r11, map);
- __ RecordWriteField(offset, Cell::kValueOffset, r11, function,
- kLRHasNotBeenSaved, kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- }
+ // Fast-case: The {function} must be a valid JSFunction.
+ __ bind(&fast_case);
+ __ JumpIfSmi(function, &slow_case);
+ __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE);
+ __ bne(&slow_case);
- // Register mapping: r6 is object map and r7 is function prototype.
- // Get prototype of object into r5.
- __ LoadP(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+ // Ensure that {function} has an instance prototype.
+ __ lbz(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset));
+ __ TestBit(scratch, Map::kHasNonInstancePrototype, r0);
+ __ bne(&slow_case, cr0);
- // We don't need map any more. Use it as a scratch register.
- scratch3 = map;
- map = no_reg;
+ // Ensure that {function} is not bound.
+ Register const shared_info = scratch;
+ __ LoadP(shared_info,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ lwz(scratch, FieldMemOperand(shared_info,
+ SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBit(scratch,
+#if V8_TARGET_ARCH_PPC64
+ SharedFunctionInfo::kBoundFunction,
+#else
+ SharedFunctionInfo::kBoundFunction + kSmiTagSize,
+#endif
+ r0);
+ __ bne(&slow_case, cr0);
- // Loop through the prototype chain looking for the function prototype.
- __ LoadRoot(scratch3, Heap::kNullValueRootIndex);
+ // Get the "prototype" (or initial map) of the {function}.
+ __ LoadP(function_prototype,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ AssertNotSmi(function_prototype);
+
+ // Resolve the prototype if the {function} has an initial map. Afterwards the
+ // {function_prototype} will be either the JSReceiver prototype object or the
+ // hole value, which means that no instances of the {function} were created so
+ // far and hence we should return false.
+ Label function_prototype_valid;
+ __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE);
+ __ bne(&function_prototype_valid);
+ __ LoadP(function_prototype,
+ FieldMemOperand(function_prototype, Map::kPrototypeOffset));
+ __ bind(&function_prototype_valid);
+ __ AssertNotSmi(function_prototype);
+
+ // Update the global instanceof cache with the current {object} map and
+ // {function}. The cached answer will be set when it is known below.
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+
+ // Loop through the prototype chain looking for the {function} prototype.
+ // Assume true, and change to false if not found.
+ Register const object_prototype = object_map;
+ Register const null = scratch;
+ Label done, loop;
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ __ LoadRoot(null, Heap::kNullValueRootIndex);
__ bind(&loop);
- __ cmp(scratch, prototype);
- __ beq(&is_instance);
- __ cmp(scratch, scratch3);
- __ beq(&is_not_instance);
- __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ LoadP(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
- __ b(&loop);
- Factory* factory = isolate()->factory();
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ LoadSmiLiteral(r3, Smi::FromInt(0));
- __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ Move(r3, factory->true_value());
- }
- } else {
- // Patch the call site to return true.
- __ LoadRoot(r3, Heap::kTrueValueRootIndex);
- __ add(inline_site, inline_site, bool_load_delta);
- // Get the boolean result location in scratch and patch it.
- __ SetRelocatedValue(inline_site, scratch, r3);
-
- if (!ReturnTrueFalseObject()) {
- __ LoadSmiLiteral(r3, Smi::FromInt(0));
- }
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ LoadSmiLiteral(r3, Smi::FromInt(1));
- __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ Move(r3, factory->false_value());
- }
- } else {
- // Patch the call site to return false.
- __ LoadRoot(r3, Heap::kFalseValueRootIndex);
- __ add(inline_site, inline_site, bool_load_delta);
- // Get the boolean result location in scratch and patch it.
- __ SetRelocatedValue(inline_site, scratch, r3);
-
- if (!ReturnTrueFalseObject()) {
- __ LoadSmiLiteral(r3, Smi::FromInt(1));
- }
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow);
- __ CompareObjectType(function, scratch3, scratch, JS_FUNCTION_TYPE);
- __ bne(&slow);
-
- // Null is not instance of anything.
- __ Cmpi(object, Operand(isolate()->factory()->null_value()), r0);
- __ bne(&object_not_null);
- if (ReturnTrueFalseObject()) {
- __ Move(r3, factory->false_value());
- } else {
- __ LoadSmiLiteral(r3, Smi::FromInt(1));
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null);
- // Smi values are not instances of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi);
- if (ReturnTrueFalseObject()) {
- __ Move(r3, factory->false_value());
- } else {
- __ LoadSmiLiteral(r3, Smi::FromInt(1));
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- __ bind(&object_not_null_or_smi);
- // String values are not instances of anything.
- __ IsObjectJSStringType(object, scratch, &slow);
- if (ReturnTrueFalseObject()) {
- __ Move(r3, factory->false_value());
- } else {
- __ LoadSmiLiteral(r3, Smi::FromInt(1));
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
-
- // Slow-case. Tail call builtin.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- if (HasArgsInRegisters()) {
- __ Push(r3, r4);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ Push(r3, r4);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ cmpi(r3, Operand::Zero());
- __ LoadRoot(r3, Heap::kTrueValueRootIndex);
- __ LoadRoot(r4, Heap::kFalseValueRootIndex);
- __ isel(eq, r3, r3, r4);
- } else {
- Label true_value, done;
- __ cmpi(r3, Operand::Zero());
- __ beq(&true_value);
-
- __ LoadRoot(r3, Heap::kFalseValueRootIndex);
- __ b(&done);
-
- __ bind(&true_value);
- __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ __ LoadP(object_prototype,
+ FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object_prototype, function_prototype);
+ __ beq(&done);
+ __ cmp(object_prototype, null);
+ __ LoadP(object_map,
+ FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ bne(&loop);
+ __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+ __ Ret();
- __ bind(&done);
- }
- __ Ret(HasArgsInRegisters() ? 0 : 2);
- }
+ // Slow-case: Call the runtime function.
+ __ bind(&slow_case);
+ __ Push(object, function);
+ __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
}
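
Read as C++, the new stub is roughly the following; the names are illustrative only, and the global instanceof cache (the kInstanceofCache* roots) is elided for brevity:

struct Map; struct Object;
Map* MapOf(Object* o);
Object* PrototypeOf(Map* m);
bool IsSmi(Object* o);
bool IsOrdinaryJSFunction(Object* o);   // not bound, has instance prototype
Object* InstancePrototype(Object* fn);  // resolved through the initial map
Object* True(); Object* False(); Object* Null();
Object* SlowPath(Object* object, Object* function);  // Runtime::kInstanceOf

// Sketch of InstanceOfStub::Generate.
Object* InstanceOf(Object* object, Object* function) {
  if (!IsOrdinaryJSFunction(function)) return SlowPath(object, function);
  if (IsSmi(object)) return False();    // smis are instances of nothing
  Object* target = InstancePrototype(function);
  for (Map* map = MapOf(object);;) {    // walk the prototype chain
    Object* proto = PrototypeOf(map);
    if (proto == target) return True();
    if (proto == Null()) return False();
    map = MapOf(proto);
  }
}
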
@@ -1685,74 +1581,75 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[1] : receiver displacement
- // sp[2] : function
+ // r4 : function
+ // r5 : number of parameters (tagged)
+ // r6 : parameters pointer
+
+ DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset));
- STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
- __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ bne(&runtime);
// Patch the arguments.length and the parameters pointer in the current frame.
- __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ StoreP(r5, MemOperand(sp, 0 * kPointerSize));
- __ SmiToPtrArrayOffset(r5, r5);
- __ add(r6, r6, r5);
+ __ LoadP(r5, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r6, r5);
+ __ add(r6, r6, r7);
__ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
- __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
__ bind(&runtime);
+ __ Push(r4, r6, r5);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // Stack layout:
- // sp[0] : number of parameters (tagged)
- // sp[1] : address of receiver argument
- // sp[2] : function
+ // r4 : function
+ // r5 : number of parameters (tagged)
+ // r6 : parameters pointer
// Registers used over whole function:
- // r9 : allocated object (tagged)
- // r11 : mapped parameter count (tagged)
+ // r8 : argument count (tagged)
+ // r9 : mapped parameter count (tagged)
- __ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
- // r4 = parameter count (tagged)
+ DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset));
- STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
- __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ Label adaptor_frame, try_allocate, runtime;
+ __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
__ beq(&adaptor_frame);
// No adaptor, parameter count = argument count.
- __ mr(r5, r4);
+ __ mr(r8, r5);
+ __ mr(r9, r5);
__ b(&try_allocate);
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
- __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToPtrArrayOffset(r7, r5);
+ __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r6, r8);
__ add(r6, r6, r7);
__ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
- __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
- // r4 = parameter count (tagged)
- // r5 = argument count (tagged)
- // Compute the mapped parameter count = min(r4, r5) in r4.
- __ cmp(r4, r5);
+ // r8 = argument count (tagged)
+ // r9 = parameter count (tagged)
+ // Compute the mapped parameter count = min(r5, r8) in r9.
+ __ cmp(r5, r8);
if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(lt, r4, r4, r5);
+ __ isel(lt, r9, r5, r8);
} else {
Label skip;
+ __ mr(r9, r5);
__ blt(&skip);
- __ mr(r4, r5);
+ __ mr(r9, r8);
__ bind(&skip);
}
@@ -1763,9 +1660,9 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
- __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
if (CpuFeatures::IsSupported(ISELECT)) {
- __ SmiToPtrArrayOffset(r11, r4);
+ __ SmiToPtrArrayOffset(r11, r9);
__ addi(r11, r11, Operand(kParameterMapHeaderSize));
__ isel(eq, r11, r0, r11);
} else {
@@ -1774,13 +1671,13 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ li(r11, Operand::Zero());
__ b(&skip3);
__ bind(&skip2);
- __ SmiToPtrArrayOffset(r11, r4);
+ __ SmiToPtrArrayOffset(r11, r9);
__ addi(r11, r11, Operand(kParameterMapHeaderSize));
__ bind(&skip3);
}
// 2. Backing store.
- __ SmiToPtrArrayOffset(r7, r5);
+ __ SmiToPtrArrayOffset(r7, r8);
__ add(r11, r11, r7);
__ addi(r11, r11, Operand(FixedArray::kHeaderSize));
@@ -1788,7 +1685,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r11, r3, r6, r7, &runtime, TAG_OBJECT);
+ __ Allocate(r11, r3, r7, r11, &runtime, TAG_OBJECT);
// r3 = address of new object(s) (tagged)
// r5 = argument count (smi-tagged)
@@ -1801,7 +1698,7 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ LoadP(r7,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
- __ cmpi(r4, Operand::Zero());
+ __ cmpi(r9, Operand::Zero());
if (CpuFeatures::IsSupported(ISELECT)) {
__ LoadP(r11, MemOperand(r7, kNormalOffset));
__ LoadP(r7, MemOperand(r7, kAliasedOffset));
@@ -1817,28 +1714,27 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
}
// r3 = address of new object (tagged)
- // r4 = mapped parameter count (tagged)
// r5 = argument count (smi-tagged)
// r7 = address of arguments map (tagged)
+ // r9 = mapped parameter count (tagged)
__ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ LoadP(r6, MemOperand(sp, 2 * kPointerSize));
- __ AssertNotSmi(r6);
+ __ AssertNotSmi(r4);
const int kCalleeOffset =
JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
- __ StoreP(r6, FieldMemOperand(r3, kCalleeOffset), r0);
+ __ StoreP(r4, FieldMemOperand(r3, kCalleeOffset), r0);
// Use the length (smi tagged) and set that as an in-object property too.
- __ AssertSmi(r5);
+ __ AssertSmi(r8);
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
const int kLengthOffset =
JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
- __ StoreP(r5, FieldMemOperand(r3, kLengthOffset), r0);
+ __ StoreP(r8, FieldMemOperand(r3, kLengthOffset), r0);
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, r7 will point there, otherwise
@@ -1847,35 +1743,35 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
// r3 = address of new object (tagged)
- // r4 = mapped parameter count (tagged)
// r5 = argument count (tagged)
// r7 = address of parameter map or backing store (tagged)
+ // r9 = mapped parameter count (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
- __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+ __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
if (CpuFeatures::IsSupported(ISELECT)) {
- __ isel(eq, r6, r7, r6);
+ __ isel(eq, r4, r7, r4);
__ beq(&skip_parameter_map);
} else {
Label skip6;
__ bne(&skip6);
- // Move backing store address to r6, because it is
+ // Move backing store address to r4, because it is
// expected there when filling in the unmapped arguments.
- __ mr(r6, r7);
+ __ mr(r4, r7);
__ b(&skip_parameter_map);
__ bind(&skip6);
}
- __ LoadRoot(r9, Heap::kSloppyArgumentsElementsMapRootIndex);
- __ StoreP(r9, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
- __ AddSmiLiteral(r9, r4, Smi::FromInt(2), r0);
- __ StoreP(r9, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
+ __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex);
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+ __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0);
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
__ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
r0);
- __ SmiToPtrArrayOffset(r9, r4);
- __ add(r9, r7, r9);
- __ addi(r9, r9, Operand(kParameterMapHeaderSize));
- __ StoreP(r9, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
+ __ SmiToPtrArrayOffset(r8, r9);
+ __ add(r8, r8, r7);
+ __ addi(r8, r8, Operand(kParameterMapHeaderSize));
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
r0);
// Copy the parameter slots and the holes in the arguments.
@@ -1886,72 +1782,72 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// MIN_CONTEXT_SLOTS+parameter_count-1 ..
// MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
// We loop from right to left.
- Label parameters_loop, parameters_test;
- __ mr(r9, r4);
- __ LoadP(r11, MemOperand(sp, 0 * kPointerSize));
- __ AddSmiLiteral(r11, r11, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
- __ sub(r11, r11, r4);
- __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
- __ SmiToPtrArrayOffset(r6, r9);
- __ add(r6, r7, r6);
- __ addi(r6, r6, Operand(kParameterMapHeaderSize));
-
- // r9 = loop variable (tagged)
- // r4 = mapping index (tagged)
- // r6 = address of backing store (tagged)
+ Label parameters_loop;
+ __ mr(r8, r9);
+ __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
+ __ sub(r11, r11, r9);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ SmiToPtrArrayOffset(r4, r8);
+ __ add(r4, r4, r7);
+ __ addi(r4, r4, Operand(kParameterMapHeaderSize));
+
+ // r4 = address of backing store (tagged)
// r7 = address of parameter map (tagged)
// r8 = temporary scratch (a.o., for address calculation)
- // r10 = the hole value
- __ b(&parameters_test);
+ // r10 = temporary scratch (a.o., for address calculation)
+ // ip = the hole value
+ __ SmiUntag(r8);
+ __ mtctr(r8);
+ __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2));
+ __ add(r10, r4, r8);
+ __ add(r8, r7, r8);
+ __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
__ bind(&parameters_loop);
- __ SubSmiLiteral(r9, r9, Smi::FromInt(1), r0);
- __ SmiToPtrArrayOffset(r8, r9);
- __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
- __ StorePX(r11, MemOperand(r8, r7));
- __ subi(r8, r8, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
- __ StorePX(r10, MemOperand(r8, r6));
+ __ StorePU(r11, MemOperand(r8, -kPointerSize));
+ __ StorePU(ip, MemOperand(r10, -kPointerSize));
__ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
- __ bind(&parameters_test);
- __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
- __ bne(&parameters_loop);
+ __ bdnz(&parameters_loop);
+
+ // Restore r8 = argument count (tagged).
+ __ LoadP(r8, FieldMemOperand(r3, kLengthOffset));
__ bind(&skip_parameter_map);
- // r5 = argument count (tagged)
- // r6 = address of backing store (tagged)
- // r8 = scratch
+ // r3 = address of new object (tagged)
+ // r4 = address of backing store (tagged)
+ // r8 = argument count (tagged)
+ // r9 = mapped parameter count (tagged)
+ // r11 = scratch
// Copy arguments header and remaining slots (if there are any).
- __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r8, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
- __ StoreP(r5, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+ __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0);
+ __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0);
+ __ sub(r11, r8, r9, LeaveOE, SetRC);
+ __ Ret(eq, cr0);
- Label arguments_loop, arguments_test;
- __ mr(r11, r4);
- __ LoadP(r7, MemOperand(sp, 1 * kPointerSize));
- __ SmiToPtrArrayOffset(r8, r11);
- __ sub(r7, r7, r8);
- __ b(&arguments_test);
+ Label arguments_loop;
+ __ SmiUntag(r11);
+ __ mtctr(r11);
- __ bind(&arguments_loop);
- __ subi(r7, r7, Operand(kPointerSize));
- __ LoadP(r9, MemOperand(r7, 0));
- __ SmiToPtrArrayOffset(r8, r11);
- __ add(r8, r6, r8);
- __ StoreP(r9, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
- __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
+ __ SmiToPtrArrayOffset(r0, r9);
+ __ sub(r6, r6, r0);
+ __ add(r11, r4, r0);
+ __ addi(r11, r11,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ bind(&arguments_test);
- __ cmp(r11, r5);
- __ blt(&arguments_loop);
+ __ bind(&arguments_loop);
+ __ LoadPU(r7, MemOperand(r6, -kPointerSize));
+ __ StorePU(r7, MemOperand(r11, kPointerSize));
+ __ bdnz(&arguments_loop);
- // Return and remove the on-stack parameters.
- __ addi(sp, sp, Operand(3 * kPointerSize));
+ // Return.
__ Ret();
// Do the runtime call to allocate the arguments object.
- // r5 = argument count (tagged)
+ // r8 = argument count (tagged)
__ bind(&runtime);
- __ StoreP(r5, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ Push(r4, r6, r8);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
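
For orientation, the fast path above sizes three objects and allocates them with a single Allocate call: an optional parameter map, the backing store, and the arguments JSObject itself. A sketch of that size computation; the constant values here are stand-in assumptions (kParameterMapHeaderSize matches the definition in the diff, the others are illustrative):

const int kPointerSize = 8;  // assuming PPC64
const int kSloppyArgumentsObjectSize = 4 * kPointerSize;  // illustrative
const int kFixedArrayHeaderSize = 2 * kPointerSize;       // map + length
const int kParameterMapHeaderSize = kFixedArrayHeaderSize + 2 * kPointerSize;

int SloppyArgumentsAllocationSize(int argc, int mapped_count) {
  int size = 0;
  if (mapped_count > 0)  // 1. parameter map, only if something is mapped
    size += kParameterMapHeaderSize + mapped_count * kPointerSize;
  size += kFixedArrayHeaderSize + argc * kPointerSize;  // 2. backing store
  size += kSloppyArgumentsObjectSize;                   // 3. the JSObject
  return size;
}
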
@@ -1980,43 +1876,39 @@ void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // sp[0] : number of parameters
- // sp[4] : receiver displacement
- // sp[8] : function
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
- STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
- __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ beq(&adaptor_frame);
+ // r4 : function
+ // r5 : number of parameters (tagged)
+ // r6 : parameters pointer
- // Get the length from the frame.
- __ LoadP(r4, MemOperand(sp, 0));
- __ b(&try_allocate);
+ DCHECK(r4.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(r5.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(r6.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label try_allocate, runtime;
+ __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&try_allocate);
// Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ StoreP(r4, MemOperand(sp, 0));
- __ SmiToPtrArrayOffset(r6, r4);
- __ add(r6, r5, r6);
+ __ LoadP(r5, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToPtrArrayOffset(r6, r5);
+ __ add(r6, r6, r7);
__ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
- __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
// Try the new space allocation. Start out with computing the size
// of the arguments object and the elements array in words.
Label add_arguments_object;
__ bind(&try_allocate);
- __ cmpi(r4, Operand::Zero());
- __ beq(&add_arguments_object);
- __ SmiUntag(r4);
- __ addi(r4, r4, Operand(FixedArray::kHeaderSize / kPointerSize));
+ __ SmiUntag(r11, r5, SetRC);
+ __ beq(&add_arguments_object, cr0);
+ __ addi(r11, r11, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ addi(r4, r4, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
+ __ addi(r11, r11, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
// Do the allocation of both objects in one go.
- __ Allocate(r4, r3, r5, r6, &runtime,
+ __ Allocate(r11, r3, r7, r8, &runtime,
static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
// Get the arguments boilerplate from the current native context.
@@ -2028,88 +1920,54 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
MemOperand(r7, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
__ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
- __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ __ LoadRoot(r8, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r8, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r8, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
- __ AssertSmi(r4);
- __ StoreP(r4,
+ __ AssertSmi(r5);
+ __ StoreP(r5,
FieldMemOperand(r3, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
r0);
// If there are no actual arguments, we're done.
- Label done;
- __ cmpi(r4, Operand::Zero());
- __ beq(&done);
-
- // Get the parameters pointer from the stack.
- __ LoadP(r5, MemOperand(sp, 1 * kPointerSize));
+ __ SmiUntag(r9, r5, SetRC);
+ __ Ret(eq, cr0);
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ addi(r7, r3, Operand(Heap::kStrictArgumentsObjectSize));
__ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
- __ StoreP(r6, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
- __ StoreP(r4, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
- // Untag the length for the loop.
- __ SmiUntag(r4);
+ __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex);
+ __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
// Copy the fixed array slots.
Label loop;
// Set up r7 to point just prior to the first array slot.
__ addi(r7, r7,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
- __ mtctr(r4);
+ __ mtctr(r9);
__ bind(&loop);
- // Pre-decrement r5 with kPointerSize on each iteration.
+ // Pre-decrement r6 with kPointerSize on each iteration.
// Pre-decrement in order to skip receiver.
- __ LoadPU(r6, MemOperand(r5, -kPointerSize));
+ __ LoadPU(r8, MemOperand(r6, -kPointerSize));
// Pre-increment r7 with kPointerSize on each iteration.
- __ StorePU(r6, MemOperand(r7, kPointerSize));
+ __ StorePU(r8, MemOperand(r7, kPointerSize));
__ bdnz(&loop);
- // Return and remove the on-stack parameters.
- __ bind(&done);
- __ addi(sp, sp, Operand(3 * kPointerSize));
+ // Return.
__ Ret();
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
+ __ Push(r4, r6, r5);
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // Stack layout on entry.
- // sp[0] : language mode
- // sp[4] : index of rest parameter
- // sp[8] : number of parameters
- // sp[12] : receiver displacement
-
- Label runtime;
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
- __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
- __ bne(&runtime);
-
- // Patch the arguments.length and the parameters pointer.
- __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
- __ SmiToPtrArrayOffset(r6, r4);
- __ add(r6, r5, r6);
- __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
- __ StoreP(r6, MemOperand(sp, 3 * kPointerSize));
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -2605,27 +2463,25 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ beq(&done);
__ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset));
__ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex);
- __ bne(FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
+ __ bne(&check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(weak_value, &initialize);
__ b(&megamorphic);
- if (!FLAG_pretenuring_call_new) {
- __ bind(&check_allocation_site);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the slot either some other function or an
- // AllocationSite.
- __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
- __ bne(&miss);
+ __ bind(&check_allocation_site);
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite.
+ __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex);
+ __ bne(&miss);
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
- __ cmp(r4, r8);
- __ bne(&megamorphic);
- __ b(&done);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ cmp(r4, r8);
+ __ bne(&megamorphic);
+ __ b(&done);
__ bind(&miss);
@@ -2645,24 +2501,22 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// An uninitialized cache is patched with the function
__ bind(&initialize);
- if (!FLAG_pretenuring_call_new) {
- // Make sure the function is the Array() function.
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
- __ cmp(r4, r8);
- __ bne(&not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the
- // slot.
- CreateAllocationSiteStub create_stub(masm->isolate());
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
- __ b(&done);
-
- __ bind(&not_array_function);
- }
+ // Make sure the function is the Array() function.
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ cmp(r4, r8);
+ __ bne(&not_array_function);
- CreateWeakCellStub create_stub(masm->isolate());
+ // The target function is the Array constructor.
+ // Create an AllocationSite if we don't already have it, and store it in
+ // the slot.
+ CreateAllocationSiteStub create_stub(masm->isolate());
CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ __ b(&done);
+
+ __ bind(&not_array_function);
+
+ CreateWeakCellStub weak_cell_stub(masm->isolate());
+ CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
__ bind(&done);
}
@@ -2692,30 +2546,9 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
}
-static void EmitSlowCase(MacroAssembler* masm, int argc, Label* non_function) {
- // Check for function proxy.
- STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu);
- __ cmpi(r7, Operand(JS_FUNCTION_PROXY_TYPE));
- __ bne(non_function);
- __ push(r4); // put proxy as additional argument
- __ li(r3, Operand(argc + 1));
- __ li(r5, Operand::Zero());
- __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(non_function);
- __ StoreP(r4, MemOperand(sp, argc * kPointerSize), r0);
- __ li(r3, Operand(argc)); // Set up the number of arguments.
- __ li(r5, Operand::Zero());
- __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+static void EmitSlowCase(MacroAssembler* masm, int argc) {
+ __ mov(r3, Operand(argc));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -2737,12 +2570,12 @@ static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
static void CallFunctionNoFeedback(MacroAssembler* masm, int argc,
bool needs_checks, bool call_as_method) {
// r4 : the function to call
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
if (needs_checks) {
// Check that the function is really a JavaScript function.
// r4: pushed function (to be verified)
- __ JumpIfSmi(r4, &non_function);
+ __ JumpIfSmi(r4, &slow);
// Goto slow case if we do not have a function.
__ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
@@ -2777,7 +2610,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm, int argc,
if (needs_checks) {
// Slow-case: Non-function called.
__ bind(&slow);
- EmitSlowCase(masm, argc, &non_function);
+ EmitSlowCase(masm, argc);
}
if (call_as_method) {
@@ -2798,38 +2631,31 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// r5 : feedback vector
// r6 : slot in feedback vector (Smi, for RecordCallTarget)
// r7 : original constructor (for IsSuperConstructorCall)
- Label slow, non_function_call;
+ Label non_function;
// Check that the function is not a smi.
- __ JumpIfSmi(r4, &non_function_call);
+ __ JumpIfSmi(r4, &non_function);
// Check that the function is a JSFunction.
__ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE);
- __ bne(&slow);
+ __ bne(&non_function);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ SmiToPtrArrayOffset(r8, r6);
__ add(r8, r5, r8);
- if (FLAG_pretenuring_call_new) {
- // Put the AllocationSite from the feedback vector into r5.
- // By adding kPointerSize we encode that we know the AllocationSite
- // entry is at the feedback vector slot given by r6 + 1.
- __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize + kPointerSize));
+ // Put the AllocationSite from the feedback vector into r5, or undefined.
+ __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
+ __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
+ __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+ if (CpuFeatures::IsSupported(ISELECT)) {
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+ __ isel(eq, r5, r5, r8);
} else {
- // Put the AllocationSite from the feedback vector into r5, or undefined.
- __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
- __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
- __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
- __ isel(eq, r5, r5, r8);
- } else {
- Label feedback_register_initialized;
- __ beq(&feedback_register_initialized);
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
+ Label feedback_register_initialized;
+ __ beq(&feedback_register_initialized);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
}
__ AssertUndefinedOrAllocationSite(r5, r8);
@@ -2842,85 +2668,42 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ mr(r6, r4);
}
- // Jump to the function-specific construct stub.
- Register jmp_reg = r7;
- __ LoadP(jmp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(jmp_reg,
- FieldMemOperand(jmp_reg, SharedFunctionInfo::kConstructStubOffset));
- __ addi(ip, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset));
+ __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
__ JumpToJSEntry(ip);
- // r3: number of arguments
- // r4: called object
- // r8: object type
- Label do_call;
- __ bind(&slow);
- STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu);
- __ cmpi(r8, Operand(JS_FUNCTION_PROXY_TYPE));
- __ bne(&non_function_call);
- __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ b(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing r3).
- __ li(r5, Operand::Zero());
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
- __ LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(vector,
- FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(vector,
- FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+ __ bind(&non_function);
+ __ mr(r6, r4);
+ __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
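
A note on the rewritten feedback handling above: with the pretenuring flag gone, the stub always loads vector[slot] and normalizes it to undefined when the slot does not hold an AllocationSite, branch-free via isel when ISELECT is available. A minimal C++ sketch of that computation, with stand-in pointer types rather than V8's real classes:

    // Sketch only: models the r5 normalization in CallConstructStub.
    const void* NormalizeFeedback(const void* slot_value,  // vector[slot]
                                  const void* slot_map,
                                  const void* allocation_site_map,
                                  const void* undefined_value) {
      // With ISELECT this compiles to one conditional select (isel);
      // otherwise the stub emits a short branch over a LoadRoot.
      return slot_map == allocation_site_map ? slot_value : undefined_value;
    }
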
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r4 - function
// r6 - slot id
// r5 - vector
- Label miss;
- int argc = arg_count();
- ParameterCount actual(argc);
-
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
- __ cmp(r4, r7);
- __ bne(&miss);
+ // r7 - allocation site (loaded from vector[slot])
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ cmp(r4, r8);
+ __ bne(miss);
__ mov(r3, Operand(arg_count()));
- __ SmiToPtrArrayOffset(r9, r6);
- __ add(r9, r5, r9);
- __ LoadP(r7, FieldMemOperand(r9, FixedArray::kHeaderSize));
-
- // Verify that r7 contains an AllocationSite
- __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
- __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
- __ bne(&miss);
// Increment the call count for monomorphic function calls.
const int count_offset = FixedArray::kHeaderSize + kPointerSize;
- __ LoadP(r6, FieldMemOperand(r9, count_offset));
+ __ SmiToPtrArrayOffset(r8, r6);
+ __ add(r5, r5, r8);
+ __ LoadP(r6, FieldMemOperand(r5, count_offset));
__ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0);
- __ StoreP(r6, FieldMemOperand(r9, count_offset), r0);
+ __ StoreP(r6, FieldMemOperand(r5, count_offset), r0);
__ mr(r5, r7);
__ mr(r6, r4);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The slow case, we need this no matter what to complete a call after a miss.
- CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
-
- // Unreachable.
- __ stop("Unexpected code address");
}
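
HandleArrayCase no longer re-validates the AllocationSite; its caller in CallICStub::Generate does that before dispatching here. The helper only checks callee identity, bumps the call count stored one slot past the feedback entry, and tail-calls the array constructor stub. In rough C++ (illustrative names; assumes kCallCountIncrement == 1 and a 1-bit Smi tag, both of which vary by platform):

    #include <cstdint>

    bool HandleArrayCaseSketch(const void* callee, const void* array_function,
                               intptr_t* call_count /* vector[slot + 1] */) {
      if (callee != array_function) return false;  // branch to the miss label
      *call_count += intptr_t{1} << 1;             // Smi-tagged increment
      return true;  // TailCallStub(ArrayConstructorStub)
    }
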
@@ -2933,7 +2716,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
Label have_js_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2987,7 +2770,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper());
__ bind(&slow);
- EmitSlowCase(masm, argc, &non_function);
+ EmitSlowCase(masm, argc);
if (CallAsMethod()) {
__ bind(&wrap);
@@ -2995,11 +2778,21 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
- Label uninitialized, miss;
+ Label uninitialized, miss, not_allocation_site;
__ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
__ beq(&slow_start);
+ // Verify that r7 contains an AllocationSite
+ __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
+ __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+ __ bne(&not_allocation_site);
+
+ // We have an allocation site.
+ HandleArrayCase(masm, &miss);
+
+ __ bind(&not_allocation_site);
+
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
@@ -3072,7 +2865,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow_start);
// Check that the function is really a JavaScript function.
// r4: pushed function (to be verified)
- __ JumpIfSmi(r4, &non_function);
+ __ JumpIfSmi(r4, &slow);
// Goto slow case if we do not have a function.
__ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
@@ -3088,10 +2881,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(r4, r5, r6);
// Call the entry.
- Runtime::FunctionId id = GetICState() == DEFAULT
- ? Runtime::kCallIC_Miss
- : Runtime::kCallIC_Customization_Miss;
- __ CallRuntime(id, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss, 3);
// Move result to r4 and exit the internal frame.
__ mr(r4, r3);
@@ -3499,15 +3289,10 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ blr();
__ bind(&not_smi);
- Label not_heap_number;
- __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
- // r3: object
- // r4: instance type.
- __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
- __ bne(&not_heap_number);
- __ blr();
- __ bind(&not_heap_number);
+ __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE);
+ // r3: receiver
+ // r4: receiver instance type
+ __ Ret(eq);
Label not_string, slow_string;
__ cmpli(r4, Operand(FIRST_NONSTRING_TYPE));
@@ -3531,7 +3316,37 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ bind(&not_oddball);
__ push(r3); // Push argument.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+ __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+ // The ToString stub takes one argument in r3.
+ Label is_number;
+ __ JumpIfSmi(r3, &is_number);
+
+ __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE);
+ // r3: receiver
+ // r4: receiver instance type
+ __ Ret(lt);
+
+ Label not_heap_number;
+ __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
+ __ bne(&not_heap_number);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ cmpi(r4, Operand(ODDBALL_TYPE));
+ __ bne(&not_oddball);
+ __ LoadP(r3, FieldMemOperand(r3, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ push(r3); // Push argument.
+ __ TailCallRuntime(Runtime::kToString, 1, 1);
}
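
The new ToStringStub tries, in order: already a string (return unchanged), Smi or HeapNumber (share the NumberToString stub), Oddball (return its cached to_string), and only then falls back to the runtime. The same dispatch as standalone C++, with an illustrative enum in place of V8's instance types:

    enum class Kind { kSmi, kHeapNumber, kString, kOddball, kOther };

    const char* ToStringPath(Kind k) {
      switch (k) {
        case Kind::kString:     return "returned unchanged";
        case Kind::kSmi:
        case Kind::kHeapNumber: return "NumberToStringStub";
        case Kind::kOddball:    return "cached Oddball::to_string";
        default:                return "Runtime::kToString";
      }
    }
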
@@ -3655,39 +3470,37 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- Counters* counters = isolate()->counters();
-
- // Stack frame on entry.
- // sp[0]: right string
- // sp[4]: left string
- __ LoadP(r3, MemOperand(sp)); // Load right in r3, left in r4.
- __ LoadP(r4, MemOperand(sp, kPointerSize));
+ // ----------- S t a t e -------------
+ // -- r4 : left
+ // -- r3 : right
+ // -- lr : return address
+ // -----------------------------------
+ __ AssertString(r4);
+ __ AssertString(r3);
Label not_same;
__ cmp(r3, r4);
__ bne(&not_same);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
__ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
- __ IncrementCounter(counters->string_compare_native(), 1, r4, r5);
- __ addi(sp, sp, Operand(2 * kPointerSize));
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4,
+ r5);
__ Ret();
__ bind(&not_same);
// Check that both objects are sequential one-byte strings.
+ Label runtime;
__ JumpIfNotBothSequentialOneByteStrings(r4, r3, r5, r6, &runtime);
- // Compare flat one-byte strings natively. Remove arguments from stack first.
- __ IncrementCounter(counters->string_compare_native(), 1, r5, r6);
- __ addi(sp, sp, Operand(2 * kPointerSize));
+ // Compare flat one-byte strings natively.
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
+ r6);
StringHelper::GenerateCompareFlatOneByteStrings(masm, r4, r3, r5, r6, r7);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
+ __ Push(r4, r3);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
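
StringCompareStub now receives its operands in registers (left in r4, right in r3, per the new StringCompareDescriptor) instead of popping them off the stack, which is why the runtime fallback has to push them explicitly. The fast path, modeled in plain C++ (the real stub only handles flat one-byte strings inline; everything else goes to Runtime::kStringCompare):

    #include <string>

    int StringCompareSketch(const std::string& left, const std::string& right) {
      if (&left == &right) return 0;  // identical objects: EQUAL
      int r = left.compare(right);    // flat one-byte comparison
      return (r > 0) - (r < 0);       // LESS(-1) / EQUAL(0) / GREATER(1)
    }
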
@@ -3723,6 +3536,30 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
}
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::BOOLEAN, state());
+ Label miss;
+
+ __ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ __ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
+ if (op() != Token::EQ_STRICT && is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (!Token::IsEqualityOp(op())) {
+ __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset));
+ __ AssertSmi(r4);
+ __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
+ __ AssertSmi(r3);
+ }
+ __ sub(r3, r4, r3);
+ __ Ret();
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
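
Strong-mode throws aside, GenerateBooleans resolves everything inline: equality compares the two boolean objects directly (there are only two, so identity is equality), while ordering first loads each Oddball's cached to_number Smi (0 or 1). Sketched in C++:

    int CompareBooleansSketch(bool left, bool right, bool is_equality_op) {
      if (is_equality_op)
        return left == right ? 0 : 1;  // object subtraction: 0 means equal
      // Ordering: compare the cached to_number values.
      return static_cast<int>(left) - static_cast<int>(right);
    }
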
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
DCHECK(state() == CompareICState::SMI);
Label miss;
@@ -4033,8 +3870,20 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ cmp(r6, r7);
__ bne(&miss);
- __ sub(r3, r3, r4);
- __ Ret();
+ if (Token::IsEqualityOp(op())) {
+ __ sub(r3, r3, r4);
+ __ Ret();
+ } else if (is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (op() == Token::LT || op() == Token::LTE) {
+ __ LoadSmiLiteral(r5, Smi::FromInt(GREATER));
+ } else {
+ __ LoadSmiLiteral(r5, Smi::FromInt(LESS));
+ }
+ __ Push(r4, r3, r5);
+ __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -4607,33 +4456,26 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, r5);
+ __ EmitLoadTypeFeedbackVector(r5);
CallICStub stub(isolate(), state());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, r5);
- CallIC_ArrayStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); }
@@ -4642,11 +4484,10 @@ void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
- Register key, Register vector, Register slot,
- Register feedback, Register receiver_map,
- Register scratch1, Register scratch2,
- bool is_polymorphic, Label* miss) {
+static void HandleArrayCases(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, bool is_polymorphic,
+ Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
@@ -4763,8 +4604,7 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ bne(&not_array);
- HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
- scratch1, r10, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
@@ -4824,8 +4664,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, r10, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss);
__ bind(&not_array);
// Is it generic?
@@ -4845,8 +4684,7 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ add(feedback, vector, r0);
__ LoadP(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- scratch1, r10, false, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, false, &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
@@ -4858,14 +4696,14 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4882,11 +4720,52 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r4
+ Register key = VectorStoreICDescriptor::NameRegister(); // r5
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // r6
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // r7
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(r3)); // r3
+ Register feedback = r8;
+ Register receiver_map = r9;
+ Register scratch1 = r10;
+
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ add(feedback, vector, r0);
+ __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ bne(&not_array);
+
+ Register scratch2 = r11;
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true,
+ &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ bne(&miss);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map,
+ scratch1, scratch2);
- // TODO(mvstanton): Implement.
__ bind(&miss);
StoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ b(&compare_map);
}
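
VectorStoreICStub::GenerateImpl replaces the old TODO with the full dispatch on vector[slot]: a monomorphic weak-cell check first, then a FixedArray of polymorphic handlers, then the megamorphic symbol (which probes the stub cache), and finally a miss. As a decision table in C++ (illustrative enum, not V8's representation):

    enum class Feedback { kWeakCellForMap, kFixedArray, kMegamorphicSymbol, kOther };

    const char* StoreICDispatch(Feedback f) {
      switch (f) {
        case Feedback::kWeakCellForMap:    return "monomorphic handler";
        case Feedback::kFixedArray:        return "HandleArrayCases (polymorphic)";
        case Feedback::kMegamorphicSymbol: return "stub cache probe";
        default:                           return "StoreIC::GenerateMiss";
      }
    }
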
@@ -4900,12 +4779,135 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label start_polymorphic;
+ Label transition_call;
+
+ Register cached_map = scratch1;
+ Register too_far = scratch2;
+ Register pointer_reg = feedback;
+ __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+
+  // +-----+------+------+-----+-----+-----+ ... ----+
+  // | map | len  | wm0  | wt0 | h0  | wm1 |   hN    |
+  // +-----+------+------+-----+-----+-----+ ... ----+
+  //                 0      1     2             len-1
+  //                 ^                            ^
+  //                 |                            |
+  //            pointer_reg                    too_far
+  //            aka feedback                   scratch2
+  //                 also need receiver_map
+  //   use cached_map (scratch1) to look in the weak map values.
+ __ SmiToPtrArrayOffset(r0, too_far);
+ __ add(too_far, feedback, r0);
+ __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ addi(pointer_reg, feedback,
+ Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
+
+ __ bind(&next_loop);
+ __ LoadP(cached_map, MemOperand(pointer_reg));
+ __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ cmp(receiver_map, cached_map);
+ __ bne(&prepare_next);
+ // Is it a transitioning store?
+ __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize));
+ __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
+ __ bne(&transition_call);
+ __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
+ __ addi(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+ __ bind(&transition_call);
+ __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
+ __ JumpIfSmi(too_far, miss);
+
+ __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
+
+ // Load the map into the correct register.
+ DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ mr(feedback, too_far);
+
+ __ addi(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+ __ bind(&prepare_next);
+ __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
+ __ cmpl(pointer_reg, too_far);
+ __ blt(&next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ b(miss);
+}
+
+
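
Each entry in the handler array above is three pointers wide: weak map, transition map (or undefined when the store does not transition), and handler code. That is why the loop advances pointer_reg by kPointerSize * 3. The same walk in self-contained C++ with stand-in types:

    struct Entry {
      const void* weak_map;    // wmN in the diagram
      const void* transition;  // wtN; 'undefined' when non-transitioning
      const void* handler;     // hN: code to tail-call
    };

    const Entry* FindStoreHandler(const Entry* entries, int count,
                                  const void* receiver_map) {
      for (int i = 0; i < count; ++i) {  // stride of three pointers
        if (entries[i].weak_map == receiver_map) return &entries[i];
      }
      return nullptr;  // exhausted the array: branch to miss
    }
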
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // r4
+ Register key = VectorStoreICDescriptor::NameRegister(); // r5
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // r6
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // r7
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(r3)); // r3
+ Register feedback = r8;
+ Register receiver_map = r9;
+ Register scratch1 = r10;
+
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ add(feedback, vector, r0);
+ __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
+ scratch1, &compare_map, &load_smi_map, &try_array);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ bne(&not_array);
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+
+ Register scratch2 = r11;
+
+ HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
+ &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ bne(&try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ cmp(key, feedback);
+ __ bne(&miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ add(feedback, vector, r0);
+ __ LoadP(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
+ &miss);
- // TODO(mvstanton): Implement.
__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ b(&compare_map);
}
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
index 85f3c9ca98..bc6c26b217 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.h
+++ b/deps/v8/src/ppc/code-stubs-ppc.h
@@ -147,8 +147,9 @@ class RecordWriteStub : public PlatformCodeStub {
break;
}
DCHECK(GetMode(stub) == mode);
- CpuFeatures::FlushICache(stub->instruction_start() + Assembler::kInstrSize,
- 2 * Assembler::kInstrSize);
+ Assembler::FlushICache(stub->GetIsolate(),
+ stub->instruction_start() + Assembler::kInstrSize,
+ 2 * Assembler::kInstrSize);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 2238695587..b313d11bb3 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ppc/codegen-ppc.h"
+
#if V8_TARGET_ARCH_PPC
#include "src/codegen.h"
@@ -60,7 +62,7 @@ UnaryMathFunction CreateExpFunction() {
DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
#if !defined(USE_SIMULATOR)
@@ -97,7 +99,7 @@ UnaryMathFunction CreateSqrtFunction() {
DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
@@ -658,7 +660,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CpuFeatures::FlushICache(sequence, young_length);
+ Assembler::FlushICache(isolate, sequence, young_length);
} else {
// FIXED_SEQUENCE
Code* stub = GetCodeAgeStub(isolate, age, parity);
diff --git a/deps/v8/src/ppc/cpu-ppc.cc b/deps/v8/src/ppc/cpu-ppc.cc
index 079aa23403..a42fa53960 100644
--- a/deps/v8/src/ppc/cpu-ppc.cc
+++ b/deps/v8/src/ppc/cpu-ppc.cc
@@ -8,26 +8,12 @@
#include "src/assembler.h"
#include "src/macro-assembler.h"
-#include "src/simulator.h" // for cache flushing.
namespace v8 {
namespace internal {
void CpuFeatures::FlushICache(void* buffer, size_t size) {
- // Nothing to do flushing no instructions.
- if (size == 0) {
- return;
- }
-
-#if defined(USE_SIMULATOR)
- // Not generating PPC instructions for C-code. This means that we are
- // building an PPC emulator based target. We should notify the simulator
- // that the Icache was flushed.
- // None of this code ends up in the snapshot so there are no issues
- // around whether or not to generate the code when building snapshots.
- Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), buffer, size);
-#else
-
+#if !defined(USE_SIMULATOR)
if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
__asm__ __volatile__(
"sync \n"
@@ -54,7 +40,7 @@ void CpuFeatures::FlushICache(void* buffer, size_t size) {
: "r"(pointer));
}
-#endif // USE_SIMULATOR
+#endif // !USE_SIMULATOR
}
} // namespace internal
} // namespace v8
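
With the simulator branch hoisted out of CpuFeatures::FlushICache (it needed an Isolate, which this signature no longer supplies), the hardware path is a per-cache-line flush/invalidate loop. A hedged sketch of that sequence for a PPC target; the 128-byte line size is an assumption, and the real code also takes a shorter path when INSTR_AND_DATA_CACHE_COHERENCY is available:

    #include <cstddef>
    #include <cstdint>

    void FlushICacheSketch(void* buffer, size_t size) {
      const uintptr_t kCacheLineSize = 128;  // assumption: typical POWER line
      uintptr_t p = reinterpret_cast<uintptr_t>(buffer) & ~(kCacheLineSize - 1);
      const uintptr_t end = reinterpret_cast<uintptr_t>(buffer) + size;
      for (; p < end; p += kCacheLineSize) {
        // Flush the data cache line, then invalidate the matching icache line.
        __asm__ __volatile__(
            "dcbf 0, %0 \n"
            "sync       \n"
            "icbi 0, %0 \n"
            "isync      \n" ::"r"(p)
            : "memory");
      }
    }
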
diff --git a/deps/v8/src/ppc/frames-ppc.cc b/deps/v8/src/ppc/frames-ppc.cc
index 4437a167fc..e86ec681ec 100644
--- a/deps/v8/src/ppc/frames-ppc.cc
+++ b/deps/v8/src/ppc/frames-ppc.cc
@@ -10,6 +10,7 @@
#include "src/ppc/assembler-ppc.h"
#include "src/ppc/assembler-ppc-inl.h"
+#include "src/ppc/frames-ppc.h"
#include "src/ppc/macro-assembler-ppc.h"
namespace v8 {
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 9f8292f96b..c123e7c602 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -31,6 +31,11 @@ const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return r7; }
const Register VectorStoreICDescriptor::VectorRegister() { return r6; }
+const Register VectorStoreTransitionDescriptor::SlotRegister() { return r7; }
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return r6; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return r8; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return r6; }
@@ -41,14 +46,23 @@ const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r5; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r3; }
-const Register InstanceofDescriptor::left() { return r3; }
-const Register InstanceofDescriptor::right() { return r4; }
+const Register InstanceOfDescriptor::LeftRegister() { return r4; }
+const Register InstanceOfDescriptor::RightRegister() { return r3; }
+
+
+const Register StringCompareDescriptor::LeftRegister() { return r4; }
+const Register StringCompareDescriptor::RightRegister() { return r3; }
const Register ArgumentsAccessReadDescriptor::index() { return r4; }
const Register ArgumentsAccessReadDescriptor::parameter_count() { return r3; }
+const Register ArgumentsAccessNewDescriptor::function() { return r4; }
+const Register ArgumentsAccessNewDescriptor::parameter_count() { return r5; }
+const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return r6; }
+
+
const Register ApiGetterDescriptor::function_address() { return r5; }
@@ -64,10 +78,10 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
-void StoreTransitionDescriptor::InitializePlatformSpecific(
+void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- MapRegister()};
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ SlotRegister(), VectorRegister(), MapRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -94,6 +108,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToStringDescriptor::ReceiverRegister() { return r3; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return r3; }
@@ -181,6 +199,15 @@ void CallConstructDescriptor::InitializePlatformSpecific(
}
+void CallTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments
+ // r4 : the target to call
+ Register registers[] = {r4, r3};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r5, r4, r3};
@@ -362,6 +389,17 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ r3, // argument count (including receiver)
+ r5, // address of first argument
+ r4 // the target callable to be called

+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.cc b/deps/v8/src/ppc/lithium-codegen-ppc.cc
index de416b3fdb..ad6d8db13d 100644
--- a/deps/v8/src/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/ppc/lithium-codegen-ppc.cc
@@ -5,12 +5,12 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/ppc/lithium-codegen-ppc.h"
#include "src/ppc/lithium-gap-resolver-ppc.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
@@ -120,8 +120,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info_->language_mode()) && info_->MayUseThis() &&
- !info_->is_native() && info_->scope()->has_this_declaration()) {
+ if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
__ LoadP(r5, MemOperand(sp, receiver_offset));
@@ -177,16 +176,27 @@ bool LCodeGen::GeneratePrologue() {
if (info()->saves_caller_doubles()) {
SaveCallerDoubles();
}
+ return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+ Comment(";;; Prologue begin");
// Possibly allocate a local context.
- int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info()->scope()->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is in r4.
- DCHECK(!info()->scope()->is_script_scope());
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+ if (info()->scope()->is_script_scope()) {
+ __ push(r4);
+ __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ CallRuntime(Runtime::kNewScriptContext, 2);
+ deopt_mode = Safepoint::kLazyDeopt;
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -194,7 +204,8 @@ bool LCodeGen::GeneratePrologue() {
__ push(r4);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoLazyDeopt);
+ RecordSafepoint(deopt_mode);
+
// Context is returned in both r3 and cp. It replaces the context
// passed to us. It's saved in the stack and kept live in cp.
__ mr(cp, r3);
@@ -227,13 +238,7 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; End allocate local context");
}
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- // We have not executed any compiled code yet, so cp still holds the
- // incoming context.
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
+ Comment(";;; Prologue end");
}
@@ -750,7 +755,6 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- DCHECK(info()->IsOptimizing() || info()->IsStub());
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
@@ -980,11 +984,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCompare: {
- StringCompareStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -2180,6 +2179,13 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) {
+ int true_block = instr->TrueDestination(chunk_);
+ __ b(cond, chunk_->GetAssemblyLabel(true_block), cr);
+}
+
+
+template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
int false_block = instr->FalseDestination(chunk_);
__ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
@@ -2530,42 +2536,6 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
}
-Condition LCodeGen::EmitIsObject(Register input, Register temp1,
- Label* is_not_object, Label* is_object) {
- Register temp2 = scratch0();
- __ JumpIfSmi(input, is_not_object);
-
- __ LoadRoot(temp2, Heap::kNullValueRootIndex);
- __ cmp(input, temp2);
- __ beq(is_object);
-
- // Load map.
- __ LoadP(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ lbz(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
- __ TestBit(temp2, Map::kIsUndetectable, r0);
- __ bne(is_not_object, cr0);
-
- // Load instance type and check that it is in object type range.
- __ lbz(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
- __ cmpi(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ blt(is_not_object);
- __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- return le;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp1 = ToRegister(instr->temp());
-
- Condition true_cond = EmitIsObject(reg, temp1, instr->FalseLabel(chunk_),
- instr->TrueLabel(chunk_));
-
- EmitBranch(instr, true_cond);
-}
-
-
Condition LCodeGen::EmitIsString(Register input, Register temp1,
Label* is_not_string,
SmiCheck check_needed = INLINE_SMI_CHECK) {
@@ -2635,17 +2605,14 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- Token::Value op = instr->op();
+ DCHECK(ToRegister(instr->left()).is(r4));
+ DCHECK(ToRegister(instr->right()).is(r3));
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
- // This instruction also signals no smi code inlined
+ Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
__ cmpi(r3, Operand::Zero());
- Condition condition = ComputeCompareCondition(op);
-
- EmitBranch(instr, condition);
+ EmitBranch(instr, ComputeCompareCondition(instr->op()));
}
@@ -2795,157 +2762,42 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
DCHECK(ToRegister(instr->context()).is(cp));
- DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3.
- DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4.
-
- InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+ DCHECK(ToRegister(instr->result()).is(r3));
+ InstanceOfStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-
- if (CpuFeatures::IsSupported(ISELECT)) {
- __ mov(r4, Operand(factory()->true_value()));
- __ mov(r5, Operand(factory()->false_value()));
- __ cmpi(r3, Operand::Zero());
- __ isel(eq, r3, r4, r5);
- } else {
- Label equal, done;
- __ cmpi(r3, Operand::Zero());
- __ beq(&equal);
- __ mov(r3, Operand(factory()->false_value()));
- __ b(&done);
-
- __ bind(&equal);
- __ mov(r3, Operand(factory()->true_value()));
- __ bind(&done);
- }
}
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) {}
- void Generate() override {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
- &load_bool_);
- }
- LInstruction* instr() override { return instr_; }
- Label* map_check() { return &map_check_; }
- Label* load_bool() { return &load_bool_; }
-
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- Label load_bool_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new (zone()) DeferredInstanceOfKnownGlobal(this, instr);
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+ LHasInPrototypeChainAndBranch* instr) {
+ Register const object = ToRegister(instr->object());
+ Register const object_map = scratch0();
+ Register const object_prototype = object_map;
+ Register const prototype = ToRegister(instr->prototype());
- Label done, false_result;
- Register object = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
- Register result = ToRegister(instr->result());
-
- // A Smi is not instance of anything.
- __ JumpIfSmi(object, &false_result);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = temp;
- __ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
- {
- // Block trampoline emission to ensure the positions of instructions are
- // as expected by the patcher. See InstanceofStub::Generate().
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- __ bind(deferred->map_check()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch with
- // the cached map.
- Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ mov(ip, Operand(cell));
- __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
- __ cmp(map, ip);
- __ bc_short(ne, &cache_miss);
- __ bind(deferred->load_bool()); // Label for calculating code patching.
- // We use Factory::the_hole_value() on purpose instead of loading from the
- // root array to force relocation to be able to later patch
- // with true or false.
- __ mov(result, Operand(factory()->the_hole_value()));
+ // The {object} must be a spec object. It's sufficient to know that {object}
+ // is not a smi, since all other non-spec objects have {null} prototypes and
+ // will be ruled out below.
+ if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+ __ TestIfSmi(object, r0);
+ EmitFalseBranch(instr, eq, cr0);
}
- __ b(&done);
-
- // The inlined call site cache did not match. Check null and string before
- // calling the deferred code.
- __ bind(&cache_miss);
- // Null is not instance of anything.
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(object, ip);
- __ beq(&false_result);
-
- // String values is not instance of anything.
- Condition is_string = masm_->IsObjectStringType(object, temp);
- __ b(is_string, &false_result, cr0);
-
- // Go to the deferred code.
- __ b(deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
-
- // Here result has either true or false. Deferred code also produces true or
- // false object.
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check,
- Label* bool_load) {
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(flags |
- InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(isolate(), flags);
-
- PushSafepointRegistersScope scope(this);
- LoadContextFromDeferred(instr->context());
-
- __ Move(InstanceofStub::right(), instr->function());
- {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Handle<Code> code = stub.GetCode();
- // Include instructions below in delta: bitwise_mov32 + li + call
- int additional_delta = 3 * Instruction::kInstrSize + masm_->CallSize(code);
- // The labels must be already bound since the code has predictabel size up
- // to the call instruction.
- DCHECK(map_check->is_bound());
- DCHECK(bool_load->is_bound());
- int map_check_delta =
- masm_->InstructionsGeneratedSince(map_check) * Instruction::kInstrSize;
- int bool_load_delta =
- masm_->InstructionsGeneratedSince(bool_load) * Instruction::kInstrSize;
- // r8 is the delta from our callee's lr to the location of the map check.
- __ bitwise_mov32(r8, map_check_delta + additional_delta);
- // r9 is the delta from map check to bool load.
- __ li(r9, Operand(map_check_delta - bool_load_delta));
- CallCodeGeneric(code, RelocInfo::CODE_TARGET, instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- DCHECK_EQ((map_check_delta + additional_delta) / Instruction::kInstrSize,
- masm_->InstructionsGeneratedSince(map_check));
- }
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Put the result value (r3) into the result register slot and
- // restore all registers.
- __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
+ // Loop through the {object}'s prototype chain looking for the {prototype}.
+ __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ Label loop;
+ __ bind(&loop);
+ __ LoadP(object_prototype,
+ FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object_prototype, prototype);
+ EmitTrueBranch(instr, eq);
+ __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+ EmitFalseBranch(instr, eq);
+ __ LoadP(object_map,
+ FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+ __ b(&loop);
}
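
DoHasInPrototypeChainAndBranch replaces the patched-call-site machinery of DoInstanceOfKnownGlobal with a plain loop over the receiver's prototype chain: branch true on a match, false on reaching null. Its semantics as self-contained C++:

    struct ObjectSketch {
      const ObjectSketch* prototype;  // nullptr stands in for JS null
    };

    bool HasInPrototypeChain(const ObjectSketch* object,
                             const ObjectSketch* prototype) {
      for (const ObjectSketch* p = object->prototype; p != nullptr;
           p = p->prototype) {
        if (p == prototype) return true;  // EmitTrueBranch(eq)
      }
      return false;  // hit null: EmitFalseBranch(eq)
    }
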
@@ -3793,11 +3645,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
- // Set r3 to arguments count if adaption is not needed. Assumes that r3
- // is available to write to at this point.
- if (dont_adapt_arguments) {
- __ mov(r3, Operand(arity));
- }
+ // Always initialize r3 to the number of actual arguments.
+ __ mov(r3, Operand(arity));
bool is_self_call = function.is_identical_to(info()->closure());
@@ -4210,9 +4059,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(r4));
DCHECK(ToRegister(instr->result()).is(r3));
- if (instr->hydrogen()->pass_argument_count()) {
- __ mov(r3, Operand(instr->arity()));
- }
+ __ mov(r3, Operand(instr->arity()));
// Change context.
__ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
@@ -5848,7 +5695,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// r3 = regexp literal clone.
// r5 and r7-r9 are used as temporaries.
int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
__ Move(r10, instr->hydrogen()->literals());
__ LoadP(r4, FieldMemOperand(r10, literal_offset));
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -5883,26 +5730,6 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
}
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(cp));
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->kind());
- __ mov(r5, Operand(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else {
- __ mov(r5, Operand(instr->hydrogen()->shared_info()));
- __ mov(r4, Operand(pretenure ? factory()->true_value()
- : factory()->false_value()));
- __ Push(cp, r5, r4);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->value()).is(r6));
DCHECK(ToRegister(instr->result()).is(r3));
@@ -5969,24 +5796,25 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
final_branch_condition = ne;
} else if (String::Equals(type_name, factory->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- Register type_reg = scratch;
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
- __ beq(true_label);
- __ cmpi(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ andi(scratch, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ cmpi(scratch, Operand(1 << Map::kIsCallable));
final_branch_condition = eq;
} else if (String::Equals(type_name, factory->object_string())) {
- Register map = scratch;
__ JumpIfSmi(input, false_label);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ beq(true_label);
- __ CheckObjectTypeRange(input, map, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label);
- // Check for undetectable objects => false.
- __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
- __ ExtractBit(r0, scratch, Map::kIsUndetectable);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(input, scratch, ip, FIRST_SPEC_OBJECT_TYPE);
+ __ blt(false_label);
+ // Check for callable or undetectable objects => false.
+ __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ andi(r0, scratch,
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
__ cmpi(r0, Operand::Zero());
final_branch_condition = eq;
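
Both typeof checks now key off two map bit-field flags instead of instance-type ranges: "function" requires callable and not undetectable, and "object" requires a spec object that is neither. Expressed directly in C++ (bit positions are illustrative; the real ones are Map::kIsCallable and Map::kIsUndetectable):

    constexpr int kIsCallable = 1 << 4;      // assumption: placeholder bits
    constexpr int kIsUndetectable = 1 << 5;

    bool TypeofIsFunction(int bit_field) {
      return (bit_field & (kIsCallable | kIsUndetectable)) == kIsCallable;
    }

    bool TypeofIsObject(int bit_field, bool is_spec_object, bool is_null) {
      return is_null ||
             (is_spec_object &&
              (bit_field & (kIsCallable | kIsUndetectable)) == 0);
    }
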
@@ -6037,7 +5865,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.h b/deps/v8/src/ppc/lithium-codegen-ppc.h
index 392bbf5872..117dc574d5 100644
--- a/deps/v8/src/ppc/lithium-codegen-ppc.h
+++ b/deps/v8/src/ppc/lithium-codegen-ppc.h
@@ -111,8 +111,6 @@ class LCodeGen : public LCodeGenBase {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check, Label* bool_load);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
Register object, Register index);
@@ -243,6 +241,8 @@ class LCodeGen : public LCodeGenBase {
template <class InstrType>
void EmitBranch(InstrType instr, Condition condition, CRegister cr = cr7);
template <class InstrType>
+ void EmitTrueBranch(InstrType instr, Condition condition, CRegister cr = cr7);
+ template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition,
CRegister cr = cr7);
void EmitNumberUntagD(LNumberUntagD* instr, Register input,
@@ -254,12 +254,6 @@ class LCodeGen : public LCodeGenBase {
Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input,
Handle<String> type_name);
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input, Register temp1, Label* is_not_object,
- Label* is_object);
-
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
diff --git a/deps/v8/src/ppc/lithium-ppc.cc b/deps/v8/src/ppc/lithium-ppc.cc
index 6841db5d32..767c771fb3 100644
--- a/deps/v8/src/ppc/lithium-ppc.cc
+++ b/deps/v8/src/ppc/lithium-ppc.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/ppc/lithium-ppc.h"
+
#include <sstream>
#include "src/hydrogen-osr.h"
@@ -183,13 +185,6 @@ void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
value()->PrintTo(stream);
@@ -935,28 +930,25 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall()) {
+ if (instr->IsCall() || instr->IsPrologue()) {
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- LInstruction* instruction_needing_environment = NULL;
if (hydrogen_val->HasObservableSideEffects()) {
HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- instruction_needing_environment = instr;
sim->ReplayEnvironment(current_block_->last_environment());
hydrogen_value_for_lazy_bailout = sim;
}
LInstruction* bailout = AssignEnvironment(new (zone()) LLazyBailout());
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->SetDeferredLazyDeoptimizationEnvironment(
- bailout->environment());
- }
}
}
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+ return new (zone()) LPrologue();
+}
+
+
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new (zone()) LGoto(instr->FirstSuccessor());
}
@@ -1008,19 +1000,21 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+ LOperand* left =
+ UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+ LOperand* right =
+ UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
LOperand* context = UseFixed(instr->context(), cp);
- LInstanceOf* result = new (zone()) LInstanceOf(
- context, UseFixed(instr->left(), r3), UseFixed(instr->right(), r4));
+ LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, r3), instr);
}
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result = new (zone())
- LInstanceOfKnownGlobal(UseFixed(instr->context(), cp),
- UseFixed(instr->left(), r3), FixedTemp(r7));
- return MarkAsCall(DefineFixed(result, r3), instr);
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+ HHasInPrototypeChainAndBranch* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* prototype = UseRegister(instr->prototype());
+ return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
}
@@ -1745,14 +1739,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
}
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- return new (zone()) LIsObjectAndBranch(value, temp);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -2460,13 +2446,6 @@ LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), cp);
- return MarkAsCall(DefineFixed(new (zone()) LFunctionLiteral(context), r3),
- instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
diff --git a/deps/v8/src/ppc/lithium-ppc.h b/deps/v8/src/ppc/lithium-ppc.h
index 99ff9fedb7..e862a11f63 100644
--- a/deps/v8/src/ppc/lithium-ppc.h
+++ b/deps/v8/src/ppc/lithium-ppc.h
@@ -79,19 +79,17 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
+ V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -132,6 +130,7 @@ class LCodeGen;
V(OsrEntry) \
V(Parameter) \
V(Power) \
+ V(Prologue) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -231,8 +230,6 @@ class LInstruction : public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {}
-
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
@@ -387,6 +384,12 @@ class LGoto final : public LTemplateInstruction<0, 0, 0> {
};
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) {}
@@ -973,23 +976,6 @@ class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
};
-class LIsObjectAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1147,41 +1133,27 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* left() { return inputs_[1]; }
- LOperand* right() { return inputs_[2]; }
+ LOperand* context() const { return inputs_[0]; }
+ LOperand* left() const { return inputs_[1]; }
+ LOperand* right() const { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal final : public LTemplateInstruction<1, 2, 1> {
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
- LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
+ LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+ inputs_[0] = object;
+ inputs_[1] = prototype;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) override {
- lazy_deopt_env_ = env;
- }
+ LOperand* object() const { return inputs_[0]; }
+ LOperand* prototype() const { return inputs_[1]; }
- private:
- LEnvironment* lazy_deopt_env_;
+ DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+ "has-in-prototype-chain-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
};
@@ -2487,17 +2459,6 @@ class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFunctionLiteral(LOperand* context) { inputs_[0] = context; }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) { inputs_[0] = value; }
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 676cb2c60e..e973471572 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -11,8 +11,8 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
+#include "src/ppc/macro-assembler-ppc.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -753,6 +753,14 @@ void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
}
+void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+ LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ LoadP(vector,
+ FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
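
The three dependent loads above amount to the following pointer chase, shown here as a plain-C++ sketch with illustrative struct layouts (V8's real objects are tagged and offset-addressed):

struct TypeFeedbackVector {};
struct SharedFunctionInfo { TypeFeedbackVector* feedback_vector; };
struct JSFunction { SharedFunctionInfo* shared_function_info; };
struct JavaScriptFrame { JSFunction* function; };

TypeFeedbackVector* LoadTypeFeedbackVector(const JavaScriptFrame* fp) {
  JSFunction* function = fp->function;                          // frame slot
  SharedFunctionInfo* shared = function->shared_function_info;  // field load
  return shared->feedback_vector;                               // field load
}
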
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
@@ -987,10 +995,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
+ mov(r3, Operand(actual.immediate()));
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
- mov(r3, Operand(actual.immediate()));
const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
if (expected.immediate() == sentinel) {
// Don't worry about adapting arguments for builtins that
@@ -1005,9 +1013,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
} else {
if (actual.is_immediate()) {
+ mov(r3, Operand(actual.immediate()));
cmpi(expected.reg(), Operand(actual.immediate()));
beq(&regular_invoke);
- mov(r3, Operand(actual.immediate()));
} else {
cmp(expected.reg(), actual.reg());
beq(&regular_invoke);
@@ -1122,23 +1130,6 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
}
-void MacroAssembler::IsObjectJSObjectType(Register heap_object, Register map,
- Register scratch, Label* fail) {
- LoadP(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map, Register scratch,
- Label* fail) {
- lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- cmpi(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- blt(fail);
- cmpi(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- bgt(fail);
-}
-
-
void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
Label* fail) {
DCHECK(kNotStringTag != 0);
@@ -1701,20 +1692,6 @@ void MacroAssembler::CompareObjectType(Register object, Register map,
}
-void MacroAssembler::CheckObjectTypeRange(Register object, Register map,
- InstanceType min_type,
- InstanceType max_type,
- Label* false_label) {
- STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
- STATIC_ASSERT(LAST_TYPE < 256);
- LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
- lbz(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
- subi(ip, ip, Operand(min_type));
- cmpli(ip, Operand(max_type - min_type));
- bgt(false_label);
-}
-
-
void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
InstanceType type) {
STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
@@ -1979,36 +1956,7 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss,
- bool miss_on_bound_function) {
- Label non_instance;
- if (miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function. Load map into result reg.
- CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
- bne(miss);
-
- LoadP(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- lwz(scratch,
- FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
- TestBit(scratch,
-#if V8_TARGET_ARCH_PPC64
- SharedFunctionInfo::kBoundFunction,
-#else
- SharedFunctionInfo::kBoundFunction + kSmiTagSize,
-#endif
- r0);
- bne(miss, cr0);
-
- // Make sure that the function has an instance prototype.
- lbz(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- andi(r0, scratch, Operand(1 << Map::kHasNonInstancePrototype));
- bne(&non_instance, cr0);
- }
-
+ Register scratch, Label* miss) {
// Get the prototype or initial map from the function.
LoadP(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -2028,15 +1976,6 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
// Get the prototype from the initial map.
LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
- if (miss_on_bound_function) {
- b(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- GetMapConstructor(result, result, scratch, ip);
- }
-
// All done.
bind(&done);
}
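
With the bound-function path removed, what remains of TryGetFunctionPrototype is roughly the following, sketched in plain C++ with invented field names (nullptr stands in for the miss label):

struct Map { const void* prototype; };
struct JSFunction {
  // Holds either the prototype itself or the initial map that carries it.
  const void* prototype_or_initial_map;
  bool holds_initial_map;
};

const void* TryGetFunctionPrototype(const JSFunction& f) {
  if (f.prototype_or_initial_map == nullptr) return nullptr;  // miss
  if (f.holds_initial_map) {
    return static_cast<const Map*>(f.prototype_or_initial_map)->prototype;
  }
  return f.prototype_or_initial_map;
}
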
@@ -2312,12 +2251,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
+void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
- GetBuiltinEntry(ip, id);
+ GetBuiltinEntry(ip, native_context_index);
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(ip));
CallJSEntry(ip);
@@ -2330,21 +2269,20 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
+ int native_context_index) {
// Load the builtins object into target register.
LoadP(target,
MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- LoadP(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ LoadP(target, FieldMemOperand(target, GlobalObject::kNativeContextOffset));
// Load the JavaScript builtin function from the builtins object.
- LoadP(target,
- FieldMemOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)),
- r0);
+ LoadP(target, ContextOperand(target, native_context_index), r0);
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+void MacroAssembler::GetBuiltinEntry(Register target,
+ int native_context_index) {
DCHECK(!target.is(r4));
- GetBuiltinFunction(r4, id);
+ GetBuiltinFunction(r4, native_context_index);
// Load the code entry point from the builtins object.
LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
}
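
With Builtins::JavaScript ids gone, a JavaScript builtin is now just a slot in the native context. A hedged sketch of the new lookup (the layout and slot count are illustrative, not V8's):

struct JSFunction {};
struct NativeContext {
  JSFunction* slots[128];  // slot count illustrative
};
struct GlobalObject { NativeContext* native_context; };

JSFunction* GetBuiltinFunction(GlobalObject* global,
                               int native_context_index) {
  // Load the native context from the global object, then index into it.
  return global->native_context->slots[native_context_index];
}
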
@@ -2468,6 +2406,12 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+ LoadP(dst, GlobalObjectOperand());
+ LoadP(dst, FieldMemOperand(dst, GlobalObject::kGlobalProxyOffset));
+}
+
+
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind, ElementsKind transitioned_kind,
Register map_in_out, Register scratch, Label* no_map_match) {
@@ -2644,6 +2588,19 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertFunction(Register object) {
+ if (emit_debug_code()) {
+ STATIC_ASSERT(kSmiTag == 0);
+ TestIfSmi(object, r0);
+ Check(ne, kOperandIsASmiAndNotAFunction, cr0);
+ push(object);
+ CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
+ pop(object);
+ Check(eq, kOperandIsNotAFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
if (emit_debug_code()) {
@@ -2678,78 +2635,6 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
}
-void MacroAssembler::LookupNumberStringCache(Register object, Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- LoadP(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- ShiftRightArithImm(mask, mask, kSmiTagSize + kSmiShiftSize + 1);
- subi(mask, mask, Operand(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- JumpIfSmi(object, &is_smi);
- CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- lwz(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- lwz(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
- xor_(scratch1, scratch1, scratch2);
- and_(scratch1, scratch1, mask);
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2 + 1));
- add(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- LoadP(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- JumpIfSmi(probe, not_found);
- lfd(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
- lfd(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
- fcmpu(d0, d1);
- bne(not_found); // The cache did not contain this value.
- b(&load_result_from_cache);
-
- bind(&is_smi);
- Register scratch = scratch1;
- SmiUntag(scratch, object);
- and_(scratch, mask, scratch);
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2 + 1));
- add(scratch, number_string_cache, scratch);
-
- // Check if the entry is the smi we are looking for.
- LoadP(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- cmp(object, probe);
- bne(not_found);
-
- // Get the result from the cache.
- bind(&load_result_from_cache);
- LoadP(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
- IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
- scratch1, scratch2);
-}
-
-
void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
@@ -3244,175 +3129,6 @@ void MacroAssembler::DecodeConstantPoolOffset(Register result,
}
-void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
- Register new_value) {
- lwz(scratch, MemOperand(location));
-
- if (FLAG_enable_embedded_constant_pool) {
- if (emit_debug_code()) {
- // Check that the instruction sequence is a load from the constant pool
- ExtractBitMask(scratch, scratch, 0x1f * B16);
- cmpi(scratch, Operand(kConstantPoolRegister.code()));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
- // Scratch was clobbered. Restore it.
- lwz(scratch, MemOperand(location));
- }
- DecodeConstantPoolOffset(scratch, location);
- StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
- return;
- }
-
- // This code assumes a FIXED_SEQUENCE for lis/ori
-
- // At this point scratch is a lis instruction.
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
- Cmpi(scratch, Operand(ADDIS), r0);
- Check(eq, kTheInstructionToPatchShouldBeALis);
- lwz(scratch, MemOperand(location));
- }
-
-// insert new high word into lis instruction
-#if V8_TARGET_ARCH_PPC64
- srdi(ip, new_value, Operand(32));
- rlwimi(scratch, ip, 16, 16, 31);
-#else
- rlwimi(scratch, new_value, 16, 16, 31);
-#endif
-
- stw(scratch, MemOperand(location));
-
- lwz(scratch, MemOperand(location, kInstrSize));
- // scratch is now ori.
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask));
- Cmpi(scratch, Operand(ORI), r0);
- Check(eq, kTheInstructionShouldBeAnOri);
- lwz(scratch, MemOperand(location, kInstrSize));
- }
-
-// insert new low word into ori instruction
-#if V8_TARGET_ARCH_PPC64
- rlwimi(scratch, ip, 0, 16, 31);
-#else
- rlwimi(scratch, new_value, 0, 16, 31);
-#endif
- stw(scratch, MemOperand(location, kInstrSize));
-
-#if V8_TARGET_ARCH_PPC64
- if (emit_debug_code()) {
- lwz(scratch, MemOperand(location, 2 * kInstrSize));
- // scratch is now sldi.
- And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
- Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
- Check(eq, kTheInstructionShouldBeASldi);
- }
-
- lwz(scratch, MemOperand(location, 3 * kInstrSize));
- // scratch is now ori.
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask));
- Cmpi(scratch, Operand(ORIS), r0);
- Check(eq, kTheInstructionShouldBeAnOris);
- lwz(scratch, MemOperand(location, 3 * kInstrSize));
- }
-
- rlwimi(scratch, new_value, 16, 16, 31);
- stw(scratch, MemOperand(location, 3 * kInstrSize));
-
- lwz(scratch, MemOperand(location, 4 * kInstrSize));
- // scratch is now ori.
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask));
- Cmpi(scratch, Operand(ORI), r0);
- Check(eq, kTheInstructionShouldBeAnOri);
- lwz(scratch, MemOperand(location, 4 * kInstrSize));
- }
- rlwimi(scratch, new_value, 0, 16, 31);
- stw(scratch, MemOperand(location, 4 * kInstrSize));
-#endif
-
-// Update the I-cache so the new lis and addic can be executed.
-#if V8_TARGET_ARCH_PPC64
- FlushICache(location, 5 * kInstrSize, scratch);
-#else
- FlushICache(location, 2 * kInstrSize, scratch);
-#endif
-}
-
-
-void MacroAssembler::GetRelocatedValue(Register location, Register result,
- Register scratch) {
- lwz(result, MemOperand(location));
-
- if (FLAG_enable_embedded_constant_pool) {
- if (emit_debug_code()) {
- // Check that the instruction sequence is a load from the constant pool
- ExtractBitMask(result, result, 0x1f * B16);
- cmpi(result, Operand(kConstantPoolRegister.code()));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
- lwz(result, MemOperand(location));
- }
- DecodeConstantPoolOffset(result, location);
- LoadPX(result, MemOperand(kConstantPoolRegister, result));
- return;
- }
-
- // This code assumes a FIXED_SEQUENCE for lis/ori
- if (emit_debug_code()) {
- And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
- Cmpi(result, Operand(ADDIS), r0);
- Check(eq, kTheInstructionShouldBeALis);
- lwz(result, MemOperand(location));
- }
-
- // result now holds a lis instruction. Extract the immediate.
- slwi(result, result, Operand(16));
-
- lwz(scratch, MemOperand(location, kInstrSize));
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask));
- Cmpi(scratch, Operand(ORI), r0);
- Check(eq, kTheInstructionShouldBeAnOri);
- lwz(scratch, MemOperand(location, kInstrSize));
- }
- // Copy the low 16bits from ori instruction into result
- rlwimi(result, scratch, 0, 16, 31);
-
-#if V8_TARGET_ARCH_PPC64
- if (emit_debug_code()) {
- lwz(scratch, MemOperand(location, 2 * kInstrSize));
- // scratch is now sldi.
- And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
- Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
- Check(eq, kTheInstructionShouldBeASldi);
- }
-
- lwz(scratch, MemOperand(location, 3 * kInstrSize));
- // scratch is now ori.
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask));
- Cmpi(scratch, Operand(ORIS), r0);
- Check(eq, kTheInstructionShouldBeAnOris);
- lwz(scratch, MemOperand(location, 3 * kInstrSize));
- }
- sldi(result, result, Operand(16));
- rldimi(result, scratch, 0, 48);
-
- lwz(scratch, MemOperand(location, 4 * kInstrSize));
- // scratch is now ori.
- if (emit_debug_code()) {
- And(scratch, scratch, Operand(kOpcodeMask));
- Cmpi(scratch, Operand(ORI), r0);
- Check(eq, kTheInstructionShouldBeAnOri);
- lwz(scratch, MemOperand(location, 4 * kInstrSize));
- }
- sldi(result, result, Operand(16));
- rldimi(result, scratch, 0, 48);
-#endif
-}
-
-
void MacroAssembler::CheckPageFlag(
Register object,
Register scratch, // scratch may be same register as object
@@ -4040,6 +3756,25 @@ void MacroAssembler::MovDoubleToInt64(
}
+void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
+ subi(sp, sp, Operand(kFloatSize));
+ stw(src, MemOperand(sp, 0));
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lfs(dst, MemOperand(sp, 0));
+ addi(sp, sp, Operand(kFloatSize));
+}
+
+
+void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
+ subi(sp, sp, Operand(kFloatSize));
+ frsp(src, src);
+ stfs(src, MemOperand(sp, 0));
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lwz(dst, MemOperand(sp, 0));
+ addi(sp, sp, Operand(kFloatSize));
+}
+
+
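
The stack round-trips above are the assembly analogue of a 32-bit bit-preserving move between integer and floating-point registers. A C++ sketch of the same idea (memcpy plays the role of the store/reload pair; in the assembly version lfs additionally widens to double, and frsp first rounds the double to single precision):

#include <cstdint>
#include <cstring>

float IntBitsToFloat(uint32_t src) {  // stw + lfs
  float dst;
  std::memcpy(&dst, &src, sizeof(dst));
  return dst;
}

uint32_t FloatToIntBits(float src) {  // frsp + stfs + lwz
  uint32_t dst;
  std::memcpy(&dst, &src, sizeof(dst));
  return dst;
}
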
void MacroAssembler::Add(Register dst, Register src, intptr_t value,
Register scratch) {
if (is_int16(value)) {
@@ -4601,7 +4336,7 @@ CodePatcher::CodePatcher(byte* address, int instructions,
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
- CpuFeatures::FlushICache(address_, size_);
+ Assembler::FlushICacheWithoutIsolate(address_, size_);
}
// Check that the code was patched as expected.
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 64396bb3a4..f87c563e72 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -415,6 +415,9 @@ class MacroAssembler : public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst);
+
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
@@ -494,6 +497,8 @@ class MacroAssembler : public Assembler {
Register dst_hi,
#endif
Register dst, DoubleRegister src);
+ void MovIntToFloat(DoubleRegister dst, Register src);
+ void MovFloatToInt(Register dst, DoubleRegister src);
void Add(Register dst, Register src, intptr_t value, Register scratch);
void Cmpi(Register src1, const Operand& src2, Register scratch,
@@ -549,11 +554,6 @@ class MacroAssembler : public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
- void IsObjectJSObjectType(Register heap_object, Register map,
- Register scratch, Label* fail);
-
- void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
-
void IsObjectJSStringType(Register object, Register scratch, Label* fail);
void IsObjectNameType(Register object, Register scratch, Label* fail);
@@ -704,8 +704,7 @@ class MacroAssembler : public Assembler {
// function register will be untouched; the other registers may be
// clobbered.
void TryGetFunctionPrototype(Register function, Register result,
- Register scratch, Label* miss,
- bool miss_on_bound_function = false);
+ Register scratch, Label* miss);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
@@ -718,13 +717,6 @@ class MacroAssembler : public Assembler {
void CompareObjectType(Register heap_object, Register map, Register type_reg,
InstanceType type);
- // Compare object type for heap object. Branch to false_label if type
- // is lower than min_type or greater than max_type.
- // Load map into the register map.
- void CheckObjectTypeRange(Register heap_object, Register map,
- InstanceType min_type, InstanceType max_type,
- Label* false_label);
-
// Compare instance type in a map. map contains a valid map object whose
// object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -794,7 +786,23 @@ class MacroAssembler : public Assembler {
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
void CompareRoot(Register obj, Heap::RootListIndex index);
+ void PushRoot(Heap::RootListIndex index) {
+ LoadRoot(r0, index);
+ Push(r0);
+ }
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
+ CompareRoot(with, index);
+ beq(if_equal);
+ }
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, Heap::RootListIndex index,
+ Label* if_not_equal) {
+ CompareRoot(with, index);
+ bne(if_not_equal);
+ }
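
A usage sketch for the new root-comparison helpers; the register, root index, and label are illustrative and assume a surrounding MacroAssembler-generated stub:

Label if_undefined;
JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &if_undefined);
// Falls through here when r3 does not hold undefined.
bind(&if_undefined);
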
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
@@ -888,21 +896,9 @@ class MacroAssembler : public Assembler {
void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
-  void RetOnOverflow(void) {
-    Label label;
-    blt(&label, cr0);
-    Ret();
-    bind(&label);
-  }
-
-  void RetOnNoOverflow(void) {
-    Label label;
-
-    bge(&label, cr0);
-    Ret();
-    bind(&label);
-  }
+  void RetOnOverflow(void) { Ret(lt, cr0); }
+  void RetOnNoOverflow(void) { Ret(ge, cr0); }
// ---------------------------------------------------------------------------
// Runtime calls
@@ -984,17 +980,16 @@ class MacroAssembler : public Assembler {
// Jump to a runtime routine.
void JumpToExternalReference(const ExternalReference& builtin);
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
+ // Invoke specified builtin JavaScript function.
+ void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
// setup the function in r1.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ void GetBuiltinEntry(Register target, int native_context_index);
// Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void GetBuiltinFunction(Register target, int native_context_index);
Handle<Object> CodeObject() {
DCHECK(!code_object_.is_null());
@@ -1314,6 +1309,8 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ void AssertFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
@@ -1331,15 +1328,6 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// String utilities
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- void LookupNumberStringCache(Register object, Register result,
- Register scratch1, Register scratch2,
- Register scratch3, Label* not_found);
-
// Checks if both objects are sequential one-byte strings and jumps to label
// if either is not. Assumes that neither object is a smi.
void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
@@ -1378,11 +1366,6 @@ class MacroAssembler : public Assembler {
// Caller must place the instruction word at <location> in <result>.
void DecodeConstantPoolOffset(Register result, Register location);
- // Retrieve/patch the relocated value (lis/ori pair or constant pool load).
- void GetRelocatedValue(Register location, Register result, Register scratch);
- void SetRelocatedValue(Register location, Register scratch,
- Register new_value);
-
void ClampUint8(Register output_reg, Register input_reg);
// Saturate a value into 8-bit unsigned integer
@@ -1433,6 +1416,9 @@ class MacroAssembler : public Assembler {
DecodeFieldToSmi<Field>(reg, reg);
}
+ // Load the type feedback vector from a JavaScript frame.
+ void EmitLoadTypeFeedbackVector(Register vector);
+
// Activation support.
void EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg = false);
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index b1541f616a..1bdcb85db7 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -102,8 +102,8 @@ PreParserExpression PreParserTraits::ParseFunctionLiteral(
PreParser::PreParseResult PreParser::PreParseLazyFunction(
- LanguageMode language_mode, FunctionKind kind, ParserRecorder* log,
- Scanner::BookmarkScope* bookmark) {
+ LanguageMode language_mode, FunctionKind kind, bool has_simple_parameters,
+ ParserRecorder* log, Scanner::BookmarkScope* bookmark) {
log_ = log;
// Lazy functions always have trivial outer scopes (no with/catch scopes).
Scope* top_scope = NewScope(scope_, SCRIPT_SCOPE);
@@ -113,6 +113,7 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
scope_->SetLanguageMode(language_mode);
Scope* function_scope = NewScope(
scope_, IsArrowFunction(kind) ? ARROW_SCOPE : FUNCTION_SCOPE, kind);
+ if (!has_simple_parameters) function_scope->SetHasNonSimpleParameters();
PreParserFactory function_factory(NULL);
FunctionState function_state(&function_state_, &scope_, function_scope, kind,
&function_factory);
@@ -121,7 +122,7 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
int start_position = peek_position();
ParseLazyFunctionLiteralBody(&ok, bookmark);
if (bookmark && bookmark->HasBeenReset()) {
- ; // Do nothing, as we've just aborted scanning this function.
+ // Do nothing, as we've just aborted scanning this function.
} else if (stack_overflow()) {
return kPreParseStackOverflow;
} else if (!ok) {
@@ -198,7 +199,7 @@ PreParser::Statement PreParser::ParseStatementListItem(bool* ok) {
}
break;
case Token::LET:
- if (allow_let()) {
+ if (IsNextLetKeyword()) {
return ParseVariableStatement(kStatementListItem, ok);
}
break;
@@ -231,9 +232,8 @@ void PreParser::ParseStatementList(int end_token, bool* ok,
Statement statement = ParseStatementListItem(ok);
if (!*ok) return;
- if (is_strong(language_mode()) &&
- scope_->is_function_scope() &&
- i::IsConstructor(function_state_->kind())) {
+ if (is_strong(language_mode()) && scope_->is_function_scope() &&
+ IsClassConstructor(function_state_->kind())) {
Scanner::Location this_loc = function_state_->this_location();
Scanner::Location super_loc = function_state_->super_location();
if (this_loc.beg_pos != old_this_loc.beg_pos &&
@@ -251,15 +251,41 @@ void PreParser::ParseStatementList(int end_token, bool* ok,
}
if (directive_prologue) {
- if (statement.IsUseStrictLiteral()) {
+ bool use_strict_found = statement.IsUseStrictLiteral();
+ bool use_strong_found =
+ statement.IsUseStrongLiteral() && allow_strong_mode();
+
+ if (use_strict_found) {
scope_->SetLanguageMode(
static_cast<LanguageMode>(scope_->language_mode() | STRICT));
- } else if (statement.IsUseStrongLiteral() && allow_strong_mode()) {
+ } else if (use_strong_found) {
scope_->SetLanguageMode(static_cast<LanguageMode>(
scope_->language_mode() | STRONG));
+ if (IsClassConstructor(function_state_->kind())) {
+ // "use strong" cannot occur in a class constructor body, to avoid
+ // unintuitive strong class object semantics.
+ PreParserTraits::ReportMessageAt(
+ token_loc, MessageTemplate::kStrongConstructorDirective);
+ *ok = false;
+ return;
+ }
} else if (!statement.IsStringLiteral()) {
directive_prologue = false;
}
+
+ if ((use_strict_found || use_strong_found) &&
+ !scope_->HasSimpleParameters()) {
+ // TC39 deemed "use strict" directives to be an error when occurring
+      // in the body of a function with a non-simple parameter list, on
+ // 29/7/2015. https://goo.gl/ueA7Ln
+ //
+      // In V8, this also applies to "use strong" directives.
+ PreParserTraits::ReportMessageAt(
+ token_loc, MessageTemplate::kIllegalLanguageModeDirective,
+ use_strict_found ? "use strict" : "use strong");
+ *ok = false;
+ return;
+ }
}
// If we're allowed to reset to a bookmark, we will do so when we see a long
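
The rule the new block enforces, restated as a sketch (the JavaScript samples in the comments show which bodies the check rejects; the helper name is invented):

// function ok(a, b)   { "use strict"; }  // simple parameters: allowed
// function bad(a = 1) { "use strict"; }  // default value: rejected
// function bad2(...r) { "use strict"; }  // rest parameter: rejected
bool LanguageModeDirectiveAllowed(bool directive_found,
                                  bool has_simple_parameters) {
  return !directive_found || has_simple_parameters;
}
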
@@ -612,7 +638,7 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
// Fall through.
case Token::SUPER:
if (is_strong(language_mode()) &&
- i::IsConstructor(function_state_->kind())) {
+ IsClassConstructor(function_state_->kind())) {
bool is_this = peek() == Token::THIS;
Expression expr = Expression::Default();
ExpressionClassifier classifier;
@@ -673,8 +699,9 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
}
// Parsed expression statement.
// Detect attempts at 'let' declarations in sloppy mode.
- if (peek() == Token::IDENTIFIER && is_sloppy(language_mode()) &&
- expr.IsIdentifier() && expr.AsIdentifier().IsLet()) {
+ if (!allow_harmony_sloppy_let() && peek() == Token::IDENTIFIER &&
+ is_sloppy(language_mode()) && expr.IsIdentifier() &&
+ expr.AsIdentifier().IsLet()) {
ReportMessage(MessageTemplate::kSloppyLexical, NULL);
*ok = false;
return Statement::Default();
@@ -762,7 +789,7 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
tok != Token::RBRACE &&
tok != Token::EOS) {
if (is_strong(language_mode()) &&
- i::IsConstructor(function_state_->kind())) {
+ IsClassConstructor(function_state_->kind())) {
int pos = peek_position();
ReportMessageAt(Scanner::Location(pos, pos + 1),
MessageTemplate::kStrongConstructorReturnValue);
@@ -874,7 +901,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
if (peek() != Token::SEMICOLON) {
ForEachStatement::VisitMode mode;
if (peek() == Token::VAR || (peek() == Token::CONST && allow_const()) ||
- (peek() == Token::LET && allow_let())) {
+ (peek() == Token::LET && IsNextLetKeyword())) {
int decl_count;
Scanner::Location first_initializer_loc = Scanner::Location::invalid();
Scanner::Location bindings_loc = Scanner::Location::invalid();
@@ -933,8 +960,8 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
// Parsed initializer at this point.
// Detect attempts at 'let' declarations in sloppy mode.
- if (peek() == Token::IDENTIFIER && is_sloppy(language_mode()) &&
- is_let_identifier_expression) {
+ if (!allow_harmony_sloppy_let() && peek() == Token::IDENTIFIER &&
+ is_sloppy(language_mode()) && is_let_identifier_expression) {
ReportMessage(MessageTemplate::kSloppyLexical, NULL);
*ok = false;
return Statement::Default();
@@ -1057,7 +1084,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
Expect(Token::LPAREN, CHECK_OK);
int start_position = scanner()->location().beg_pos;
function_scope->set_start_position(start_position);
- PreParserFormalParameters formals(nullptr);
+ PreParserFormalParameters formals(function_scope);
ParseFormalParameterList(&formals, &formals_classifier, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
int formals_end_position = scanner()->location().end_pos;
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index f8f20e530a..50dbcf1a46 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -31,6 +31,7 @@ struct FormalParametersBase {
bool has_rest = false;
bool is_simple = true;
int materialized_literals_count = 0;
+ mutable int rest_array_literal_index = -1;
};
@@ -112,7 +113,7 @@ class ParserBase : public Traits {
allow_harmony_sloppy_let_(false),
allow_harmony_rest_parameters_(false),
allow_harmony_default_parameters_(false),
- allow_harmony_spreadcalls_(false),
+ allow_harmony_spread_calls_(false),
allow_harmony_destructuring_(false),
allow_harmony_spread_arrays_(false),
allow_harmony_new_target_(false),
@@ -131,7 +132,7 @@ class ParserBase : public Traits {
ALLOW_ACCESSORS(harmony_sloppy_let);
ALLOW_ACCESSORS(harmony_rest_parameters);
ALLOW_ACCESSORS(harmony_default_parameters);
- ALLOW_ACCESSORS(harmony_spreadcalls);
+ ALLOW_ACCESSORS(harmony_spread_calls);
ALLOW_ACCESSORS(harmony_destructuring);
ALLOW_ACCESSORS(harmony_spread_arrays);
ALLOW_ACCESSORS(harmony_new_target);
@@ -338,6 +339,11 @@ class ParserBase : public Traits {
return scanner()->peek();
}
+ INLINE(Token::Value PeekAhead()) {
+ if (stack_overflow_) return Token::ILLEGAL;
+ return scanner()->PeekAhead();
+ }
+
INLINE(Token::Value Next()) {
if (stack_overflow_) return Token::ILLEGAL;
{
@@ -548,6 +554,14 @@ class ParserBase : public Traits {
}
}
+ void ValidateFormalParameterInitializer(
+ const ExpressionClassifier* classifier, bool* ok) {
+ if (!classifier->is_valid_formal_parameter_initializer()) {
+ ReportClassifierError(classifier->formal_parameter_initializer_error());
+ *ok = false;
+ }
+ }
+
void ValidateBindingPattern(const ExpressionClassifier* classifier,
bool* ok) {
if (!classifier->is_valid_binding_pattern()) {
@@ -629,6 +643,15 @@ class ParserBase : public Traits {
message, arg);
}
+ void FormalParameterInitializerUnexpectedToken(
+ ExpressionClassifier* classifier) {
+ MessageTemplate::Template message = MessageTemplate::kUnexpectedToken;
+ const char* arg;
+ GetUnexpectedTokenMessage(peek(), &message, &arg);
+ classifier->RecordFormalParameterInitializerError(
+ scanner()->peek_location(), message, arg);
+ }
+
// Recursive descent functions:
// Parses an identifier that is valid for the current scope, in particular it
@@ -710,6 +733,8 @@ class ParserBase : public Traits {
int param_count, FunctionLiteral::ArityRestriction arity_restriction,
bool has_rest, int formals_start_pos, int formals_end_pos, bool* ok);
+ bool IsNextLetKeyword();
+
// Checks if the expression is a valid reference expression (e.g., on the
// left-hand side of assignments). Although ruled out by ECMA as early errors,
// we allow calls for web compatibility and rewrite them to a runtime throw.
@@ -808,7 +833,7 @@ class ParserBase : public Traits {
bool allow_harmony_sloppy_let_;
bool allow_harmony_rest_parameters_;
bool allow_harmony_default_parameters_;
- bool allow_harmony_spreadcalls_;
+ bool allow_harmony_spread_calls_;
bool allow_harmony_destructuring_;
bool allow_harmony_spread_arrays_;
bool allow_harmony_new_target_;
@@ -917,7 +942,10 @@ class PreParserExpression {
static PreParserExpression BinaryOperation(PreParserExpression left,
Token::Value op,
PreParserExpression right) {
- return PreParserExpression(TypeField::encode(kBinaryOperationExpression));
+ return PreParserExpression(
+ TypeField::encode(kBinaryOperationExpression) |
+ HasRestField::encode(op == Token::COMMA &&
+ right->IsSpreadExpression()));
}
static PreParserExpression StringLiteral() {
@@ -1034,6 +1062,14 @@ class PreParserExpression {
return TypeField::decode(code_) == kSpreadExpression;
}
+ bool IsArrowFunctionFormalParametersWithRestParameter() const {
+ // Iff the expression classifier has determined that this expression is a
+    // valid arrow formal parameter list, return true if the formal parameter
+ // list ends with a rest parameter.
+ return IsSpreadExpression() ||
+ (IsBinaryOperation() && HasRestField::decode(code_));
+ }
+
PreParserExpression AsFunctionLiteral() { return *this; }
bool IsBinaryOperation() const {
@@ -1082,6 +1118,7 @@ class PreParserExpression {
typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseStrongField;
typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 10>
IdentifierTypeField;
+ typedef BitField<bool, TypeField::kNext, 1> HasRestField;
uint32_t code_;
};
@@ -1308,6 +1345,10 @@ class PreParserFactory {
return PreParserExpression::Spread(expression);
}
+ PreParserExpression NewEmptyParentheses(int pos) {
+ return PreParserExpression::Default();
+ }
+
// Return the object itself as AstVisitor and implement the needed
// dummy method right in this class.
PreParserFactory* visitor() { return this; }
@@ -1644,8 +1685,11 @@ class PreParserTraits {
++parameters->arity;
}
void DeclareFormalParameter(Scope* scope, PreParserIdentifier parameter,
- bool is_simple,
- ExpressionClassifier* classifier) {}
+ ExpressionClassifier* classifier) {
+ if (!classifier->is_simple_parameter_list()) {
+ scope->SetHasNonSimpleParameters();
+ }
+ }
void CheckConflictingVarDeclarations(Scope* scope, bool* ok) {}
@@ -1743,8 +1787,8 @@ class PreParser : public ParserBase<PreParserTraits> {
// At return, unless an error occurred, the scanner is positioned before the
// the final '}'.
PreParseResult PreParseLazyFunction(
- LanguageMode language_mode, FunctionKind kind, ParserRecorder* log,
- Scanner::BookmarkScope* bookmark = nullptr);
+ LanguageMode language_mode, FunctionKind kind, bool has_simple_parameters,
+ ParserRecorder* log, Scanner::BookmarkScope* bookmark = nullptr);
private:
friend class PreParserTraits;
@@ -1847,6 +1891,12 @@ void PreParserTraits::ParseArrowFunctionFormalParameterList(
Scanner::Location* duplicate_loc, bool* ok) {
// TODO(wingo): Detect duplicated identifiers in paramlists. Detect parameter
// lists that are too long.
+
+  // Accommodate array literal for rest parameter.
+ if (params.IsArrowFunctionFormalParametersWithRestParameter()) {
+ ++parameters->materialized_literals_count;
+ pre_parser_->function_state_->NextMaterializedLiteralIndex();
+ }
}
@@ -2178,7 +2228,7 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
if (FLAG_strong_this && is_strong(language_mode())) {
// Constructors' usages of 'this' in strong mode are parsed separately.
// TODO(rossberg): this does not work with arrow functions yet.
- if (i::IsConstructor(function_state_->kind())) {
+ if (IsClassConstructor(function_state_->kind())) {
ReportMessage(MessageTemplate::kStrongConstructorThis);
*ok = false;
break;
@@ -2262,38 +2312,37 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
}
BindingPatternUnexpectedToken(classifier);
Consume(Token::LPAREN);
- if (allow_harmony_arrow_functions() && Check(Token::RPAREN)) {
- // As a primary expression, the only thing that can follow "()" is "=>".
+ if (Check(Token::RPAREN)) {
+ // ()=>x. The continuation that looks for the => is in
+ // ParseAssignmentExpression.
+ classifier->RecordExpressionError(scanner()->location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(Token::RPAREN));
classifier->RecordBindingPatternError(scanner()->location(),
MessageTemplate::kUnexpectedToken,
Token::String(Token::RPAREN));
- // Give a good error to the user who might have typed e.g. "return();".
- if (peek() != Token::ARROW) {
- ReportUnexpectedTokenAt(scanner_->peek_location(), peek(),
- MessageTemplate::kMissingArrow);
+ result = factory()->NewEmptyParentheses(beg_pos);
+ } else if (allow_harmony_rest_parameters() && Check(Token::ELLIPSIS)) {
+ // (...x)=>x. The continuation that looks for the => is in
+ // ParseAssignmentExpression.
+ int ellipsis_pos = scanner()->location().beg_pos;
+ classifier->RecordExpressionError(scanner()->location(),
+ MessageTemplate::kUnexpectedToken,
+ Token::String(Token::ELLIPSIS));
+ classifier->RecordNonSimpleParameter();
+ Scanner::Location expr_loc = scanner()->peek_location();
+ Token::Value tok = peek();
+ result = this->ParseAssignmentExpression(true, classifier, CHECK_OK);
+ // Patterns are not allowed as rest parameters. There is no way we can
+ // succeed so go ahead and use the convenient ReportUnexpectedToken
+ // interface.
+ if (!Traits::IsIdentifier(result)) {
+ ReportUnexpectedTokenAt(expr_loc, tok);
*ok = false;
return this->EmptyExpression();
}
- Scope* scope =
- this->NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
- FormalParametersT parameters(scope);
- scope->set_start_position(beg_pos);
- ExpressionClassifier args_classifier;
- result = this->ParseArrowFunctionLiteral(parameters, args_classifier,
- CHECK_OK);
- } else if (allow_harmony_arrow_functions() &&
- allow_harmony_rest_parameters() && Check(Token::ELLIPSIS)) {
- // (...x) => y
- Scope* scope =
- this->NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
- FormalParametersT formals(scope);
- scope->set_start_position(beg_pos);
- ExpressionClassifier formals_classifier;
- formals.has_rest = true;
- this->ParseFormalParameter(&formals, &formals_classifier, CHECK_OK);
- Traits::DeclareFormalParameter(
- formals.scope, formals.at(0), formals.is_simple,
- &formals_classifier);
+ result = factory()->NewSpread(result, ellipsis_pos);
+
if (peek() == Token::COMMA) {
ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kParamAfterRest);
@@ -2301,8 +2350,6 @@ ParserBase<Traits>::ParsePrimaryExpression(ExpressionClassifier* classifier,
return this->EmptyExpression();
}
Expect(Token::RPAREN, CHECK_OK);
- result = this->ParseArrowFunctionLiteral(formals, formals_classifier,
- CHECK_OK);
} else {
// Heuristically try to detect immediately called functions before
// seeing the call parentheses.
@@ -2386,6 +2433,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
this->ParseAssignmentExpression(accept_IN, &binding_classifier, CHECK_OK);
classifier->Accumulate(binding_classifier,
ExpressionClassifier::AllProductions);
+ bool is_simple_parameter_list = this->IsIdentifier(result);
bool seen_rest = false;
while (peek() == Token::COMMA) {
if (seen_rest) {
@@ -2409,10 +2457,15 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseExpression(
ExpressionT right = this->ParseAssignmentExpression(
accept_IN, &binding_classifier, CHECK_OK);
if (is_rest) right = factory()->NewSpread(right, pos);
+ is_simple_parameter_list =
+ is_simple_parameter_list && this->IsIdentifier(right);
classifier->Accumulate(binding_classifier,
ExpressionClassifier::AllProductions);
result = factory()->NewBinaryOperation(Token::COMMA, result, right, pos);
}
+ if (!is_simple_parameter_list || seen_rest) {
+ classifier->RecordNonSimpleParameter();
+ }
return result;
}
@@ -2512,7 +2565,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
ExpressionT expression =
ParseAssignmentExpression(true, &computed_name_classifier, CHECK_OK);
classifier->Accumulate(computed_name_classifier,
- ExpressionClassifier::ExpressionProduction);
+ ExpressionClassifier::ExpressionProductions);
Expect(Token::RBRACK, CHECK_OK);
return expression;
}
@@ -2558,19 +2611,66 @@ ParserBase<Traits>::ParsePropertyDefinition(
this->PushLiteralName(fni_, name);
}
- if (!in_class && !is_generator && peek() == Token::COLON) {
- // PropertyDefinition : PropertyName ':' AssignmentExpression
- if (!*is_computed_name) {
- checker->CheckProperty(name_token, kValueProperty, is_static,
- is_generator,
- CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ if (!in_class && !is_generator) {
+ DCHECK(!is_static);
+
+ if (peek() == Token::COLON) {
+ // PropertyDefinition
+ // PropertyName ':' AssignmentExpression
+ if (!*is_computed_name) {
+ checker->CheckProperty(name_token, kValueProperty, false, false,
+ CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ }
+ Consume(Token::COLON);
+ value = this->ParseAssignmentExpression(
+ true, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ return factory()->NewObjectLiteralProperty(name_expression, value, false,
+ *is_computed_name);
+ }
+
+ if (Token::IsIdentifier(name_token, language_mode(),
+ this->is_generator()) &&
+ (peek() == Token::COMMA || peek() == Token::RBRACE ||
+ peek() == Token::ASSIGN)) {
+ // PropertyDefinition
+ // IdentifierReference
+ // CoverInitializedName
+ //
+ // CoverInitializedName
+ // IdentifierReference Initializer?
+ if (classifier->duplicate_finder() != nullptr &&
+ scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
+ classifier->RecordDuplicateFormalParameterError(scanner()->location());
+ }
+
+ ExpressionT lhs = this->ExpressionFromIdentifier(
+ name, next_beg_pos, next_end_pos, scope_, factory());
+
+ if (peek() == Token::ASSIGN) {
+ this->ExpressionUnexpectedToken(classifier);
+ Consume(Token::ASSIGN);
+ ExpressionClassifier rhs_classifier;
+ ExpressionT rhs = this->ParseAssignmentExpression(
+ true, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+ classifier->Accumulate(rhs_classifier,
+ ExpressionClassifier::ExpressionProductions);
+ value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
+ RelocInfo::kNoPosition);
+ } else {
+ value = lhs;
+ }
+
+ return factory()->NewObjectLiteralProperty(
+ name_expression, value, ObjectLiteralProperty::COMPUTED, false,
+ false);
}
- Consume(Token::COLON);
- value = this->ParseAssignmentExpression(
- true, classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
-  } else if (is_generator || peek() == Token::LPAREN) {
-    // Concise Method
+  }
+
+  if (is_generator || peek() == Token::LPAREN) {
+    // MethodDefinition
+    //    PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
+    //    '*' PropertyName '(' StrictFormalParameters ')' '{' FunctionBody '}'
if (!*is_computed_name) {
checker->CheckProperty(name_token, kMethodProperty, is_static,
is_generator,
@@ -2597,13 +2697,19 @@ ParserBase<Traits>::ParsePropertyDefinition(
return factory()->NewObjectLiteralProperty(name_expression, value,
ObjectLiteralProperty::COMPUTED,
is_static, *is_computed_name);
-  } else if (in_class && name_is_static && !is_static) {
-    // static MethodDefinition
+  }
+  if (in_class && name_is_static && !is_static) {
+    // ClassElement (static)
+    //    'static' MethodDefinition
return ParsePropertyDefinition(checker, true, has_extends, true,
is_computed_name, nullptr, classifier, ok);
- } else if (is_get || is_set) {
- // Accessor
+ }
+
+ if (is_get || is_set) {
+ // MethodDefinition (Accessors)
+ // get PropertyName '(' ')' '{' FunctionBody '}'
+ // set PropertyName '(' PropertySetParameterList ')' '{' FunctionBody '}'
name = this->EmptyIdentifier();
bool dont_care = false;
name_token = peek();
@@ -2638,44 +2744,12 @@ ParserBase<Traits>::ParsePropertyDefinition(
name_expression, value,
is_get ? ObjectLiteralProperty::GETTER : ObjectLiteralProperty::SETTER,
is_static, *is_computed_name);
-
- } else if (!in_class && Token::IsIdentifier(name_token, language_mode(),
- this->is_generator())) {
- DCHECK(!*is_computed_name);
- DCHECK(!is_static);
-
- if (classifier->duplicate_finder() != nullptr &&
- scanner()->FindSymbol(classifier->duplicate_finder(), 1) != 0) {
- classifier->RecordDuplicateFormalParameterError(scanner()->location());
- }
-
- ExpressionT lhs = this->ExpressionFromIdentifier(
- name, next_beg_pos, next_end_pos, scope_, factory());
- if (peek() == Token::ASSIGN) {
- this->ExpressionUnexpectedToken(classifier);
- Consume(Token::ASSIGN);
- ExpressionClassifier rhs_classifier;
- ExpressionT rhs = this->ParseAssignmentExpression(
- true, &rhs_classifier, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
- classifier->Accumulate(rhs_classifier,
- ExpressionClassifier::ExpressionProduction);
- value = factory()->NewAssignment(Token::ASSIGN, lhs, rhs,
- RelocInfo::kNoPosition);
- } else {
- value = lhs;
- }
- return factory()->NewObjectLiteralProperty(
- name_expression, value, ObjectLiteralProperty::COMPUTED, false, false);
-
- } else {
- Token::Value next = Next();
- ReportUnexpectedToken(next);
- *ok = false;
- return this->EmptyObjectLiteralProperty();
}
- return factory()->NewObjectLiteralProperty(name_expression, value, is_static,
- *is_computed_name);
+ Token::Value next = Next();
+ ReportUnexpectedToken(next);
+ *ok = false;
+ return this->EmptyObjectLiteralProperty();
}
@@ -2761,7 +2835,8 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
bool was_unspread = false;
int unspread_sequences_count = 0;
while (!done) {
- bool is_spread = allow_harmony_spreadcalls() && (peek() == Token::ELLIPSIS);
+ bool is_spread =
+ allow_harmony_spread_calls() && (peek() == Token::ELLIPSIS);
int start_pos = peek_position();
if (is_spread) Consume(Token::ELLIPSIS);
@@ -2840,7 +2915,6 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
}
ExpressionT expression = this->ParseConditionalExpression(
accept_IN, &arrow_formals_classifier, CHECK_OK);
-
if (allow_harmony_arrow_functions() && peek() == Token::ARROW) {
BindingPatternUnexpectedToken(classifier);
ValidateArrowFormalParameters(&arrow_formals_classifier, expression,
@@ -2849,12 +2923,18 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
Scope* scope =
this->NewScope(scope_, ARROW_SCOPE, FunctionKind::kArrowFunction);
FormalParametersT parameters(scope);
-      checkpoint.Restore(&parameters.materialized_literals_count);
-      scope->set_start_position(lhs_beg_pos);
+      if (!arrow_formals_classifier.is_simple_parameter_list()) {
+        scope->SetHasNonSimpleParameters();
+        parameters.is_simple = false;
+      }
Scanner::Location duplicate_loc = Scanner::Location::invalid();
this->ParseArrowFunctionFormalParameterList(&parameters, expression, loc,
&duplicate_loc, CHECK_OK);
+
+ checkpoint.Restore(&parameters.materialized_literals_count);
+
+ scope->set_start_position(lhs_beg_pos);
if (duplicate_loc.IsValid()) {
arrow_formals_classifier.RecordDuplicateFormalParameterError(
duplicate_loc);
@@ -2897,7 +2977,7 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN,
ExpressionT right =
this->ParseAssignmentExpression(accept_IN, &rhs_classifier, CHECK_OK);
classifier->Accumulate(rhs_classifier,
- ExpressionClassifier::ExpressionProduction);
+ ExpressionClassifier::ExpressionProductions);
// TODO(1231235): We try to estimate the set of properties set by
// constructors. We define a new property whenever there is an
@@ -2935,6 +3015,8 @@ ParserBase<Traits>::ParseYieldExpression(ExpressionClassifier* classifier,
// YieldExpression ::
// 'yield' ([no line terminator] '*'? AssignmentExpression)?
int pos = peek_position();
+ BindingPatternUnexpectedToken(classifier);
+ FormalParameterInitializerUnexpectedToken(classifier);
Expect(Token::YIELD, CHECK_OK);
ExpressionT generator_object =
factory()->NewVariableProxy(function_state_->generator_object_variable());
@@ -3531,7 +3613,7 @@ ParserBase<Traits>::ParseSuperExpression(bool is_new,
Scope* scope = scope_->ReceiverScope();
FunctionKind kind = scope->function_kind();
if (IsConciseMethod(kind) || IsAccessorFunction(kind) ||
- i::IsConstructor(kind)) {
+ IsClassConstructor(kind)) {
if (peek() == Token::PERIOD || peek() == Token::LBRACK) {
scope->RecordSuperPropertyUsage();
return this->SuperPropertyReference(scope_, factory(), pos);
@@ -3662,6 +3744,15 @@ void ParserBase<Traits>::ParseFormalParameter(
return;
}
parameters->is_simple = false;
+ ValidateFormalParameterInitializer(classifier, ok);
+ if (!*ok) return;
+ classifier->RecordNonSimpleParameter();
+ }
+
+ if (is_rest) {
+ parameters->rest_array_literal_index =
+ function_state_->NextMaterializedLiteralIndex();
+ ++parameters->materialized_literals_count;
}
ExpressionT initializer = Traits::EmptyExpression();
@@ -3670,8 +3761,10 @@ void ParserBase<Traits>::ParseFormalParameter(
initializer = ParseAssignmentExpression(true, &init_classifier, ok);
if (!*ok) return;
ValidateExpression(&init_classifier, ok);
+ ValidateFormalParameterInitializer(&init_classifier, ok);
if (!*ok) return;
parameters->is_simple = false;
+ classifier->RecordNonSimpleParameter();
}
Traits::AddFormalParameter(parameters, pattern, initializer, is_rest);
@@ -3712,6 +3805,7 @@ void ParserBase<Traits>::ParseFormalParameterList(
if (parameters->has_rest) {
parameters->is_simple = false;
+ classifier->RecordNonSimpleParameter();
if (peek() == Token::COMMA) {
ReportMessageAt(scanner()->peek_location(),
MessageTemplate::kParamAfterRest);
@@ -3723,8 +3817,7 @@ void ParserBase<Traits>::ParseFormalParameterList(
for (int i = 0; i < parameters->Arity(); ++i) {
auto parameter = parameters->at(i);
- Traits::DeclareFormalParameter(
- parameters->scope, parameter, parameters->is_simple, classifier);
+ Traits::DeclareFormalParameter(parameters->scope, parameter, classifier);
}
}
@@ -3760,6 +3853,27 @@ void ParserBase<Traits>::CheckArityRestrictions(
template <class Traits>
+bool ParserBase<Traits>::IsNextLetKeyword() {
+ DCHECK(peek() == Token::LET);
+ if (!allow_let()) {
+ return false;
+ }
+ Token::Value next_next = PeekAhead();
+ switch (next_next) {
+ case Token::LBRACE:
+ case Token::LBRACK:
+ case Token::IDENTIFIER:
+ case Token::STATIC:
+ case Token::LET: // Yes, you can do let let = ... in sloppy mode
+ case Token::YIELD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
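
The same two-token lookahead, restated as a standalone sketch (token names abbreviated; not V8 code): `let` opens a declaration only when the next token can begin a binding, otherwise it is treated as a plain identifier.

enum class Tok { LBrace, LBrack, Identifier, Static, Let, Yield, Other };

bool StartsLetDeclaration(Tok after_let) {
  switch (after_let) {
    case Tok::LBrace:      // let {a} = obj;
    case Tok::LBrack:      // let [a] = arr;
    case Tok::Identifier:  // let x = 1;
    case Tok::Static:
    case Tok::Let:         // let let = 1; (sloppy mode only)
    case Tok::Yield:
      return true;
    default:
      return false;  // e.g. `let + 1`, where let is an identifier
  }
}
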
+template <class Traits>
typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseArrowFunctionLiteral(
const FormalParametersT& formal_parameters,
@@ -3801,6 +3915,11 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(
body = this->NewStatementList(0, zone());
this->SkipLazyFunctionBody(&materialized_literal_count,
&expected_property_count, CHECK_OK);
+
+ if (formal_parameters.materialized_literals_count > 0) {
+ materialized_literal_count +=
+ formal_parameters.materialized_literals_count;
+ }
} else {
body = this->ParseEagerFunctionBody(
this->EmptyIdentifier(), RelocInfo::kNoPosition, formal_parameters,
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 9bc4e6a562..59db57ac7e 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -2,13 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <stdarg.h>
-#include "src/v8.h"
+#include "src/prettyprinter.h"
+#include <stdarg.h>
#include "src/ast-value-factory.h"
#include "src/base/platform/platform.h"
-#include "src/prettyprinter.h"
#include "src/scopes.h"
namespace v8 {
@@ -115,6 +114,12 @@ void CallPrinter::VisitExpressionStatement(ExpressionStatement* node) {
void CallPrinter::VisitEmptyStatement(EmptyStatement* node) {}
+void CallPrinter::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* node) {
+ Find(node->statement());
+}
+
+
void CallPrinter::VisitIfStatement(IfStatement* node) {
Find(node->condition());
Find(node->then_statement());
@@ -361,6 +366,11 @@ void CallPrinter::VisitSpread(Spread* node) {
}
+void CallPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
+ UNREACHABLE();
+}
+
+
void CallPrinter::VisitThisFunction(ThisFunction* node) {}
@@ -424,8 +434,7 @@ static int FormatICSlotNode(Vector<char>* buf, Expression* node,
const char* node_name, FeedbackVectorICSlot slot) {
int pos = SNPrintF(*buf, "%s", node_name);
if (!slot.IsInvalid()) {
- const char* str = Code::Kind2String(node->FeedbackICSlotKind(0));
- pos = SNPrintF(*buf + pos, " ICSlot(%d, %s)", slot.ToInt(), str);
+ pos = SNPrintF(*buf + pos, " ICSlot(%d)", slot.ToInt());
}
return pos;
}
@@ -495,6 +504,12 @@ void PrettyPrinter::VisitEmptyStatement(EmptyStatement* node) {
}
+void PrettyPrinter::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* node) {
+ Visit(node->statement());
+}
+
+
void PrettyPrinter::VisitIfStatement(IfStatement* node) {
Print("if (");
Visit(node->condition());
@@ -796,8 +811,7 @@ void PrettyPrinter::VisitCallNew(CallNew* node) {
void PrettyPrinter::VisitCallRuntime(CallRuntime* node) {
- Print("%%");
- PrintLiteral(node->name(), false);
+ Print("%%%s\n", node->debug_name());
PrintArguments(node->arguments());
}
@@ -846,6 +860,11 @@ void PrettyPrinter::VisitSpread(Spread* node) {
}
+void PrettyPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
+ Print("()");
+}
+
+
void PrettyPrinter::VisitThisFunction(ThisFunction* node) {
Print("<this-function>");
}
@@ -887,7 +906,7 @@ const char* PrettyPrinter::PrintProgram(FunctionLiteral* program) {
void PrettyPrinter::PrintOut(Isolate* isolate, Zone* zone, AstNode* node) {
PrettyPrinter printer(isolate, zone);
- PrintF("%s", printer.Print(node));
+ PrintF("%s\n", printer.Print(node));
}
@@ -1208,6 +1227,12 @@ void AstPrinter::VisitEmptyStatement(EmptyStatement* node) {
}
+void AstPrinter::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* node) {
+ Visit(node->statement());
+}
+
+
void AstPrinter::VisitIfStatement(IfStatement* node) {
IndentedScope indent(this, "IF");
PrintIndentedVisit("CONDITION", node->condition());
@@ -1524,9 +1549,8 @@ void AstPrinter::VisitCallNew(CallNew* node) {
void AstPrinter::VisitCallRuntime(CallRuntime* node) {
EmbeddedVector<char, 128> buf;
- FormatICSlotNode(&buf, node, "CALL RUNTIME", node->CallRuntimeFeedbackSlot());
+ SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
IndentedScope indent(this, buf.start());
- PrintLiteralIndented("NAME", node->name(), false);
PrintArguments(node->arguments());
}
@@ -1566,6 +1590,11 @@ void AstPrinter::VisitSpread(Spread* node) {
}
+void AstPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
+ IndentedScope indent(this, "()");
+}
+
+
void AstPrinter::VisitThisFunction(ThisFunction* node) {
IndentedScope indent(this, "THIS-FUNCTION");
}
diff --git a/deps/v8/src/allocation-tracker.cc b/deps/v8/src/profiler/allocation-tracker.cc
index 128d25c8d2..942068ea08 100644
--- a/deps/v8/src/allocation-tracker.cc
+++ b/deps/v8/src/profiler/allocation-tracker.cc
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/profiler/allocation-tracker.h"
-#include "src/allocation-tracker.h"
#include "src/frames-inl.h"
-#include "src/heap-snapshot-generator-inl.h"
+#include "src/objects-inl.h"
+#include "src/profiler/heap-snapshot-generator-inl.h"
namespace v8 {
namespace internal {
@@ -176,8 +176,7 @@ void AddressToTraceMap::RemoveRange(Address start, Address end) {
break;
}
++it;
- }
- while (it != ranges_.end());
+ } while (it != ranges_.end());
ranges_.erase(to_remove_begin, it);
@@ -274,7 +273,7 @@ unsigned AllocationTracker::AddFunctionInfo(SharedFunctionInfo* shared,
Name* name = Name::cast(script->name());
info->script_name = names_->GetName(name);
}
- info->script_id = script->id()->value();
+ info->script_id = script->id();
// Converting start offset into line and column may cause heap
// allocations so we postpone them until snapshot serialization.
unresolved_locations_.Add(new UnresolvedLocation(
diff --git a/deps/v8/src/allocation-tracker.h b/deps/v8/src/profiler/allocation-tracker.h
index c409f2437b..ebda3dba83 100644
--- a/deps/v8/src/allocation-tracker.h
+++ b/deps/v8/src/profiler/allocation-tracker.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_ALLOCATION_TRACKER_H_
-#define V8_ALLOCATION_TRACKER_H_
+#ifndef V8_PROFILER_ALLOCATION_TRACKER_H_
+#define V8_PROFILER_ALLOCATION_TRACKER_H_
#include <map>
diff --git a/deps/v8/src/circular-queue-inl.h b/deps/v8/src/profiler/circular-queue-inl.h
index 2f06f6c496..66b4af5b4a 100644
--- a/deps/v8/src/circular-queue-inl.h
+++ b/deps/v8/src/profiler/circular-queue-inl.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CIRCULAR_QUEUE_INL_H_
-#define V8_CIRCULAR_QUEUE_INL_H_
+#ifndef V8_PROFILER_CIRCULAR_QUEUE_INL_H_
+#define V8_PROFILER_CIRCULAR_QUEUE_INL_H_
-#include "src/circular-queue.h"
+#include "src/profiler/circular-queue.h"
namespace v8 {
namespace internal {
@@ -66,4 +66,4 @@ typename SamplingCircularQueue<T, L>::Entry* SamplingCircularQueue<T, L>::Next(
} } // namespace v8::internal
-#endif // V8_CIRCULAR_QUEUE_INL_H_
+#endif // V8_PROFILER_CIRCULAR_QUEUE_INL_H_
diff --git a/deps/v8/src/circular-queue.h b/deps/v8/src/profiler/circular-queue.h
index c312c597c6..3508b371c4 100644
--- a/deps/v8/src/circular-queue.h
+++ b/deps/v8/src/profiler/circular-queue.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CIRCULAR_QUEUE_H_
-#define V8_CIRCULAR_QUEUE_H_
+#ifndef V8_PROFILER_CIRCULAR_QUEUE_H_
+#define V8_PROFILER_CIRCULAR_QUEUE_H_
#include "src/base/atomicops.h"
#include "src/globals.h"
@@ -65,4 +65,4 @@ class SamplingCircularQueue {
} } // namespace v8::internal
-#endif // V8_CIRCULAR_QUEUE_H_
+#endif // V8_PROFILER_CIRCULAR_QUEUE_H_
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/profiler/cpu-profiler-inl.h
index 075f285489..df727ae7cb 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/profiler/cpu-profiler-inl.h
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CPU_PROFILER_INL_H_
-#define V8_CPU_PROFILER_INL_H_
+#ifndef V8_PROFILER_CPU_PROFILER_INL_H_
+#define V8_PROFILER_CPU_PROFILER_INL_H_
-#include "src/cpu-profiler.h"
+#include "src/profiler/cpu-profiler.h"
#include <new>
-#include "src/circular-queue-inl.h"
-#include "src/profile-generator-inl.h"
-#include "src/unbound-queue-inl.h"
+#include "src/profiler/circular-queue-inl.h"
+#include "src/profiler/profile-generator-inl.h"
+#include "src/profiler/unbound-queue-inl.h"
namespace v8 {
namespace internal {
@@ -76,4 +76,4 @@ void ProfilerEventsProcessor::FinishTickSample() {
} } // namespace v8::internal
-#endif // V8_CPU_PROFILER_INL_H_
+#endif // V8_PROFILER_CPU_PROFILER_INL_H_
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index 638f915138..c4216ed478 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
-#include "src/cpu-profiler-inl.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/compiler.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/hashmap.h"
#include "src/log-inl.h"
+#include "src/profiler/cpu-profiler-inl.h"
#include "src/vm-state-inl.h"
#include "include/v8-profiler.h"
@@ -201,18 +200,7 @@ void CpuProfiler::DeleteProfile(CpuProfile* profile) {
}
-static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag) {
- return FLAG_prof_browser_mode
- && (tag != Logger::CALLBACK_TAG
- && tag != Logger::FUNCTION_TAG
- && tag != Logger::LAZY_COMPILE_TAG
- && tag != Logger::REG_EXP_TAG
- && tag != Logger::SCRIPT_TAG);
-}
-
-
void CpuProfiler::CallbackEvent(Name* name, Address entry_point) {
- if (FilterOutCodeCreateEvent(Logger::CALLBACK_TAG)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = entry_point;
@@ -227,7 +215,6 @@ void CpuProfiler::CallbackEvent(Name* name, Address entry_point) {
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
const char* name) {
- if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = code->address();
@@ -243,7 +230,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
Name* name) {
- if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = code->address();
@@ -259,7 +245,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info, Name* script_name) {
- if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = code->address();
@@ -282,7 +267,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
SharedFunctionInfo* shared,
CompilationInfo* info, Name* script_name,
int line, int column) {
- if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = code->address();
@@ -319,7 +303,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
int args_count) {
- if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = code->address();
@@ -368,7 +351,6 @@ void CpuProfiler::CodeDeleteEvent(Address from) {
void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
- if (FilterOutCodeCreateEvent(Logger::CALLBACK_TAG)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = entry_point;
@@ -382,7 +364,6 @@ void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
- if (FilterOutCodeCreateEvent(Logger::REG_EXP_TAG)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = code->address();
@@ -396,7 +377,6 @@ void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
- if (FilterOutCodeCreateEvent(Logger::CALLBACK_TAG)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
rec->start = entry_point;
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index 7619a1ef6d..2d6732725a 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_CPU_PROFILER_H_
-#define V8_CPU_PROFILER_H_
+#ifndef V8_PROFILER_CPU_PROFILER_H_
+#define V8_PROFILER_CPU_PROFILER_H_
#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
-#include "src/circular-queue.h"
#include "src/compiler.h"
-#include "src/sampler.h"
-#include "src/unbound-queue.h"
+#include "src/profiler/circular-queue.h"
+#include "src/profiler/sampler.h"
+#include "src/profiler/unbound-queue.h"
namespace v8 {
namespace internal {
@@ -273,4 +273,4 @@ class CpuProfiler : public CodeEventListener {
} } // namespace v8::internal
-#endif // V8_CPU_PROFILER_H_
+#endif // V8_PROFILER_CPU_PROFILER_H_
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/profiler/heap-profiler.cc
index 68d94ccbd9..3f776e05a8 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/profiler/heap-profiler.cc
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/profiler/heap-profiler.h"
-#include "src/heap-profiler.h"
-
-#include "src/allocation-tracker.h"
-#include "src/heap-snapshot-generator-inl.h"
+#include "src/api.h"
+#include "src/profiler/allocation-tracker.h"
+#include "src/profiler/heap-snapshot-generator-inl.h"
namespace v8 {
namespace internal {
@@ -136,6 +135,7 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
+ base::LockGuard<base::Mutex> guard(&profiler_mutex_);
bool known_object = ids_->MoveObject(from, to, size);
if (!known_object && !allocation_tracker_.is_empty()) {
allocation_tracker_->address_to_trace()->MoveObject(from, to, size);
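
The hunk above serializes ObjectMoveEvent with the new profiler_mutex_, since the GC can move objects concurrently with snapshot work on another thread. A minimal sketch of the same guard pattern using the standard library (V8 uses base::LockGuard<base::Mutex>; the class and the int-keyed map below are illustrative only):

    #include <iostream>
    #include <mutex>
    #include <unordered_map>

    class MoveTracker {
     public:
      // Called when the GC moves an object; the guard keeps the
      // address-to-id map consistent for concurrent readers.
      void ObjectMoveEvent(int from, int to) {
        std::lock_guard<std::mutex> guard(mutex_);
        auto it = ids_.find(from);
        if (it == ids_.end()) return;  // unknown object: nothing to update
        int id = it->second;
        ids_.erase(it);  // erase first: inserting may rehash the map
        ids_[to] = id;
      }
      void Register(int addr, int id) {
        std::lock_guard<std::mutex> guard(mutex_);
        ids_[addr] = id;
      }
      bool Lookup(int addr, int* id) {
        std::lock_guard<std::mutex> guard(mutex_);
        auto it = ids_.find(addr);
        if (it == ids_.end()) return false;
        *id = it->second;
        return true;
      }

     private:
      std::mutex mutex_;
      std::unordered_map<int, int> ids_;  // address -> stable object id
    };

    int main() {
      MoveTracker t;
      t.Register(0x1000, 42);
      t.ObjectMoveEvent(0x1000, 0x2000);
      int id = 0;
      std::cout << (t.Lookup(0x2000, &id) ? id : -1) << "\n";  // prints 42
    }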
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/profiler/heap-profiler.h
index 16dd08a265..b304f388ff 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/profiler/heap-profiler.h
@@ -2,11 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_PROFILER_H_
-#define V8_HEAP_PROFILER_H_
+#ifndef V8_PROFILER_HEAP_PROFILER_H_
+#define V8_PROFILER_HEAP_PROFILER_H_
#include "src/base/smart-pointers.h"
#include "src/isolate.h"
+#include "src/list.h"
namespace v8 {
namespace internal {
@@ -75,8 +76,9 @@ class HeapProfiler {
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
base::SmartPointer<AllocationTracker> allocation_tracker_;
bool is_tracking_object_moves_;
+ base::Mutex profiler_mutex_;
};
} } // namespace v8::internal
-#endif // V8_HEAP_PROFILER_H_
+#endif // V8_PROFILER_HEAP_PROFILER_H_
diff --git a/deps/v8/src/heap-snapshot-generator-inl.h b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
index ad95776cbe..12e37f5e60 100644
--- a/deps/v8/src/heap-snapshot-generator-inl.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator-inl.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
-#define V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
+#ifndef V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_INL_H_
+#define V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_INL_H_
-#include "src/heap-snapshot-generator.h"
+#include "src/profiler/heap-snapshot-generator.h"
namespace v8 {
namespace internal {
@@ -45,4 +45,4 @@ HeapGraphEdge** HeapEntry::children_arr() {
} } // namespace v8::internal
-#endif // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
+#endif // V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_INL_H_
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index f9c235ed94..c7bb3c950a 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/profiler/heap-snapshot-generator.h"
-#include "src/heap-snapshot-generator-inl.h"
-
-#include "src/allocation-tracker.h"
#include "src/code-stubs.h"
#include "src/conversions.h"
#include "src/debug/debug.h"
-#include "src/heap-profiler.h"
+#include "src/profiler/allocation-tracker.h"
+#include "src/profiler/heap-profiler.h"
+#include "src/profiler/heap-snapshot-generator-inl.h"
#include "src/types.h"
namespace v8 {
@@ -1266,7 +1265,7 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
TagObject(context->runtime_context(), "(runtime context)");
TagObject(context->embedder_data(), "(context data)");
- NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
+ NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD)
EXTRACT_CONTEXT_FIELD(OPTIMIZED_FUNCTIONS_LIST, unused,
optimized_functions_list);
EXTRACT_CONTEXT_FIELD(OPTIMIZED_CODE_LIST, unused, optimized_code_list);
@@ -1473,8 +1472,8 @@ void V8HeapExplorer::TagBuiltinCodeObject(Code* code, const char* name) {
void V8HeapExplorer::TagCodeObject(Code* code) {
if (code->kind() == Code::STUB) {
TagObject(code, names_->GetFormatted(
- "(%s code)", CodeStub::MajorName(
- CodeStub::GetMajorKey(code), true)));
+ "(%s code)",
+ CodeStub::MajorName(CodeStub::GetMajorKey(code))));
}
}
@@ -2163,7 +2162,7 @@ const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
#define SYMBOL_NAME(name) NAME_ENTRY(name)
PRIVATE_SYMBOL_LIST(SYMBOL_NAME)
#undef SYMBOL_NAME
-#define SYMBOL_NAME(name, varname, description) NAME_ENTRY(name)
+#define SYMBOL_NAME(name, description) NAME_ENTRY(name)
PUBLIC_SYMBOL_LIST(SYMBOL_NAME)
#undef SYMBOL_NAME
#undef NAME_ENTRY
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 5693cc16c3..1baebeee9e 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -2,10 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HEAP_SNAPSHOT_GENERATOR_H_
-#define V8_HEAP_SNAPSHOT_GENERATOR_H_
+#ifndef V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
+#define V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
+#include "include/v8-profiler.h"
#include "src/base/platform/time.h"
+#include "src/objects.h"
#include "src/strings-storage.h"
namespace v8 {
@@ -14,6 +16,8 @@ namespace internal {
class AllocationTracker;
class AllocationTraceNode;
class HeapEntry;
+class HeapIterator;
+class HeapProfiler;
class HeapSnapshot;
class SnapshotFiller;
@@ -614,4 +618,4 @@ class HeapSnapshotJSONSerializer {
} } // namespace v8::internal
-#endif // V8_HEAP_SNAPSHOT_GENERATOR_H_
+#endif // V8_PROFILER_HEAP_SNAPSHOT_GENERATOR_H_
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profiler/profile-generator-inl.h
index 1e543dda8b..c2e98cc4c8 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profiler/profile-generator-inl.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PROFILE_GENERATOR_INL_H_
-#define V8_PROFILE_GENERATOR_INL_H_
+#ifndef V8_PROFILER_PROFILE_GENERATOR_INL_H_
+#define V8_PROFILER_PROFILE_GENERATOR_INL_H_
-#include "src/profile-generator.h"
+#include "src/profiler/profile-generator.h"
namespace v8 {
namespace internal {
@@ -31,16 +31,6 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name,
instruction_start_(instruction_start) {}
-bool CodeEntry::is_js_function_tag(Logger::LogEventsAndTags tag) {
- return tag == Logger::FUNCTION_TAG
- || tag == Logger::LAZY_COMPILE_TAG
- || tag == Logger::SCRIPT_TAG
- || tag == Logger::NATIVE_FUNCTION_TAG
- || tag == Logger::NATIVE_LAZY_COMPILE_TAG
- || tag == Logger::NATIVE_SCRIPT_TAG;
-}
-
-
ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
: tree_(tree),
entry_(entry),
@@ -55,4 +45,4 @@ inline unsigned ProfileNode::function_id() const {
}
} } // namespace v8::internal
-#endif // V8_PROFILE_GENERATOR_INL_H_
+#endif // V8_PROFILER_PROFILE_GENERATOR_INL_H_
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index d2e94b4758..f3592bba65 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
-#include "src/profile-generator-inl.h"
+#include "src/profiler/profile-generator.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/global-handles.h"
-#include "src/sampler.h"
+#include "src/profiler/profile-generator-inl.h"
+#include "src/profiler/sampler.h"
#include "src/scopeinfo.h"
#include "src/splay-tree-inl.h"
#include "src/unicode.h"
@@ -107,7 +106,7 @@ int CodeEntry::GetSourceLine(int pc_offset) const {
void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
if (!shared->script()->IsScript()) return;
Script* script = Script::cast(shared->script());
- set_script_id(script->id()->value());
+ set_script_id(script->id());
set_position(shared->start_position());
set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
}
@@ -406,15 +405,12 @@ void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
}
-CodeEntry* CodeMap::FindEntry(Address addr, Address* start) {
+CodeEntry* CodeMap::FindEntry(Address addr) {
CodeTree::Locator locator;
if (tree_.FindGreatestLessThan(addr, &locator)) {
// locator.key() <= addr. Need to check that addr is within entry.
const CodeEntryInfo& entry = locator.value();
if (addr < (locator.key() + entry.size)) {
- if (start) {
- *start = locator.key();
- }
return entry.entry;
}
}
@@ -602,8 +598,14 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
// that a callback calls itself.
*entry++ = code_map_.FindEntry(sample.external_callback);
} else {
- Address start;
- CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start);
+ CodeEntry* pc_entry = code_map_.FindEntry(sample.pc);
+ // If there is no pc_entry, we are likely in native code.
+ // Check whether the top of the stack was pointing inside a JS function,
+ // which would mean we have encountered a frameless invocation.
+ if (!pc_entry && (sample.top_frame_type == StackFrame::JAVA_SCRIPT ||
+ sample.top_frame_type == StackFrame::OPTIMIZED)) {
+ pc_entry = code_map_.FindEntry(sample.tos);
+ }
// If pc is in the function code before it set up stack frame or after the
// frame was destroyed SafeStackFrameIterator incorrectly thinks that
// ebp contains return address of the current function and skips caller's
@@ -645,8 +647,7 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
*stack_end = stack_pos + sample.frames_count;
stack_pos != stack_end;
++stack_pos) {
- Address start = NULL;
- *entry = code_map_.FindEntry(*stack_pos, &start);
+ *entry = code_map_.FindEntry(*stack_pos);
// Skip unresolved frames (e.g. internal frame) and get source line of
// the first JS caller.
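
FindEntry above drops its out-parameter, and the tick path now falls back to the sampled top-of-stack value when the pc resolves to no entry. A minimal sketch of that lookup, with std::map standing in for V8's splay tree (all names and addresses are illustrative):

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    struct CodeEntry { std::string name; };

    class CodeMap {
     public:
      void AddCode(uint64_t start, uint64_t size, CodeEntry entry) {
        tree_[start] = {size, entry};
      }
      // Find the greatest start <= addr, then check that addr falls
      // within [start, start + size) -- same shape as FindEntry above.
      const CodeEntry* FindEntry(uint64_t addr) const {
        auto it = tree_.upper_bound(addr);
        if (it == tree_.begin()) return nullptr;
        --it;
        if (addr < it->first + it->second.first) return &it->second.second;
        return nullptr;
      }

     private:
      std::map<uint64_t, std::pair<uint64_t, CodeEntry>> tree_;
    };

    int main() {
      CodeMap map;
      map.AddCode(0x1000, 0x100, {"foo"});
      uint64_t pc = 0x2000;   // in native code: no entry
      uint64_t tos = 0x1010;  // top of stack points inside foo
      const CodeEntry* e = map.FindEntry(pc);
      if (!e) e = map.FindEntry(tos);  // frameless-invocation fallback
      std::cout << (e ? e->name : "<native>") << "\n";  // prints foo
    }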
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index e1826f742e..ce58d70ae9 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_PROFILE_GENERATOR_H_
-#define V8_PROFILE_GENERATOR_H_
+#ifndef V8_PROFILER_PROFILE_GENERATOR_H_
+#define V8_PROFILER_PROFILE_GENERATOR_H_
#include <map>
#include "include/v8-profiler.h"
@@ -49,7 +49,6 @@ class CodeEntry {
Address instruction_start = NULL);
~CodeEntry();
- bool is_js_function() const { return is_js_function_tag(tag()); }
const char* name_prefix() const { return name_prefix_; }
bool has_name_prefix() const { return name_prefix_[0] != '\0'; }
const char* name() const { return name_; }
@@ -84,8 +83,6 @@ class CodeEntry {
void FillFunctionInfo(SharedFunctionInfo* shared);
- static inline bool is_js_function_tag(Logger::LogEventsAndTags tag);
-
List<OffsetRange>* no_frame_ranges() const { return no_frame_ranges_; }
void set_no_frame_ranges(List<OffsetRange>* ranges) {
no_frame_ranges_ = ranges;
@@ -269,7 +266,7 @@ class CodeMap {
~CodeMap();
void AddCode(Address addr, CodeEntry* entry, unsigned size);
void MoveCode(Address from, Address to);
- CodeEntry* FindEntry(Address addr, Address* start = NULL);
+ CodeEntry* FindEntry(Address addr);
int GetSharedId(Address addr);
void Print();
@@ -388,4 +385,4 @@ class ProfileGenerator {
} } // namespace v8::internal
-#endif // V8_PROFILE_GENERATOR_H_
+#endif // V8_PROFILER_PROFILE_GENERATOR_H_
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/profiler/sampler.cc
index 79b5bff855..dc4c4c4f06 100644
--- a/deps/v8/src/sampler.cc
+++ b/deps/v8/src/profiler/sampler.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/sampler.h"
+#include "src/profiler/sampler.h"
#if V8_OS_POSIX && !V8_OS_CYGWIN
@@ -42,13 +42,11 @@
#endif
-#include "src/v8.h"
-
#include "src/base/platform/platform.h"
-#include "src/cpu-profiler-inl.h"
#include "src/flags.h"
#include "src/frames-inl.h"
#include "src/log.h"
+#include "src/profiler/cpu-profiler-inl.h"
#include "src/simulator.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
@@ -175,6 +173,69 @@ class PlatformDataCommon : public Malloced {
ThreadId profiled_thread_id_;
};
+
+bool IsSamePage(byte* ptr1, byte* ptr2) {
+ const uint32_t kPageSize = 4096;
+ uintptr_t mask = ~static_cast<uintptr_t>(kPageSize - 1);
+ return (reinterpret_cast<uintptr_t>(ptr1) & mask) ==
+ (reinterpret_cast<uintptr_t>(ptr2) & mask);
+}
+
+
+// Check whether the code at the specified address could potentially be
+// frame-setup code.
+bool IsNoFrameRegion(Address address) {
+ struct Pattern {
+ int bytes_count;
+ byte bytes[8];
+ int offsets[4];
+ };
+ byte* pc = reinterpret_cast<byte*>(address);
+ static Pattern patterns[] = {
+#if V8_HOST_ARCH_IA32
+ // push %ebp
+ // mov %esp,%ebp
+ {3, {0x55, 0x89, 0xe5}, {0, 1, -1}},
+ // pop %ebp
+ // ret N
+ {2, {0x5d, 0xc2}, {0, 1, -1}},
+ // pop %ebp
+ // ret
+ {2, {0x5d, 0xc3}, {0, 1, -1}},
+#elif V8_HOST_ARCH_X64
+ // pushq %rbp
+ // movq %rsp,%rbp
+ {4, {0x55, 0x48, 0x89, 0xe5}, {0, 1, -1}},
+ // popq %rbp
+ // ret N
+ {2, {0x5d, 0xc2}, {0, 1, -1}},
+ // popq %rbp
+ // ret
+ {2, {0x5d, 0xc3}, {0, 1, -1}},
+#endif
+ {0, {}, {}}
+ };
+ for (Pattern* pattern = patterns; pattern->bytes_count; ++pattern) {
+ for (int* offset_ptr = pattern->offsets; *offset_ptr != -1; ++offset_ptr) {
+ int offset = *offset_ptr;
+ if (!offset || IsSamePage(pc, pc - offset)) {
+ MSAN_MEMORY_IS_INITIALIZED(pc - offset, pattern->bytes_count);
+ if (!memcmp(pc - offset, pattern->bytes, pattern->bytes_count))
+ return true;
+ } else {
+ // It is not safe to examine bytes on another page, as the page might
+ // not be allocated, which would cause a SEGFAULT.
+ // Check the part of the pattern that is on the same page and
+ // pessimistically assume it could match the entire pattern.
+ MSAN_MEMORY_IS_INITIALIZED(pc, pattern->bytes_count - offset);
+ if (!memcmp(pc, pattern->bytes + offset, pattern->bytes_count - offset))
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
} // namespace
#if defined(USE_SIGNALS)
@@ -234,7 +295,9 @@ class SimulatorHelper {
inline void FillRegisters(v8::RegisterState* state) {
#if V8_TARGET_ARCH_ARM
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ if (!simulator_->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ }
state->sp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
@@ -245,19 +308,30 @@ class SimulatorHelper {
// the sp or fp register. ARM64 simulator does this in two steps:
// first setting it to zero and then setting it to a new value.
// Bailout if sp/fp doesn't contain the new value.
+ //
+ // FIXME: The above doesn't really solve the issue.
+ // If a 64-bit target is executed on a 32-bit host, even the final
+ // write is non-atomic, so the reader might observe only half of the
+ // result. Moreover, as long as the register-set code uses memcpy (as
+ // of now), it is not guaranteed to be atomic even when host and
+ // target have the same bitness.
return;
}
state->pc = reinterpret_cast<Address>(simulator_->pc());
state->sp = reinterpret_cast<Address>(simulator_->sp());
state->fp = reinterpret_cast<Address>(simulator_->fp());
#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ if (!simulator_->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ }
state->sp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::sp));
state->fp = reinterpret_cast<Address>(simulator_->get_register(
Simulator::fp));
#elif V8_TARGET_ARCH_PPC
- state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ if (!simulator_->has_bad_pc()) {
+ state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+ }
state->sp =
reinterpret_cast<Address>(simulator_->get_register(Simulator::sp));
state->fp =
@@ -594,6 +668,11 @@ DISABLE_ASAN void TickSample::Init(Isolate* isolate,
Address js_entry_sp = isolate->js_entry_sp();
if (js_entry_sp == 0) return; // Not executing JS now.
+ if (pc && IsNoFrameRegion(pc)) {
+ pc = 0;
+ return;
+ }
+
ExternalCallbackScope* scope = isolate->external_callback_scope();
Address handler = Isolate::handler(isolate->thread_local_top());
// If there is a handler on top of the external callback scope then
@@ -603,6 +682,9 @@ DISABLE_ASAN void TickSample::Init(Isolate* isolate,
external_callback = scope->callback();
has_external_callback = true;
} else {
+ // The sp register may point at an arbitrary place in memory; make
+ // sure MSAN doesn't complain about it.
+ MSAN_MEMORY_IS_INITIALIZED(regs.sp, sizeof(Address));
// Sample potential return address value for frameless invocation of
// stubs (we'll figure out later, if this value makes sense).
tos = Memory::Address_at(reinterpret_cast<Address>(regs.sp));
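
IsNoFrameRegion above only dereferences bytes it can prove live on the sampled instruction's page; everything else in the pattern is assumed, pessimistically, to match. A minimal sketch of the page-mask test with a worked example, assuming the same 4096-byte pages as the diff:

    #include <cstdint>
    #include <cstdio>

    // Two addresses share a page iff they agree on every bit above the
    // page-offset bits.
    bool IsSamePage(uintptr_t p1, uintptr_t p2) {
      const uintptr_t kPageSize = 4096;
      uintptr_t mask = ~(kPageSize - 1);  // 0xFFF...F000
      return (p1 & mask) == (p2 & mask);
    }

    int main() {
      // 0x7003 and 0x7FFE both mask to page 0x7000; 0x8000 starts the
      // next page.
      std::printf("%d\n", IsSamePage(0x7003, 0x7FFE));  // 1
      std::printf("%d\n", IsSamePage(0x7FFF, 0x8000));  // 0
      // So when probing pc - offset for a prologue/epilogue byte pattern,
      // the sampler only reads backwards while pc - offset stays on pc's
      // page; otherwise it compares just the tail of the pattern that is
      // known to be mapped.
    }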
diff --git a/deps/v8/src/sampler.h b/deps/v8/src/profiler/sampler.h
index bfdf961229..ed932c0354 100644
--- a/deps/v8/src/sampler.h
+++ b/deps/v8/src/profiler/sampler.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_SAMPLER_H_
-#define V8_SAMPLER_H_
+#ifndef V8_PROFILER_SAMPLER_H_
+#define V8_PROFILER_SAMPLER_H_
#include "include/v8.h"
@@ -132,4 +132,4 @@ class Sampler {
} } // namespace v8::internal
-#endif // V8_SAMPLER_H_
+#endif // V8_PROFILER_SAMPLER_H_
diff --git a/deps/v8/src/unbound-queue-inl.h b/deps/v8/src/profiler/unbound-queue-inl.h
index 6782281680..fef7bec8d3 100644
--- a/deps/v8/src/unbound-queue-inl.h
+++ b/deps/v8/src/profiler/unbound-queue-inl.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_UNBOUND_QUEUE_INL_H_
-#define V8_UNBOUND_QUEUE_INL_H_
+#ifndef V8_PROFILER_UNBOUND_QUEUE_INL_H_
+#define V8_PROFILER_UNBOUND_QUEUE_INL_H_
-#include "src/unbound-queue.h"
+#include "src/profiler/unbound-queue.h"
namespace v8 {
namespace internal {
@@ -79,4 +79,4 @@ Record* UnboundQueue<Record>::Peek() const {
} } // namespace v8::internal
-#endif // V8_UNBOUND_QUEUE_INL_H_
+#endif // V8_PROFILER_UNBOUND_QUEUE_INL_H_
diff --git a/deps/v8/src/unbound-queue.h b/deps/v8/src/profiler/unbound-queue.h
index 3e12928973..a63c327d10 100644
--- a/deps/v8/src/unbound-queue.h
+++ b/deps/v8/src/profiler/unbound-queue.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_UNBOUND_QUEUE_
-#define V8_UNBOUND_QUEUE_
+#ifndef V8_PROFILER_UNBOUND_QUEUE_
+#define V8_PROFILER_UNBOUND_QUEUE_
#include "src/allocation.h"
#include "src/base/atomicops.h"
@@ -44,4 +44,4 @@ class UnboundQueue BASE_EMBEDDED {
} } // namespace v8::internal
-#endif // V8_UNBOUND_QUEUE_
+#endif // V8_PROFILER_UNBOUND_QUEUE_
diff --git a/deps/v8/src/prologue.js b/deps/v8/src/prologue.js
index 4906d41f3d..a54de36563 100644
--- a/deps/v8/src/prologue.js
+++ b/deps/v8/src/prologue.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function(global, utils) {
+(function(global, utils, extrasUtils) {
"use strict";
@@ -13,8 +13,7 @@
var imports = UNDEFINED;
var imports_from_experimental = UNDEFINED;
-var exports_to_runtime = UNDEFINED;
-var exports_container = {};
+var exports_container = %ExportFromRuntime({});
// Export to other scripts.
// In normal natives, this exports functions to other normal natives.
@@ -25,13 +24,6 @@ function Export(f) {
}
-// Export to the native context for calls from the runtime.
-function ExportToRuntime(f) {
- f.next = exports_to_runtime;
- exports_to_runtime = f;
-}
-
-
// Import from other scripts. The actual importing happens in PostNatives and
// PostExperimental so that we can import from scripts executed later. However,
// that means that the import is not available until the very end. If the
@@ -44,11 +36,12 @@ function Import(f) {
imports = f;
}
+
// Import immediately from exports of previous scripts. We need this for
// functions called during bootstrapping. Hooking up imports in PostNatives
// would be too late.
-function ImportNow(f) {
- f(exports_container);
+function ImportNow(name) {
+ return exports_container[name];
}
@@ -156,6 +149,7 @@ function SetUpLockedPrototype(
%ToFastProperties(prototype);
}
+
// -----------------------------------------------------------------------
// To be called by bootstrapper
@@ -166,13 +160,6 @@ function PostNatives(utils) {
imports(exports_container);
}
- var runtime_container = {};
- for ( ; !IS_UNDEFINED(exports_to_runtime);
- exports_to_runtime = exports_to_runtime.next) {
- exports_to_runtime(runtime_container);
- }
- %ImportToRuntime(runtime_container);
-
// Whitelist of exports from normal natives to experimental natives and debug.
var expose_list = [
"ArrayToString",
@@ -198,10 +185,19 @@ function PostNatives(utils) {
"ObjectIsFrozen",
"ObjectDefineProperty",
"OwnPropertyKeys",
+ "SymbolToString",
"ToNameArray",
"ToBoolean",
"ToNumber",
"ToString",
+ // From runtime:
+ "is_concat_spreadable_symbol",
+ "iterator_symbol",
+ "promise_status_symbol",
+ "promise_value_symbol",
+ "reflect_apply",
+ "reflect_construct",
+ "to_string_tag_symbol",
];
var filtered_exports = {};
@@ -220,7 +216,7 @@ function PostNatives(utils) {
function PostExperimentals(utils) {
%CheckIsBootstrapping();
-
+ %ExportExperimentalFromRuntime(exports_container);
for ( ; !IS_UNDEFINED(imports); imports = imports.next) {
imports(exports_container);
}
@@ -228,12 +224,6 @@ function PostExperimentals(utils) {
imports_from_experimental = imports_from_experimental.next) {
imports_from_experimental(exports_container);
}
- var runtime_container = {};
- for ( ; !IS_UNDEFINED(exports_to_runtime);
- exports_to_runtime = exports_to_runtime.next) {
- exports_to_runtime(runtime_container);
- }
- %ImportExperimentalToRuntime(runtime_container);
exports_container = UNDEFINED;
@@ -259,12 +249,11 @@ function PostDebug(utils) {
// -----------------------------------------------------------------------
-%OptimizeObjectForAddingMultipleProperties(utils, 14);
+%OptimizeObjectForAddingMultipleProperties(utils, 13);
utils.Import = Import;
utils.ImportNow = ImportNow;
utils.Export = Export;
-utils.ExportToRuntime = ExportToRuntime;
utils.ImportFromExperimental = ImportFromExperimental;
utils.SetFunctionName = SetFunctionName;
utils.InstallConstants = InstallConstants;
@@ -278,4 +267,49 @@ utils.PostDebug = PostDebug;
%ToFastProperties(utils);
+// -----------------------------------------------------------------------
+
+%OptimizeObjectForAddingMultipleProperties(extrasUtils, 5);
+
+extrasUtils.logStackTrace = function logStackTrace() {
+ %DebugTrace();
+};
+
+extrasUtils.log = function log() {
+ let message = '';
+ for (const arg of arguments) {
+ message += arg;
+ }
+
+ %GlobalPrint(message);
+};
+
+// Extras need the ability to store private state on their objects without
+// exposing it to the outside world.
+
+extrasUtils.createPrivateSymbol = function createPrivateSymbol(name) {
+ return %CreatePrivateSymbol(name);
+};
+
+// These functions are key for safe meta-programming:
+// http://wiki.ecmascript.org/doku.php?id=conventions:safe_meta_programming
+//
+// Technically they could all be derived from combinations of
+// Function.prototype.{bind,call,apply}, but that would introduce many layers
+// of indirection and slowness, given how unoptimized bind is.
+
+extrasUtils.simpleBind = function simpleBind(func, thisArg) {
+ return function() {
+ return %Apply(func, thisArg, arguments, 0, arguments.length);
+ };
+};
+
+extrasUtils.uncurryThis = function uncurryThis(func) {
+ return function(thisArg) {
+ return %Apply(func, thisArg, arguments, 1, arguments.length - 1);
+ };
+};
+
+%ToFastProperties(extrasUtils);
+
})
diff --git a/deps/v8/src/promise.js b/deps/v8/src/promise.js
index 0233dbebe4..b509e76e4a 100644
--- a/deps/v8/src/promise.js
+++ b/deps/v8/src/promise.js
@@ -2,11 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var $promiseHasUserDefinedRejectHandler;
-var $promiseStatus;
-var $promiseValue;
-
-(function(global, utils) {
+(function(global, utils, extrasUtils) {
"use strict";
@@ -16,22 +12,25 @@ var $promiseValue;
// Imports
var InternalArray = utils.InternalArray;
+var promiseHasHandlerSymbol =
+ utils.ImportNow("promise_has_handler_symbol");
+var promiseOnRejectSymbol = utils.ImportNow("promise_on_reject_symbol");
+var promiseOnResolveSymbol =
+ utils.ImportNow("promise_on_resolve_symbol");
+var promiseRawSymbol = utils.ImportNow("promise_raw_symbol");
+var promiseStatusSymbol = utils.ImportNow("promise_status_symbol");
+var promiseValueSymbol = utils.ImportNow("promise_value_symbol");
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
// -------------------------------------------------------------------
// Status values: 0 = pending, +1 = resolved, -1 = rejected
-var promiseStatus = GLOBAL_PRIVATE("Promise#status");
-var promiseValue = GLOBAL_PRIVATE("Promise#value");
-var promiseOnResolve = GLOBAL_PRIVATE("Promise#onResolve");
-var promiseOnReject = GLOBAL_PRIVATE("Promise#onReject");
-var promiseRaw = GLOBAL_PRIVATE("Promise#raw");
-var promiseHasHandler = %PromiseHasHandlerSymbol();
var lastMicrotaskId = 0;
var GlobalPromise = function Promise(resolver) {
- if (resolver === promiseRaw) return;
+ if (resolver === promiseRawSymbol) return;
if (!%_IsConstructCall()) throw MakeTypeError(kNotAPromise, this);
- if (!IS_SPEC_FUNCTION(resolver))
+ if (!IS_CALLABLE(resolver))
throw MakeTypeError(kResolverNotAFunction, resolver);
var promise = PromiseInit(this);
try {
@@ -48,10 +47,10 @@ var GlobalPromise = function Promise(resolver) {
// Core functionality.
function PromiseSet(promise, status, value, onResolve, onReject) {
- SET_PRIVATE(promise, promiseStatus, status);
- SET_PRIVATE(promise, promiseValue, value);
- SET_PRIVATE(promise, promiseOnResolve, onResolve);
- SET_PRIVATE(promise, promiseOnReject, onReject);
+ SET_PRIVATE(promise, promiseStatusSymbol, status);
+ SET_PRIVATE(promise, promiseValueSymbol, value);
+ SET_PRIVATE(promise, promiseOnResolveSymbol, onResolve);
+ SET_PRIVATE(promise, promiseOnRejectSymbol, onReject);
if (DEBUG_IS_ACTIVE) {
%DebugPromiseEvent({ promise: promise, status: status, value: value });
}
@@ -59,7 +58,7 @@ function PromiseSet(promise, status, value, onResolve, onReject) {
}
function PromiseCreateAndSet(status, value) {
- var promise = new GlobalPromise(promiseRaw);
+ var promise = new GlobalPromise(promiseRawSymbol);
// If debug is active, notify about the newly created promise first.
if (DEBUG_IS_ACTIVE) PromiseSet(promise, 0, UNDEFINED);
return PromiseSet(promise, status, value);
@@ -71,7 +70,7 @@ function PromiseInit(promise) {
}
function PromiseDone(promise, status, value, promiseQueue) {
- if (GET_PRIVATE(promise, promiseStatus) === 0) {
+ if (GET_PRIVATE(promise, promiseStatusSymbol) === 0) {
var tasks = GET_PRIVATE(promise, promiseQueue);
if (tasks.length) PromiseEnqueue(value, tasks, status);
PromiseSet(promise, status, value);
@@ -86,10 +85,10 @@ function PromiseCoerce(constructor, x) {
} catch(r) {
return %_CallFunction(constructor, r, PromiseRejected);
}
- if (IS_SPEC_FUNCTION(then)) {
+ if (IS_CALLABLE(then)) {
var deferred = %_CallFunction(constructor, PromiseDeferred);
try {
- %_CallFunction(x, deferred.resolve, deferred.reject, then);
+ %_Call(then, x, deferred.resolve, deferred.reject);
} catch(r) {
deferred.reject(r);
}
@@ -148,7 +147,7 @@ function PromiseNopResolver() {}
// For bootstrapper.
function IsPromise(x) {
- return IS_SPEC_OBJECT(x) && HAS_DEFINED_PRIVATE(x, promiseStatus);
+ return IS_SPEC_OBJECT(x) && HAS_DEFINED_PRIVATE(x, promiseStatusSymbol);
}
function PromiseCreate() {
@@ -156,19 +155,20 @@ function PromiseCreate() {
}
function PromiseResolve(promise, x) {
- PromiseDone(promise, +1, x, promiseOnResolve)
+ PromiseDone(promise, +1, x, promiseOnResolveSymbol)
}
function PromiseReject(promise, r) {
// Check promise status to confirm that this reject has an effect.
// Call runtime for callbacks to the debugger or for unhandled reject.
- if (GET_PRIVATE(promise, promiseStatus) == 0) {
+ if (GET_PRIVATE(promise, promiseStatusSymbol) == 0) {
var debug_is_active = DEBUG_IS_ACTIVE;
- if (debug_is_active || !HAS_DEFINED_PRIVATE(promise, promiseHasHandler)) {
+ if (debug_is_active ||
+ !HAS_DEFINED_PRIVATE(promise, promiseHasHandlerSymbol)) {
%PromiseRejectEvent(promise, r, debug_is_active);
}
}
- PromiseDone(promise, -1, r, promiseOnReject)
+ PromiseDone(promise, -1, r, promiseOnRejectSymbol)
}
// Convenience.
@@ -176,7 +176,7 @@ function PromiseReject(promise, r) {
function PromiseDeferred() {
if (this === GlobalPromise) {
// Optimized case, avoid extra closure.
- var promise = PromiseInit(new GlobalPromise(promiseRaw));
+ var promise = PromiseInit(new GlobalPromise(promiseRawSymbol));
return {
promise: promise,
resolve: function(x) { PromiseResolve(promise, x) },
@@ -221,31 +221,31 @@ function PromiseChain(onResolve, onReject) { // a.k.a. flatMap
onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
onReject = IS_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject;
var deferred = %_CallFunction(this.constructor, PromiseDeferred);
- switch (GET_PRIVATE(this, promiseStatus)) {
+ switch (GET_PRIVATE(this, promiseStatusSymbol)) {
case UNDEFINED:
throw MakeTypeError(kNotAPromise, this);
case 0: // Pending
- GET_PRIVATE(this, promiseOnResolve).push(onResolve, deferred);
- GET_PRIVATE(this, promiseOnReject).push(onReject, deferred);
+ GET_PRIVATE(this, promiseOnResolveSymbol).push(onResolve, deferred);
+ GET_PRIVATE(this, promiseOnRejectSymbol).push(onReject, deferred);
break;
case +1: // Resolved
- PromiseEnqueue(GET_PRIVATE(this, promiseValue),
+ PromiseEnqueue(GET_PRIVATE(this, promiseValueSymbol),
[onResolve, deferred],
+1);
break;
case -1: // Rejected
- if (!HAS_DEFINED_PRIVATE(this, promiseHasHandler)) {
+ if (!HAS_DEFINED_PRIVATE(this, promiseHasHandlerSymbol)) {
// Promise has already been rejected, but had no handler.
// Revoke previously triggered reject event.
%PromiseRevokeReject(this);
}
- PromiseEnqueue(GET_PRIVATE(this, promiseValue),
+ PromiseEnqueue(GET_PRIVATE(this, promiseValueSymbol),
[onReject, deferred],
-1);
break;
}
// Mark this promise as having handler.
- SET_PRIVATE(this, promiseHasHandler, true);
+ SET_PRIVATE(this, promiseHasHandlerSymbol, true);
if (DEBUG_IS_ACTIVE) {
%DebugPromiseEvent({ promise: deferred.promise, parentPromise: this });
}
@@ -259,10 +259,8 @@ function PromiseCatch(onReject) {
// Multi-unwrapped chaining with thenable coercion.
function PromiseThen(onResolve, onReject) {
- onResolve = IS_SPEC_FUNCTION(onResolve) ? onResolve
- : PromiseIdResolveHandler;
- onReject = IS_SPEC_FUNCTION(onReject) ? onReject
- : PromiseIdRejectHandler;
+ onResolve = IS_CALLABLE(onResolve) ? onResolve : PromiseIdResolveHandler;
+ onReject = IS_CALLABLE(onReject) ? onReject : PromiseIdRejectHandler;
var that = this;
var constructor = this.constructor;
return %_CallFunction(
@@ -340,7 +338,7 @@ function PromiseRace(iterable) {
// Utility for debugger
function PromiseHasUserDefinedRejectHandlerRecursive(promise) {
- var queue = GET_PRIVATE(promise, promiseOnReject);
+ var queue = GET_PRIVATE(promise, promiseOnRejectSymbol);
if (IS_UNDEFINED(queue)) return false;
for (var i = 0; i < queue.length; i += 2) {
if (queue[i] != PromiseIdRejectHandler) return true;
@@ -362,7 +360,7 @@ function PromiseHasUserDefinedRejectHandler() {
// Install exported functions.
%AddNamedProperty(global, 'Promise', GlobalPromise, DONT_ENUM);
-%AddNamedProperty(GlobalPromise.prototype, symbolToStringTag, "Promise",
+%AddNamedProperty(GlobalPromise.prototype, toStringTagSymbol, "Promise",
DONT_ENUM | READ_ONLY);
utils.InstallFunctions(GlobalPromise, DONT_ENUM, [
@@ -380,19 +378,23 @@ utils.InstallFunctions(GlobalPromise.prototype, DONT_ENUM, [
"catch", PromiseCatch
]);
-$promiseStatus = promiseStatus;
-$promiseValue = promiseValue;
-
-utils.ExportToRuntime(function(to) {
- to.promiseStatus = promiseStatus;
- to.promiseValue = promiseValue;
- to.PromiseCreate = PromiseCreate;
- to.PromiseResolve = PromiseResolve;
- to.PromiseReject = PromiseReject;
- to.PromiseChain = PromiseChain;
- to.PromiseCatch = PromiseCatch;
- to.PromiseThen = PromiseThen;
- to.PromiseHasUserDefinedRejectHandler = PromiseHasUserDefinedRejectHandler;
-});
+%InstallToContext([
+ "promise_catch", PromiseCatch,
+ "promise_chain", PromiseChain,
+ "promise_create", PromiseCreate,
+ "promise_has_user_defined_reject_handler", PromiseHasUserDefinedRejectHandler,
+ "promise_reject", PromiseReject,
+ "promise_resolve", PromiseResolve,
+ "promise_then", PromiseThen,
+]);
+
+// This allows extras to create promises quickly without building extra
+// resolve/reject closures, and allows them to later resolve and reject any
+// promise without having to hold on to those closures forever.
+utils.InstallFunctions(extrasUtils, 0, [
+ "createPromise", PromiseCreate,
+ "resolvePromise", PromiseResolve,
+ "rejectPromise", PromiseReject
+]);
})
diff --git a/deps/v8/src/prototype.h b/deps/v8/src/prototype.h
index 4df1114c77..0727749853 100644
--- a/deps/v8/src/prototype.h
+++ b/deps/v8/src/prototype.h
@@ -39,6 +39,7 @@ class PrototypeIterator {
Advance();
}
}
+
PrototypeIterator(Isolate* isolate, Object* receiver,
WhereToStart where_to_start = START_AT_PROTOTYPE)
: did_jump_to_prototype_chain_(false),
@@ -48,25 +49,32 @@ class PrototypeIterator {
Advance();
}
}
+
explicit PrototypeIterator(Map* receiver_map)
: did_jump_to_prototype_chain_(true),
object_(receiver_map->prototype()),
isolate_(receiver_map->GetIsolate()) {}
+
explicit PrototypeIterator(Handle<Map> receiver_map)
: did_jump_to_prototype_chain_(true),
object_(NULL),
handle_(handle(receiver_map->prototype(), receiver_map->GetIsolate())),
isolate_(receiver_map->GetIsolate()) {}
+
~PrototypeIterator() {}
- Object* GetCurrent() const {
+ template <typename T = Object>
+ T* GetCurrent() const {
DCHECK(handle_.is_null());
- return object_;
+ return T::cast(object_);
}
- static Handle<Object> GetCurrent(const PrototypeIterator& iterator) {
+
+ template <typename T = Object>
+ static Handle<T> GetCurrent(const PrototypeIterator& iterator) {
DCHECK(!iterator.handle_.is_null());
- return iterator.handle_;
+ return Handle<T>::cast(iterator.handle_);
}
+
void Advance() {
if (handle_.is_null() && object_->IsJSProxy()) {
did_jump_to_prototype_chain_ = true;
@@ -79,6 +87,7 @@ class PrototypeIterator {
}
AdvanceIgnoringProxies();
}
+
void AdvanceIgnoringProxies() {
if (!did_jump_to_prototype_chain_) {
did_jump_to_prototype_chain_ = true;
@@ -96,6 +105,7 @@ class PrototypeIterator {
}
}
}
+
bool IsAtEnd(WhereToEnd where_to_end = END_AT_NULL) const {
if (handle_.is_null()) {
return object_->IsNull() ||
@@ -109,10 +119,12 @@ class PrototypeIterator {
!Handle<HeapObject>::cast(handle_)->map()->is_hidden_prototype());
}
}
+
bool IsAtEnd(Object* final_object) {
DCHECK(handle_.is_null());
return object_->IsNull() || object_ == final_object;
}
+
bool IsAtEnd(Handle<Object> final_object) {
DCHECK(!handle_.is_null());
return handle_->IsNull() || *handle_ == *final_object;
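
The templated GetCurrent above lets callers write iter.GetCurrent<JSObject>() instead of casting at every call site, while the T = Object default keeps existing callers compiling unchanged. A minimal sketch of the pattern, assuming toy Object/JSObject types with a cast() helper (V8's real cast is a DCHECKed static cast, not a dynamic_cast):

    #include <cassert>
    #include <iostream>

    struct Object {
      virtual ~Object() = default;
      static Object* cast(Object* o) { return o; }  // identity for the base
    };

    struct JSObject : Object {
      static JSObject* cast(Object* o) {
        JSObject* r = dynamic_cast<JSObject*>(o);
        assert(r != nullptr);  // stands in for V8's checked cast
        return r;
      }
      int field = 7;
    };

    class PrototypeIterator {
     public:
      explicit PrototypeIterator(Object* current) : object_(current) {}
      template <typename T = Object>
      T* GetCurrent() const { return T::cast(object_); }

     private:
      Object* object_;
    };

    int main() {
      JSObject obj;
      PrototypeIterator it(&obj);
      Object* generic = it.GetCurrent();            // default: old call sites
      JSObject* typed = it.GetCurrent<JSObject>();  // typed access in one step
      std::cout << typed->field << (generic == typed ? " same\n" : "\n");
    }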
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
index 88b6a273ba..cc45b32b3d 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/proxy.js
@@ -35,11 +35,11 @@ function ProxyCreate(handler, proto) {
function ProxyCreateFunction(handler, callTrap, constructTrap) {
if (!IS_SPEC_OBJECT(handler))
throw MakeTypeError(kProxyHandlerNonObject, "createFunction")
- if (!IS_SPEC_FUNCTION(callTrap))
+ if (!IS_CALLABLE(callTrap))
throw MakeTypeError(kProxyTrapFunctionExpected, "call")
if (IS_UNDEFINED(constructTrap)) {
constructTrap = DerivedConstructTrap(callTrap)
- } else if (IS_SPEC_FUNCTION(constructTrap)) {
+ } else if (IS_CALLABLE(constructTrap)) {
// Make sure the trap receives 'undefined' as this.
var construct = constructTrap
constructTrap = function() {
@@ -145,7 +145,7 @@ function DerivedKeysTrap() {
for (var i = 0, count = 0; i < names.length; ++i) {
var name = names[i]
if (IS_SYMBOL(name)) continue
- var desc = this.getOwnPropertyDescriptor(TO_STRING_INLINE(name))
+ var desc = this.getOwnPropertyDescriptor(TO_STRING(name))
if (!IS_UNDEFINED(desc) && desc.enumerable) {
enumerableNames[count++] = names[i]
}
@@ -159,7 +159,7 @@ function DerivedEnumerateTrap() {
for (var i = 0, count = 0; i < names.length; ++i) {
var name = names[i]
if (IS_SYMBOL(name)) continue
- var desc = this.getPropertyDescriptor(TO_STRING_INLINE(name))
+ var desc = this.getPropertyDescriptor(TO_STRING(name))
if (!IS_UNDEFINED(desc)) {
if (!desc.configurable) {
throw MakeTypeError(kProxyPropNotConfigurable,
@@ -200,11 +200,11 @@ utils.Export(function(to) {
to.ProxyDerivedKeysTrap = DerivedKeysTrap;
});
-utils.ExportToRuntime(function(to) {
- to.ProxyDerivedGetTrap = DerivedGetTrap;
- to.ProxyDerivedHasTrap = DerivedHasTrap;
- to.ProxyDerivedSetTrap = DerivedSetTrap;
- to.ProxyEnumerate = ProxyEnumerate;
-});
+%InstallToContext([
+ "derived_get_trap", DerivedGetTrap,
+ "derived_has_trap", DerivedHasTrap,
+ "derived_set_trap", DerivedSetTrap,
+ "proxy_enumerate", ProxyEnumerate,
+]);
})
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index e717b26158..e19a813483 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -3,8 +3,6 @@
// found in the LICENSE file.
var $regexpLastMatchInfoOverride;
-var harmony_regexps = false;
-var harmony_unicode_regexps = false;
(function(global, utils) {
@@ -13,13 +11,21 @@ var harmony_unicode_regexps = false;
// -------------------------------------------------------------------
// Imports
+var FLAG_harmony_regexps;
+var FLAG_harmony_unicode_regexps;
var GlobalRegExp = global.RegExp;
var InternalPackedArray = utils.InternalPackedArray;
+var ToNumber;
utils.Import(function(from) {
ToNumber = from.ToNumber;
});
+utils.ImportFromExperimental(function(from) {
+ FLAG_harmony_regexps = from.FLAG_harmony_regexps;
+ FLAG_harmony_unicode_regexps = from.FLAG_harmony_unicode_regexps;
+});
+
// -------------------------------------------------------------------
// Property of the builtins object for recording the result of the last
@@ -53,15 +59,15 @@ function DoConstructRegExp(object, pattern, flags) {
flags = (pattern.global ? 'g' : '')
+ (pattern.ignoreCase ? 'i' : '')
+ (pattern.multiline ? 'm' : '');
- if (harmony_unicode_regexps)
+ if (FLAG_harmony_unicode_regexps)
flags += (pattern.unicode ? 'u' : '');
- if (harmony_regexps)
+ if (FLAG_harmony_regexps)
flags += (pattern.sticky ? 'y' : '');
pattern = pattern.source;
}
- pattern = IS_UNDEFINED(pattern) ? '' : $toString(pattern);
- flags = IS_UNDEFINED(flags) ? '' : $toString(flags);
+ pattern = IS_UNDEFINED(pattern) ? '' : TO_STRING(pattern);
+ flags = IS_UNDEFINED(flags) ? '' : TO_STRING(flags);
%RegExpInitializeAndCompile(object, pattern, flags);
}
@@ -155,14 +161,14 @@ function RegExpExecJS(string) {
'RegExp.prototype.exec', this);
}
- string = TO_STRING_INLINE(string);
+ string = TO_STRING(string);
var lastIndex = this.lastIndex;
// Conversion is required by the ES5 specification (RegExp.prototype.exec
// algorithm, step 5) even if the value is discarded for non-global RegExps.
var i = TO_INTEGER(lastIndex);
- var updateLastIndex = this.global || (harmony_regexps && this.sticky);
+ var updateLastIndex = this.global || (FLAG_harmony_regexps && this.sticky);
if (updateLastIndex) {
if (i < 0 || i > string.length) {
this.lastIndex = 0;
@@ -202,7 +208,7 @@ function RegExpTest(string) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'RegExp.prototype.test', this);
}
- string = TO_STRING_INLINE(string);
+ string = TO_STRING(string);
var lastIndex = this.lastIndex;
@@ -210,7 +216,7 @@ function RegExpTest(string) {
// algorithm, step 5) even if the value is discarded for non-global RegExps.
var i = TO_INTEGER(lastIndex);
- if (this.global || (harmony_regexps && this.sticky)) {
+ if (this.global || (FLAG_harmony_regexps && this.sticky)) {
if (i < 0 || i > string.length) {
this.lastIndex = 0;
return false;
@@ -268,8 +274,8 @@ function RegExpToString() {
if (this.global) result += 'g';
if (this.ignoreCase) result += 'i';
if (this.multiline) result += 'm';
- if (harmony_unicode_regexps && this.unicode) result += 'u';
- if (harmony_regexps && this.sticky) result += 'y';
+ if (FLAG_harmony_unicode_regexps && this.unicode) result += 'u';
+ if (FLAG_harmony_regexps && this.sticky) result += 'y';
return result;
}
@@ -386,7 +392,7 @@ var RegExpGetInput = function() {
return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
};
var RegExpSetInput = function(string) {
- LAST_INPUT(RegExpLastMatchInfo) = $toString(string);
+ LAST_INPUT(RegExpLastMatchInfo) = TO_STRING(string);
};
%OptimizeObjectForAddingMultipleProperties(GlobalRegExp, 22);
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index d502060440..d296d90e7d 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -7,9 +7,9 @@
#include "src/regexp/arm/regexp-macro-assembler-arm.h"
#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index ed24cf3401..d440879e26 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -7,9 +7,9 @@
#include "src/regexp/arm64/regexp-macro-assembler-arm64.h"
#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 3ba5db14f2..9e50a10574 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -6,9 +6,9 @@
#include "src/regexp/ia32/regexp-macro-assembler-ia32.h"
-#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index aacaa1b9d3..438d1b1368 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -10,6 +10,7 @@
#include "src/compiler.h"
#include "src/execution.h"
#include "src/factory.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/ostreams.h"
#include "src/parser.h"
@@ -6406,5 +6407,98 @@ bool RegExpEngine::TooMuchRegExpCode(Handle<String> pattern) {
}
return too_much;
}
+
+
+Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
+ Object* key_pattern, ResultsCacheType type) {
+ FixedArray* cache;
+ if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
+ if (type == STRING_SPLIT_SUBSTRINGS) {
+ DCHECK(key_pattern->IsString());
+ if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
+ cache = heap->string_split_cache();
+ } else {
+ DCHECK(type == REGEXP_MULTIPLE_INDICES);
+ DCHECK(key_pattern->IsFixedArray());
+ cache = heap->regexp_multiple_cache();
+ }
+
+ uint32_t hash = key_string->Hash();
+ uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
+ ~(kArrayEntriesPerCacheEntry - 1));
+ if (cache->get(index + kStringOffset) == key_string &&
+ cache->get(index + kPatternOffset) == key_pattern) {
+ return cache->get(index + kArrayOffset);
+ }
+ index =
+ ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+ if (cache->get(index + kStringOffset) == key_string &&
+ cache->get(index + kPatternOffset) == key_pattern) {
+ return cache->get(index + kArrayOffset);
+ }
+ return Smi::FromInt(0);
+}
+
+
+void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
+ Handle<Object> key_pattern,
+ Handle<FixedArray> value_array,
+ ResultsCacheType type) {
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> cache;
+ if (!key_string->IsInternalizedString()) return;
+ if (type == STRING_SPLIT_SUBSTRINGS) {
+ DCHECK(key_pattern->IsString());
+ if (!key_pattern->IsInternalizedString()) return;
+ cache = factory->string_split_cache();
+ } else {
+ DCHECK(type == REGEXP_MULTIPLE_INDICES);
+ DCHECK(key_pattern->IsFixedArray());
+ cache = factory->regexp_multiple_cache();
+ }
+
+ uint32_t hash = key_string->Hash();
+ uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
+ ~(kArrayEntriesPerCacheEntry - 1));
+ if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
+ cache->set(index + kStringOffset, *key_string);
+ cache->set(index + kPatternOffset, *key_pattern);
+ cache->set(index + kArrayOffset, *value_array);
+ } else {
+ uint32_t index2 =
+ ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+ if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
+ cache->set(index2 + kStringOffset, *key_string);
+ cache->set(index2 + kPatternOffset, *key_pattern);
+ cache->set(index2 + kArrayOffset, *value_array);
+ } else {
+ cache->set(index2 + kStringOffset, Smi::FromInt(0));
+ cache->set(index2 + kPatternOffset, Smi::FromInt(0));
+ cache->set(index2 + kArrayOffset, Smi::FromInt(0));
+ cache->set(index + kStringOffset, *key_string);
+ cache->set(index + kPatternOffset, *key_pattern);
+ cache->set(index + kArrayOffset, *value_array);
+ }
+ }
+ // If the array is a reasonably short list of substrings, convert it into a
+ // list of internalized strings.
+ if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
+ for (int i = 0; i < value_array->length(); i++) {
+ Handle<String> str(String::cast(value_array->get(i)), isolate);
+ Handle<String> internalized_str = factory->InternalizeString(str);
+ value_array->set(i, *internalized_str);
+ }
+ }
+ // Convert backing store to a copy-on-write array.
+ value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
+}
+
+
+void RegExpResultsCache::Clear(FixedArray* cache) {
+ for (int i = 0; i < kRegExpResultsCacheSize; i++) {
+ cache->set(i, Smi::FromInt(0));
+ }
+}
+
} // namespace internal
} // namespace v8
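
RegExpResultsCache above is effectively a 2-way set-associative table: each key hashes to a primary slot of four consecutive fields plus one overflow slot, and a second collision evicts the overflow slot. A minimal sketch of just the index arithmetic, using the constants from the diff (kRegExpResultsCacheSize = 0x100 fields, four fields per entry):

    #include <cstdint>
    #include <cstdio>

    const uint32_t kCacheSize = 0x100;   // total fields in the flat array
    const uint32_t kFieldsPerEntry = 4;  // string, pattern, array, padding

    // Primary slot: mask the hash into the array, then round down to the
    // start of a four-field entry.
    uint32_t PrimaryIndex(uint32_t hash) {
      return (hash & (kCacheSize - 1)) & ~(kFieldsPerEntry - 1);
    }

    // Secondary slot: the next entry, wrapping around the array.
    uint32_t SecondaryIndex(uint32_t primary) {
      return (primary + kFieldsPerEntry) & (kCacheSize - 1);
    }

    int main() {
      uint32_t hash = 0x1234ABCD;
      uint32_t p = PrimaryIndex(hash);  // 0xCD & ~3 = 0xCC
      uint32_t s = SecondaryIndex(p);   // 0xD0
      std::printf("primary=0x%X secondary=0x%X\n", p, s);
      // Lookup probes p then s; Enter fills the first empty slot and, if
      // both are taken, clears s and claims p, so each set holds at most
      // two live entries.
    }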
diff --git a/deps/v8/src/regexp/jsregexp.h b/deps/v8/src/regexp/jsregexp.h
index 39e702149d..760d37862b 100644
--- a/deps/v8/src/regexp/jsregexp.h
+++ b/deps/v8/src/regexp/jsregexp.h
@@ -1659,6 +1659,30 @@ class RegExpEngine: public AllStatic {
};
-} } // namespace v8::internal
+class RegExpResultsCache : public AllStatic {
+ public:
+ enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
+
+ // Attempt to retrieve a cached result. On failure, 0 is returned as a Smi.
+ // On success, the returned result is guaranteed to be a COW-array.
+ static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
+ ResultsCacheType type);
+ // Attempt to add value_array to the cache specified by type. On success,
+ // value_array is turned into a COW-array.
+ static void Enter(Isolate* isolate, Handle<String> key_string,
+ Handle<Object> key_pattern, Handle<FixedArray> value_array,
+ ResultsCacheType type);
+ static void Clear(FixedArray* cache);
+ static const int kRegExpResultsCacheSize = 0x100;
+
+ private:
+ static const int kArrayEntriesPerCacheEntry = 4;
+ static const int kStringOffset = 0;
+ static const int kPatternOffset = 1;
+ static const int kArrayOffset = 2;
+};
+
+} // namespace internal
+} // namespace v8
#endif // V8_REGEXP_JSREGEXP_H_
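The header pins down the layout those probes walk over: kRegExpResultsCacheSize counts individual FixedArray slots, and each logical entry occupies kArrayEntriesPerCacheEntry of them at offsets kStringOffset/kPatternOffset/kArrayOffset (the fourth slot is apparently unused). A small illustration of the index arithmetic, with the constants copied from the declaration above:

```cpp
#include <cstdint>
#include <iostream>

int main() {
  // Constants as declared in RegExpResultsCache.
  constexpr uint32_t kRegExpResultsCacheSize = 0x100;  // total slots
  constexpr uint32_t kArrayEntriesPerCacheEntry = 4;   // slots per entry
  constexpr uint32_t kStringOffset = 0;
  constexpr uint32_t kPatternOffset = 1;
  constexpr uint32_t kArrayOffset = 2;                 // slot 3 apparently unused

  uint32_t hash = 0xDEADBEEF;  // any internalized string hash
  // Mask into the table, then align down to a 4-slot entry boundary.
  uint32_t index = (hash & (kRegExpResultsCacheSize - 1)) &
                   ~(kArrayEntriesPerCacheEntry - 1);
  // The secondary probe is the next entry, wrapping at the table size.
  uint32_t index2 =
      (index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1);

  std::cout << "primary:   string@" << index + kStringOffset << " pattern@"
            << index + kPatternOffset << " array@" << index + kArrayOffset
            << "\n";
  std::cout << "secondary: string@" << index2 + kStringOffset << " pattern@"
            << index2 + kPatternOffset << " array@" << index2 + kArrayOffset
            << "\n";
}
```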
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index 838fc68a80..869cbc4f2e 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -219,7 +219,7 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
Label not_at_start;
// Did we start the match at the start of the string at all?
- __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
+ __ ld(a0, MemOperand(frame_pointer(), kStartIndex));
BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));
// If we did, are we still at the start of the input?
@@ -232,7 +232,7 @@ void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
// Did we start the match at the start of the string at all?
- __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
+ __ ld(a0, MemOperand(frame_pointer(), kStartIndex));
BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
// If we did, are we still at the start of the input?
__ ld(a1, MemOperand(frame_pointer(), kInputStart));
@@ -779,7 +779,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
if (global()) {
// Restart matching if the regular expression is flagged as global.
__ ld(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ ld(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
__ ld(a2, MemOperand(frame_pointer(), kRegisterOutput));
// Increment success counter.
__ Daddu(a0, a0, 1);
@@ -1155,8 +1155,8 @@ int64_t RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Address re_frame) {
return NativeRegExpMacroAssembler::CheckStackGuardState(
frame_entry<Isolate*>(re_frame, kIsolate),
- frame_entry<int>(re_frame, kStartIndex),
- frame_entry<int>(re_frame, kDirectCall) == 1, return_address, re_code,
+ static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
+ frame_entry<int64_t>(re_frame, kDirectCall) == 1, return_address, re_code,
frame_entry_address<String*>(re_frame, kInputString),
frame_entry_address<const byte*>(re_frame, kInputStart),
frame_entry_address<const byte*>(re_frame, kInputEnd));
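All four mips64 changes are the same class of fix: frame slots on this 64-bit target hold 64-bit values, so the 32-bit `lw` loads read only half of each slot, and the `frame_entry<int>` reads in CheckStackGuardState had the same truncation baked in; the patch loads the full width (`ld`, `int64_t`) and narrows explicitly afterwards. A host-side sketch of the hazard, assuming nothing V8-specific:

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>

// Sketch of the hazard behind the lw -> ld fixes: frame slots on a 64-bit
// target hold 64-bit values, so a 32-bit read only sees half of the slot.
int main() {
  int64_t slot = 1;  // e.g. kDirectCall stored as a 64-bit frame entry

  // "ld": read the full 64-bit slot.
  int64_t wide;
  std::memcpy(&wide, &slot, sizeof(wide));

  // "lw": read 32 bits at the slot's base address. On a big-endian MIPS64
  // machine this picks up the *high* half, which is 0 here.
  int32_t narrow;
  std::memcpy(&narrow, &slot, sizeof(narrow));

  std::cout << "64-bit load: " << wide << "\n";    // always 1
  std::cout << "32-bit load: " << narrow << "\n";  // 1 or 0, endian-dependent

  // The patched code therefore loads the whole slot, then narrows on purpose:
  int narrowed = static_cast<int>(wide);
  std::cout << "explicit narrow: " << narrowed << "\n";
}
```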
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index e820aa9bbf..03f9741147 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -8,9 +8,9 @@
#include "src/base/bits.h"
#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 20105c0b30..9916d5f32f 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -6,6 +6,7 @@
#include "src/assembler.h"
#include "src/ast.h"
+#include "src/isolate-inl.h"
#include "src/regexp/regexp-stack.h"
#include "src/simulator.h"
diff --git a/deps/v8/src/regexp/regexp-stack.cc b/deps/v8/src/regexp/regexp-stack.cc
index 348e684151..34f9127f2f 100644
--- a/deps/v8/src/regexp/regexp-stack.cc
+++ b/deps/v8/src/regexp/regexp-stack.cc
@@ -5,7 +5,6 @@
#include "src/regexp/regexp-stack.h"
#include "src/isolate.h"
-#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index c0f5f2c323..969edc1b3b 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -6,9 +6,9 @@
#include "src/regexp/x64/regexp-macro-assembler-x64.h"
-#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
diff --git a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
index c30994eae0..c6968dc197 100644
--- a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
+++ b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
@@ -6,9 +6,9 @@
#include "src/regexp/x87/regexp-macro-assembler-x87.h"
-#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/macro-assembler.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h"
#include "src/unicode.h"
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index f7b0ce005e..d88e1199f8 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -190,6 +190,12 @@ void Processor::VisitWithStatement(WithStatement* node) {
}
+void Processor::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* node) {
+ Visit(node->statement());
+}
+
+
// Do nothing:
void Processor::VisitVariableDeclaration(VariableDeclaration* node) {}
void Processor::VisitFunctionDeclaration(FunctionDeclaration* node) {}
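Processor's new VisitSloppyBlockFunctionStatement is the standard wrapper-node case in a visitor: the node contributes nothing of its own, so unlike the empty do-nothing handlers above, it recurses into the statement it wraps. A generic sketch of that delegation pattern, with hypothetical node types rather than V8's AST classes:

```cpp
#include <iostream>
#include <memory>

// Sketch of the wrapper-node pattern: the visitor method for a node that
// merely wraps another statement just recurses into the wrapped child.
struct ExpressionStatement;
struct WrapperStatement;

struct Visitor {
  virtual ~Visitor() = default;
  virtual void VisitExpressionStatement(ExpressionStatement& node) = 0;
  virtual void VisitWrapperStatement(WrapperStatement& node) = 0;
};

struct Statement {
  virtual ~Statement() = default;
  virtual void Accept(Visitor& v) = 0;
};

struct ExpressionStatement : Statement {
  void Accept(Visitor& v) override { v.VisitExpressionStatement(*this); }
};

// Analogous to SloppyBlockFunctionStatement: it owns one inner statement.
struct WrapperStatement : Statement {
  explicit WrapperStatement(std::unique_ptr<Statement> inner)
      : statement(std::move(inner)) {}
  std::unique_ptr<Statement> statement;
  void Accept(Visitor& v) override { v.VisitWrapperStatement(*this); }
};

struct Processor : Visitor {
  void VisitExpressionStatement(ExpressionStatement&) override {
    std::cout << "processed the inner statement\n";
  }
  void VisitWrapperStatement(WrapperStatement& node) override {
    node.statement->Accept(*this);  // nothing to do for the wrapper itself
  }
};

int main() {
  WrapperStatement wrapper(std::make_unique<ExpressionStatement>());
  Processor p;
  wrapper.Accept(p);  // prints via the delegated visit
}
```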
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 4a45baf522..6b942d44a6 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/runtime-profiler.h"
#include "src/assembler.h"
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index e2e6d2ef35..0e82d862bd 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -11,20 +11,14 @@
// The following declarations are shared with other native JS files.
// They are all declared at this one spot to avoid redeclaration errors.
-var $defaultNumber;
-var $defaultString;
var $NaN;
var $nonNumberToNumber;
-var $nonStringToString;
var $sameValue;
var $sameValueZero;
-var $toInteger;
-var $toLength;
-var $toName;
var $toNumber;
var $toPositiveInteger;
-var $toPrimitive;
-var $toString;
+
+var harmony_tolength = false;
(function(global, utils) {
@@ -33,486 +27,30 @@ var $toString;
var GlobalArray = global.Array;
var GlobalBoolean = global.Boolean;
var GlobalString = global.String;
-var GlobalNumber = global.Number;
+var isConcatSpreadableSymbol =
+ utils.ImportNow("is_concat_spreadable_symbol");
// ----------------------------------------------------------------------------
-/* -----------------------------------
-- - - C o m p a r i s o n - - -
------------------------------------
-*/
-
-// ECMA-262 Section 11.9.3.
-function EQUALS(y) {
- if (IS_STRING(this) && IS_STRING(y)) return %StringEquals(this, y);
- var x = this;
-
- while (true) {
- if (IS_NUMBER(x)) {
- while (true) {
- if (IS_NUMBER(y)) return %NumberEquals(x, y);
- if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
- if (!IS_SPEC_OBJECT(y)) {
- if (IS_SYMBOL(y) || IS_SIMD_VALUE(y)) return 1; // not equal
- // String or boolean.
- return %NumberEquals(x, %$toNumber(y));
- }
- y = %$toPrimitive(y, NO_HINT);
- }
- } else if (IS_STRING(x)) {
- while (true) {
- if (IS_STRING(y)) return %StringEquals(x, y);
- if (IS_NUMBER(y)) return %NumberEquals(%$toNumber(x), y);
- if (IS_BOOLEAN(y)) return %NumberEquals(%$toNumber(x), %$toNumber(y));
- if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
- if (IS_SYMBOL(y) || IS_SIMD_VALUE(y)) return 1; // not equal
- y = %$toPrimitive(y, NO_HINT);
- }
- } else if (IS_SYMBOL(x)) {
- if (IS_SYMBOL(y)) return %_ObjectEquals(x, y) ? 0 : 1;
- return 1; // not equal
- } else if (IS_BOOLEAN(x)) {
- if (IS_BOOLEAN(y)) return %_ObjectEquals(x, y) ? 0 : 1;
- if (IS_NULL_OR_UNDEFINED(y)) return 1;
- if (IS_NUMBER(y)) return %NumberEquals(%$toNumber(x), y);
- if (IS_STRING(y)) return %NumberEquals(%$toNumber(x), %$toNumber(y));
- if (IS_SYMBOL(y) || IS_SIMD_VALUE(y)) return 1; // not equal
- // y is object.
- x = %$toNumber(x);
- y = %$toPrimitive(y, NO_HINT);
- } else if (IS_NULL_OR_UNDEFINED(x)) {
- return IS_NULL_OR_UNDEFINED(y) ? 0 : 1;
- } else if (IS_SIMD_VALUE(x)) {
- if (!IS_SIMD_VALUE(y)) return 1; // not equal
- return %SimdEquals(x, y);
- } else {
- // x is an object.
- if (IS_SPEC_OBJECT(y)) return %_ObjectEquals(x, y) ? 0 : 1;
- if (IS_NULL_OR_UNDEFINED(y)) return 1; // not equal
- if (IS_BOOLEAN(y)) {
- y = %$toNumber(y);
- } else if (IS_SYMBOL(y) || IS_SIMD_VALUE(y)) {
- return 1; // not equal
- }
- x = %$toPrimitive(x, NO_HINT);
- }
- }
-}
-
-
-// ECMA-262, section 11.8.5, page 53. The 'ncr' parameter is used as
-// the result when either (or both) the operands are NaN.
-function COMPARE(x, ncr) {
- var left;
- var right;
- // Fast cases for string, numbers and undefined compares.
- if (IS_STRING(this)) {
- if (IS_STRING(x)) return %_StringCompare(this, x);
- if (IS_UNDEFINED(x)) return ncr;
- left = this;
- } else if (IS_NUMBER(this)) {
- if (IS_NUMBER(x)) return %NumberCompare(this, x, ncr);
- if (IS_UNDEFINED(x)) return ncr;
- left = this;
- } else if (IS_UNDEFINED(this)) {
- if (!IS_UNDEFINED(x)) {
- %$toPrimitive(x, NUMBER_HINT);
- }
- return ncr;
- } else if (IS_UNDEFINED(x)) {
- %$toPrimitive(this, NUMBER_HINT);
- return ncr;
- } else {
- left = %$toPrimitive(this, NUMBER_HINT);
- }
-
- right = %$toPrimitive(x, NUMBER_HINT);
- if (IS_STRING(left) && IS_STRING(right)) {
- return %_StringCompare(left, right);
- } else {
- var left_number = %$toNumber(left);
- var right_number = %$toNumber(right);
- if (NUMBER_IS_NAN(left_number) || NUMBER_IS_NAN(right_number)) return ncr;
- return %NumberCompare(left_number, right_number, ncr);
- }
-}
-
-// Strong mode COMPARE throws if an implicit conversion would be performed
-function COMPARE_STRONG(x, ncr) {
- if (IS_STRING(this) && IS_STRING(x)) return %_StringCompare(this, x);
- if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberCompare(this, x, ncr);
-
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
-
-/* -----------------------------------
- - - - A r i t h m e t i c - - -
- -----------------------------------
-*/
-
-// ECMA-262, section 11.6.1, page 50.
-function ADD(x) {
- // Fast case: Check for number operands and do the addition.
- if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberAdd(this, x);
- if (IS_STRING(this) && IS_STRING(x)) return %_StringAdd(this, x);
-
- // Default implementation.
- var a = %$toPrimitive(this, NO_HINT);
- var b = %$toPrimitive(x, NO_HINT);
-
- if (IS_STRING(a)) {
- return %_StringAdd(a, %$toString(b));
- } else if (IS_STRING(b)) {
- return %_StringAdd(%$nonStringToString(a), b);
- } else {
- return %NumberAdd(%$toNumber(a), %$toNumber(b));
- }
-}
-
-
-// Strong mode ADD throws if an implicit conversion would be performed
-function ADD_STRONG(x) {
- if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberAdd(this, x);
- if (IS_STRING(this) && IS_STRING(x)) return %_StringAdd(this, x);
-
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
-// Left operand (this) is already a string.
-function STRING_ADD_LEFT(y) {
- if (!IS_STRING(y)) {
- if (IS_STRING_WRAPPER(y) && %_IsStringWrapperSafeForDefaultValueOf(y)) {
- y = %_ValueOf(y);
- } else {
- y = IS_NUMBER(y)
- ? %_NumberToString(y)
- : %$toString(%$toPrimitive(y, NO_HINT));
- }
- }
- return %_StringAdd(this, y);
-}
-
-
-// Right operand (y) is already a string.
-function STRING_ADD_RIGHT(y) {
- var x = this;
- if (!IS_STRING(x)) {
- if (IS_STRING_WRAPPER(x) && %_IsStringWrapperSafeForDefaultValueOf(x)) {
- x = %_ValueOf(x);
- } else {
- x = IS_NUMBER(x)
- ? %_NumberToString(x)
- : %$toString(%$toPrimitive(x, NO_HINT));
- }
- }
- return %_StringAdd(x, y);
-}
-
-
-// ECMA-262, section 11.6.2, page 50.
-function SUB(y) {
- var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
- return %NumberSub(x, y);
-}
-
-
-// Strong mode SUB throws if an implicit conversion would be performed
-function SUB_STRONG(y) {
- if (IS_NUMBER(this) && IS_NUMBER(y)) {
- return %NumberSub(this, y);
- }
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
-// ECMA-262, section 11.5.1, page 48.
-function MUL(y) {
- var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
- return %NumberMul(x, y);
-}
-
-
-// Strong mode MUL throws if an implicit conversion would be performed
-function MUL_STRONG(y) {
- if (IS_NUMBER(this) && IS_NUMBER(y)) {
- return %NumberMul(this, y);
- }
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
-// ECMA-262, section 11.5.2, page 49.
-function DIV(y) {
- var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
- return %NumberDiv(x, y);
-}
-
-
-// Strong mode DIV throws if an implicit conversion would be performed
-function DIV_STRONG(y) {
- if (IS_NUMBER(this) && IS_NUMBER(y)) {
- return %NumberDiv(this, y);
- }
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
-// ECMA-262, section 11.5.3, page 49.
-function MOD(y) {
- var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
- return %NumberMod(x, y);
-}
-
-
-// Strong mode MOD throws if an implicit conversion would be performed
-function MOD_STRONG(y) {
- if (IS_NUMBER(this) && IS_NUMBER(y)) {
- return %NumberMod(this, y);
- }
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
-/* -------------------------------------------
- - - - B i t o p e r a t i o n s - - -
- -------------------------------------------
-*/
-
-// ECMA-262, section 11.10, page 57.
-function BIT_OR(y) {
- var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
- return %NumberOr(x, y);
-}
-
-
-// Strong mode BIT_OR throws if an implicit conversion would be performed
-function BIT_OR_STRONG(y) {
- if (IS_NUMBER(this) && IS_NUMBER(y)) {
- return %NumberOr(this, y);
- }
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
-// ECMA-262, section 11.10, page 57.
-function BIT_AND(y) {
- var x;
- if (IS_NUMBER(this)) {
- x = this;
- if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
- } else {
- x = %$nonNumberToNumber(this);
- // Make sure to convert the right operand to a number before
- // bailing out in the fast case, but after converting the
- // left operand. This ensures that valueOf methods on the right
- // operand are always executed.
- if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
- // Optimize for the case where we end up AND'ing a value
- // that doesn't convert to a number. This is common in
- // certain benchmarks.
- if (NUMBER_IS_NAN(x)) return 0;
- }
- return %NumberAnd(x, y);
-}
-
-
-// Strong mode BIT_AND throws if an implicit conversion would be performed
-function BIT_AND_STRONG(y) {
- if (IS_NUMBER(this) && IS_NUMBER(y)) {
- return %NumberAnd(this, y);
- }
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
-// ECMA-262, section 11.10, page 57.
-function BIT_XOR(y) {
- var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
- return %NumberXor(x, y);
-}
-
-
-// Strong mode BIT_XOR throws if an implicit conversion would be performed
-function BIT_XOR_STRONG(y) {
- if (IS_NUMBER(this) && IS_NUMBER(y)) {
- return %NumberXor(this, y);
- }
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
-// ECMA-262, section 11.7.1, page 51.
-function SHL(y) {
- var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
- return %NumberShl(x, y);
-}
-
-
-// Strong mode SHL throws if an implicit conversion would be performed
-function SHL_STRONG(y) {
- if (IS_NUMBER(this) && IS_NUMBER(y)) {
- return %NumberShl(this, y);
- }
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
-// ECMA-262, section 11.7.2, page 51.
-function SAR(y) {
- var x;
- if (IS_NUMBER(this)) {
- x = this;
- if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
- } else {
- x = %$nonNumberToNumber(this);
- // Make sure to convert the right operand to a number before
- // bailing out in the fast case, but after converting the
- // left operand. This ensures that valueOf methods on the right
- // operand are always executed.
- if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
- // Optimize for the case where we end up shifting a value
- // that doesn't convert to a number. This is common in
- // certain benchmarks.
- if (NUMBER_IS_NAN(x)) return 0;
- }
- return %NumberSar(x, y);
-}
-
-
-// Strong mode SAR throws if an implicit conversion would be performed
-function SAR_STRONG(y) {
- if (IS_NUMBER(this) && IS_NUMBER(y)) {
- return %NumberSar(this, y);
- }
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
-// ECMA-262, section 11.7.3, page 52.
-function SHR(y) {
- var x = IS_NUMBER(this) ? this : %$nonNumberToNumber(this);
- if (!IS_NUMBER(y)) y = %$nonNumberToNumber(y);
- return %NumberShr(x, y);
-}
-
-
-// Strong mode SHR throws if an implicit conversion would be performed
-function SHR_STRONG(y) {
- if (IS_NUMBER(this) && IS_NUMBER(y)) {
- return %NumberShr(this, y);
- }
- throw %MakeTypeError(kStrongImplicitConversion);
-}
-
-
/* -----------------------------
- - - H e l p e r s - - -
-----------------------------
*/
-// ECMA-262, section 11.8.7, page 54.
-function IN(x) {
- if (!IS_SPEC_OBJECT(x)) {
- throw %MakeTypeError(kInvalidInOperatorUse, this, x);
- }
- if (%_IsNonNegativeSmi(this)) {
- if (IS_ARRAY(x) && %_HasFastPackedElements(x)) {
- return this < x.length;
- }
- return %HasElement(x, this);
- }
- return %HasProperty(x, %$toName(this));
-}
-
-
-// ECMA-262, section 11.8.6, page 54. To make the implementation more
-// efficient, the return value should be zero if the 'this' is an
-// instance of F, and non-zero if not. This makes it possible to avoid
-// an expensive ToBoolean conversion in the generated code.
-function INSTANCE_OF(F) {
- var V = this;
- if (!IS_SPEC_FUNCTION(F)) {
- throw %MakeTypeError(kInstanceofFunctionExpected, F);
- }
-
- // If V is not an object, return false.
- if (!IS_SPEC_OBJECT(V)) {
- return 1;
- }
-
- // Check if function is bound, if so, get [[BoundFunction]] from it
- // and use that instead of F.
- var bindings = %BoundFunctionGetBindings(F);
- if (bindings) {
- F = bindings[kBoundFunctionIndex]; // Always a non-bound function.
- }
- // Get the prototype of F; if it is not an object, throw an error.
- var O = F.prototype;
- if (!IS_SPEC_OBJECT(O)) {
- throw %MakeTypeError(kInstanceofNonobjectProto, O);
- }
-
- // Return whether or not O is in the prototype chain of V.
- return %IsInPrototypeChain(O, V) ? 0 : 1;
-}
-
-
-function CALL_NON_FUNCTION() {
- var delegate = %GetFunctionDelegate(this);
- if (!IS_FUNCTION(delegate)) {
- var callsite = %RenderCallSite();
- if (callsite == "") callsite = typeof this;
- throw %MakeTypeError(kCalledNonCallable, callsite);
- }
- return %Apply(delegate, this, arguments, 0, %_ArgumentsLength());
-}
-
+function APPLY_PREPARE(args) {
+ var length;
-function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
- var delegate = %GetConstructorDelegate(this);
- if (!IS_FUNCTION(delegate)) {
- var callsite = %RenderCallSite();
- if (callsite == "") callsite = typeof this;
- throw %MakeTypeError(kCalledNonCallable, callsite);
+ // First check that the receiver is callable.
+ if (!IS_CALLABLE(this)) {
+ throw %make_type_error(kApplyNonFunction, TO_STRING(this), typeof this);
}
- return %Apply(delegate, this, arguments, 0, %_ArgumentsLength());
-}
-
-
-function CALL_FUNCTION_PROXY() {
- var arity = %_ArgumentsLength() - 1;
- var proxy = %_Arguments(arity); // The proxy comes in as an additional arg.
- var trap = %GetCallTrap(proxy);
- return %Apply(trap, this, arguments, 0, arity);
-}
-
-
-function CALL_FUNCTION_PROXY_AS_CONSTRUCTOR () {
- var proxy = this;
- var trap = %GetConstructTrap(proxy);
- return %Apply(trap, this, arguments, 0, %_ArgumentsLength());
-}
-
-function APPLY_PREPARE(args) {
- var length;
// First check whether length is a positive Smi and args is an
// array. This is the fast case. If this fails, we do the slow case
// that takes care of more eventualities.
if (IS_ARRAY(args)) {
length = args.length;
- if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength &&
- IS_SPEC_FUNCTION(this)) {
+ if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength) {
return length;
}
}
@@ -522,15 +60,11 @@ function APPLY_PREPARE(args) {
// We can handle any number of apply arguments if the stack is
// big enough, but sanity check the value to avoid overflow when
// multiplying with pointer size.
- if (length > kSafeArgumentsLength) throw %MakeRangeError(kStackOverflow);
-
- if (!IS_SPEC_FUNCTION(this)) {
- throw %MakeTypeError(kApplyNonFunction, %$toString(this), typeof this);
- }
+ if (length > kSafeArgumentsLength) throw %make_range_error(kStackOverflow);
// Make sure the arguments list has the right type.
if (args != null && !IS_SPEC_OBJECT(args)) {
- throw %MakeTypeError(kWrongArgs, "Function.prototype.apply");
+ throw %make_type_error(kWrongArgs, "Function.prototype.apply");
}
// Return the length which is the number of arguments to copy to the
@@ -541,31 +75,32 @@ function APPLY_PREPARE(args) {
function REFLECT_APPLY_PREPARE(args) {
var length;
+
+ // First check that the receiver is callable.
+ if (!IS_CALLABLE(this)) {
+ throw %make_type_error(kApplyNonFunction, TO_STRING(this), typeof this);
+ }
+
// First check whether length is a positive Smi and args is an
// array. This is the fast case. If this fails, we do the slow case
// that takes care of more eventualities.
if (IS_ARRAY(args)) {
length = args.length;
- if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength &&
- IS_SPEC_FUNCTION(this)) {
+ if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength) {
return length;
}
}
- if (!IS_SPEC_FUNCTION(this)) {
- throw %MakeTypeError(kCalledNonCallable, %$toString(this));
- }
-
if (!IS_SPEC_OBJECT(args)) {
- throw %MakeTypeError(kWrongArgs, "Reflect.apply");
+ throw %make_type_error(kWrongArgs, "Reflect.apply");
}
- length = %$toLength(args.length);
+ length = TO_LENGTH(args.length);
// We can handle any number of apply arguments if the stack is
// big enough, but sanity check the value to avoid overflow when
// multiplying with pointer size.
- if (length > kSafeArgumentsLength) throw %MakeRangeError(kStackOverflow);
+ if (length > kSafeArgumentsLength) throw %make_range_error(kStackOverflow);
// Return the length which is the number of arguments to copy to the
// stack. It is guaranteed to be a small integer at this point.
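Both prepare builtins keep the kSafeArgumentsLength ceiling even after the callable checks moved to the top; the comment beside it is the whole story: the runtime later multiplies the argument count by the pointer size, and an unchecked TO_LENGTH value (up to 2^53 - 1) could overflow that multiply. A standalone sketch of the guard; the ceiling value below is a placeholder, not necessarily V8's constant:

```cpp
#include <cstdint>
#include <iostream>
#include <stdexcept>

// Sketch of the kSafeArgumentsLength guard: cap the argument count before
// multiplying by the pointer size, so the byte count cannot wrap around.
// The ceiling is a placeholder here, not necessarily V8's constant.
constexpr uint64_t kSafeArgumentsLength = 0x800000;

uint64_t ArgumentsByteSize(uint64_t length) {
  if (length > kSafeArgumentsLength)
    throw std::range_error("stack overflow");  // %make_range_error(kStackOverflow)
  return length * sizeof(void*);  // safe: far below the 64-bit limit
}

int main() {
  std::cout << ArgumentsByteSize(3) << " bytes for 3 arguments\n";
  try {
    ArgumentsByteSize(uint64_t{1} << 53);  // a TO_LENGTH-sized value
  } catch (const std::range_error& e) {
    std::cout << "rejected: " << e.what() << "\n";
  }
}
```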
@@ -576,8 +111,8 @@ function REFLECT_APPLY_PREPARE(args) {
function REFLECT_CONSTRUCT_PREPARE(
args, newTarget) {
var length;
- var ctorOk = IS_SPEC_FUNCTION(this) && %IsConstructor(this);
- var newTargetOk = IS_SPEC_FUNCTION(newTarget) && %IsConstructor(newTarget);
+ var ctorOk = IS_CALLABLE(this) && %IsConstructor(this);
+ var newTargetOk = IS_CALLABLE(newTarget) && %IsConstructor(newTarget);
// First check whether length is a positive Smi and args is an
// array. This is the fast case. If this fails, we do the slow case
@@ -591,31 +126,31 @@ function REFLECT_CONSTRUCT_PREPARE(
}
if (!ctorOk) {
- if (!IS_SPEC_FUNCTION(this)) {
- throw %MakeTypeError(kCalledNonCallable, %$toString(this));
+ if (!IS_CALLABLE(this)) {
+ throw %make_type_error(kCalledNonCallable, TO_STRING(this));
} else {
- throw %MakeTypeError(kNotConstructor, %$toString(this));
+ throw %make_type_error(kNotConstructor, TO_STRING(this));
}
}
if (!newTargetOk) {
- if (!IS_SPEC_FUNCTION(newTarget)) {
- throw %MakeTypeError(kCalledNonCallable, %$toString(newTarget));
+ if (!IS_CALLABLE(newTarget)) {
+ throw %make_type_error(kCalledNonCallable, TO_STRING(newTarget));
} else {
- throw %MakeTypeError(kNotConstructor, %$toString(newTarget));
+ throw %make_type_error(kNotConstructor, TO_STRING(newTarget));
}
}
if (!IS_SPEC_OBJECT(args)) {
- throw %MakeTypeError(kWrongArgs, "Reflect.construct");
+ throw %make_type_error(kWrongArgs, "Reflect.construct");
}
- length = %$toLength(args.length);
+ length = TO_LENGTH(args.length);
// We can handle any number of apply arguments if the stack is
// big enough, but sanity check the value to avoid overflow when
// multiplying with pointer size.
- if (length > kSafeArgumentsLength) throw %MakeRangeError(kStackOverflow);
+ if (length > kSafeArgumentsLength) throw %make_range_error(kStackOverflow);
// Return the length which is the number of arguments to copy to the
// stack. It is guaranteed to be a small integer at this point.
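REFLECT_CONSTRUCT_PREPARE's error selection above distinguishes two failure modes per operand: a value that is not callable at all reports kCalledNonCallable, while a callable that is not a constructor reports kNotConstructor. A small sketch of that decision, using plain exceptions in place of %make_type_error:

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

// Sketch of the two-stage check: "not callable" and "callable but not a
// constructor" (e.g. an arrow function) produce different TypeErrors.
struct Target {
  bool callable;
  bool constructor;
};

void CheckConstructable(const Target& t, const std::string& what) {
  if (t.callable && t.constructor) return;  // ctorOk / newTargetOk
  if (!t.callable)
    throw std::invalid_argument(what + " called non-callable");
  throw std::invalid_argument(what + " is not a constructor");
}

int main() {
  for (Target t : {Target{false, false}, Target{true, false}}) {
    try {
      CheckConstructable(t, "newTarget");
    } catch (const std::exception& e) {
      std::cout << e.what() << "\n";
    }
  }
  CheckConstructable({true, true}, "target");  // passes silently
  std::cout << "constructor accepted\n";
}
```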
@@ -624,47 +159,15 @@ function REFLECT_CONSTRUCT_PREPARE(
function CONCAT_ITERABLE_TO_ARRAY(iterable) {
- return %$concatIterableToArray(this, iterable);
+ return %concat_iterable_to_array(this, iterable);
};
-function STACK_OVERFLOW(length) {
- throw %MakeRangeError(kStackOverflow);
-}
-
-
-// Convert the receiver to a number - forward to ToNumber.
-function TO_NUMBER() {
- return %$toNumber(this);
-}
-
-
-// Convert the receiver to a string - forward to ToString.
-function TO_STRING() {
- return %$toString(this);
-}
-
-
-// Convert the receiver to a string or symbol - forward to ToName.
-function TO_NAME() {
- return %$toName(this);
-}
-
-
/* -------------------------------------
- - - C o n v e r s i o n s - - -
-------------------------------------
*/
-// ECMA-262, section 9.1, page 30. Use null/undefined for no hint,
-// (1) for number hint, and (2) for string hint.
-function ToPrimitive(x, hint) {
- if (!IS_SPEC_OBJECT(x)) return x;
- if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
- return (hint == NUMBER_HINT) ? DefaultNumber(x) : DefaultString(x);
-}
-
-
// ECMA-262, section 9.2, page 30
function ToBoolean(x) {
if (IS_BOOLEAN(x)) return x;
@@ -710,36 +213,6 @@ function ToString(x) {
return (IS_NULL(x)) ? 'null' : ToString(DefaultString(x));
}
-function NonStringToString(x) {
- if (IS_NUMBER(x)) return %_NumberToString(x);
- if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
- if (IS_UNDEFINED(x)) return 'undefined';
- // Types that can't be converted to string are caught in DefaultString.
- return (IS_NULL(x)) ? 'null' : ToString(DefaultString(x));
-}
-
-
-// ES6 symbols
-function ToName(x) {
- return IS_SYMBOL(x) ? x : ToString(x);
-}
-
-
-// ECMA-262, section 9.4, page 34.
-function ToInteger(x) {
- if (%_IsSmi(x)) return x;
- return %NumberToInteger(ToNumber(x));
-}
-
-
-// ES6, draft 08-24-14, section 7.1.15
-function ToLength(arg) {
- arg = ToInteger(arg);
- if (arg < 0) return 0;
- return arg < GlobalNumber.MAX_SAFE_INTEGER ? arg
- : GlobalNumber.MAX_SAFE_INTEGER;
-}
-
// ES5, section 9.12
function SameValue(x, y) {
@@ -794,7 +267,7 @@ function IsPrimitive(x) {
// ES6, draft 10-14-14, section 22.1.3.1.1
function IsConcatSpreadable(O) {
if (!IS_SPEC_OBJECT(O)) return false;
- var spreadable = O[symbolIsConcatSpreadable];
+ var spreadable = O[isConcatSpreadableSymbol];
if (IS_UNDEFINED(spreadable)) return IS_ARRAY(O);
return ToBoolean(spreadable);
}
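IsConcatSpreadable above encodes the ES6 rule: an explicit @@isConcatSpreadable value, when present, wins (after ToBoolean), and only genuine arrays spread by default. A minimal sketch of that precedence over a toy object model:

```cpp
#include <iostream>
#include <optional>

// Sketch of IsConcatSpreadable: honor an explicit @@isConcatSpreadable
// value when one is present, otherwise spread exactly the arrays.
struct Obj {
  bool is_array;
  std::optional<bool> is_concat_spreadable;  // the symbol-keyed property
};

bool IsConcatSpreadable(const Obj& o) {
  if (o.is_concat_spreadable.has_value())  // not undefined: it decides
    return *o.is_concat_spreadable;
  return o.is_array;  // default: only arrays spread
}

int main() {
  std::cout << IsConcatSpreadable({true, std::nullopt}) << "\n";  // 1
  std::cout << IsConcatSpreadable({false, true}) << "\n";         // 1
  std::cout << IsConcatSpreadable({true, false}) << "\n";         // 0
}
```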
@@ -803,15 +276,15 @@ function IsConcatSpreadable(O) {
// ECMA-262, section 8.6.2.6, page 28.
function DefaultNumber(x) {
var valueOf = x.valueOf;
- if (IS_SPEC_FUNCTION(valueOf)) {
- var v = %_CallFunction(x, valueOf);
+ if (IS_CALLABLE(valueOf)) {
+ var v = %_Call(valueOf, x);
if (IS_SYMBOL(v)) throw MakeTypeError(kSymbolToNumber);
if (IS_SIMD_VALUE(x)) throw MakeTypeError(kSimdToNumber);
if (IsPrimitive(v)) return v;
}
var toString = x.toString;
- if (IS_SPEC_FUNCTION(toString)) {
- var s = %_CallFunction(x, toString);
+ if (IS_CALLABLE(toString)) {
+ var s = %_Call(toString, x);
if (IsPrimitive(s)) return s;
}
throw MakeTypeError(kCannotConvertToPrimitive);
@@ -822,14 +295,14 @@ function DefaultString(x) {
if (!IS_SYMBOL_WRAPPER(x)) {
if (IS_SYMBOL(x)) throw MakeTypeError(kSymbolToString);
var toString = x.toString;
- if (IS_SPEC_FUNCTION(toString)) {
- var s = %_CallFunction(x, toString);
+ if (IS_CALLABLE(toString)) {
+ var s = %_Call(toString, x);
if (IsPrimitive(s)) return s;
}
var valueOf = x.valueOf;
- if (IS_SPEC_FUNCTION(valueOf)) {
- var v = %_CallFunction(x, valueOf);
+ if (IS_CALLABLE(valueOf)) {
+ var v = %_Call(valueOf, x);
if (IsPrimitive(v)) return v;
}
}
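DefaultNumber and DefaultString are mirror images: both walk the object's own valueOf and toString (now invoked via %_Call rather than %_CallFunction), but number conversion prefers valueOf while string conversion prefers toString, taking the first callable that returns a primitive. A hedged sketch of that ordering, with optional-returning callables standing in for JS methods:

```cpp
#include <functional>
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>

// Sketch of the DefaultNumber / DefaultString ordering: try valueOf and
// toString in hint-dependent order, taking the first callable method that
// yields a primitive, and throw if neither does.
struct Obj {
  std::function<std::optional<std::string>()> value_of;   // null = absent
  std::function<std::optional<std::string>()> to_string;
};

std::string ToPrimitive(const Obj& o, bool number_hint) {
  auto first = number_hint ? o.value_of : o.to_string;
  auto second = number_hint ? o.to_string : o.value_of;
  for (auto& method : {first, second}) {
    if (!method) continue;  // not callable: skip, like !IS_CALLABLE
    if (auto primitive = method()) return *primitive;
  }
  throw std::runtime_error("cannot convert to primitive");
}

int main() {
  Obj date{[] { return std::optional<std::string>("1449216160"); },
           [] { return std::optional<std::string>("Fri Dec 04 2015"); }};
  std::cout << ToPrimitive(date, /*number_hint=*/true) << "\n";
  std::cout << ToPrimitive(date, /*number_hint=*/false) << "\n";
}
```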
@@ -854,77 +327,30 @@ function ToPositiveInteger(x, rangeErrorIndex) {
// ----------------------------------------------------------------------------
// Exports
-$concatIterableToArray = ConcatIterableToArray;
-$defaultNumber = DefaultNumber;
-$defaultString = DefaultString;
$NaN = %GetRootNaN();
$nonNumberToNumber = NonNumberToNumber;
-$nonStringToString = NonStringToString;
$sameValue = SameValue;
$sameValueZero = SameValueZero;
-$toInteger = ToInteger;
-$toLength = ToLength;
-$toName = ToName;
$toNumber = ToNumber;
$toPositiveInteger = ToPositiveInteger;
-$toPrimitive = ToPrimitive;
-$toString = ToString;
-
-%InstallJSBuiltins({
- EQUALS,
- COMPARE,
- COMPARE_STRONG,
- ADD,
- ADD_STRONG,
- STRING_ADD_LEFT,
- STRING_ADD_RIGHT,
- SUB,
- SUB_STRONG,
- MUL,
- MUL_STRONG,
- DIV,
- DIV_STRONG,
- MOD,
- MOD_STRONG,
- BIT_OR,
- BIT_OR_STRONG,
- BIT_AND,
- BIT_AND_STRONG,
- BIT_XOR,
- BIT_XOR_STRONG,
- SHL,
- SHL_STRONG,
- SAR,
- SAR_STRONG,
- SHR,
- SHR_STRONG,
- IN,
- INSTANCE_OF,
- CALL_NON_FUNCTION,
- CALL_NON_FUNCTION_AS_CONSTRUCTOR,
- CALL_FUNCTION_PROXY,
- CALL_FUNCTION_PROXY_AS_CONSTRUCTOR,
- CONCAT_ITERABLE_TO_ARRAY,
- APPLY_PREPARE,
- REFLECT_APPLY_PREPARE,
- REFLECT_CONSTRUCT_PREPARE,
- STACK_OVERFLOW,
- TO_NUMBER,
- TO_STRING,
- TO_NAME,
-});
-utils.ExportToRuntime(function(to) {
- to.ToNumber = ToNumber;
- to.ToString = ToString;
- to.ToInteger = ToInteger;
- to.ToLength = ToLength;
-});
+%InstallToContext([
+ "apply_prepare_builtin", APPLY_PREPARE,
+ "concat_iterable_to_array_builtin", CONCAT_ITERABLE_TO_ARRAY,
+ "reflect_apply_prepare_builtin", REFLECT_APPLY_PREPARE,
+ "reflect_construct_prepare_builtin", REFLECT_CONSTRUCT_PREPARE,
+]);
+
+%InstallToContext([
+ "concat_iterable_to_array", ConcatIterableToArray,
+ "non_number_to_number", NonNumberToNumber,
+ "to_number_fun", ToNumber,
+]);
utils.Export(function(to) {
to.ToBoolean = ToBoolean;
to.ToNumber = ToNumber;
to.ToString = ToString;
-})
+});
})
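Stepping back, the bulk of this runtime.js rewrite deletes the JS-implemented operator builtins (EQUALS, COMPARE, ADD and friends, the bit ops, IN, INSTANCE_OF, the proxy call trampolines) along with their %InstallJSBuiltins registration, leaving only the apply/construct preparation helpers installed via %InstallToContext. The deleted EQUALS was a transcription of the ES abstract-equality loop; a condensed sketch of that algorithm over a toy primitive-only value model (objects and their ToPrimitive round-trips omitted):

```cpp
#include <cmath>
#include <iostream>
#include <string>
#include <variant>

// Condensed sketch of the ES5 11.9.3 loop the deleted EQUALS builtin
// implemented: same-type operands compare directly, null/undefined match
// only each other, and mixed number/string/boolean operands coerce to
// number. Toy value model: monostate = undefined/null.
using Value = std::variant<std::monostate, bool, double, std::string>;

double ToNumber(const Value& v) {
  if (auto b = std::get_if<bool>(&v)) return *b ? 1 : 0;
  if (auto d = std::get_if<double>(&v)) return *d;
  if (auto s = std::get_if<std::string>(&v)) {
    try { return std::stod(*s); } catch (...) { return NAN; }
  }
  return NAN;  // undefined/null
}

bool AbstractEquals(Value x, Value y) {
  // Same type: strict comparison (NaN != NaN falls out of operator==).
  if (x.index() == y.index()) {
    if (std::holds_alternative<std::monostate>(x)) return true;
    return x == y;
  }
  // null/undefined are equal only to each other.
  if (std::holds_alternative<std::monostate>(x) ||
      std::holds_alternative<std::monostate>(y))
    return false;
  // Mixed number/string/boolean: coerce both sides to number.
  return ToNumber(x) == ToNumber(y);
}

int main() {
  std::cout << AbstractEquals(1.0, std::string("1")) << "\n";  // 1
  std::cout << AbstractEquals(true, 1.0) << "\n";              // 1
  std::cout << AbstractEquals(Value{}, false) << "\n";         // 0
}
```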
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index fa0d91bf23..6fc1ad4ea1 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -8,6 +8,7 @@
#include "src/conversions-inl.h"
#include "src/elements.h"
#include "src/factory.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/prototype.h"
@@ -51,7 +52,6 @@ RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
InstallBuiltin(isolate, holder, "unshift", Builtins::kArrayUnshift);
InstallBuiltin(isolate, holder, "slice", Builtins::kArraySlice);
InstallBuiltin(isolate, holder, "splice", Builtins::kArraySplice);
- InstallBuiltin(isolate, holder, "concat", Builtins::kArrayConcat);
return *holder;
}
@@ -110,784 +110,6 @@ RUNTIME_FUNCTION(Runtime_PushIfAbsent) {
}
-/**
- * A simple visitor visits every element of Array's.
- * The backend storage can be a fixed array for fast elements case,
- * or a dictionary for sparse array. Since Dictionary is a subtype
- * of FixedArray, the class can be used by both fast and slow cases.
- * The second parameter of the constructor, fast_elements, specifies
- * whether the storage is a FixedArray or Dictionary.
- *
- * An index limit is used to deal with the situation that a result array
- * length overflows 32-bit non-negative integer.
- */
-class ArrayConcatVisitor {
- public:
- ArrayConcatVisitor(Isolate* isolate, Handle<FixedArray> storage,
- bool fast_elements)
- : isolate_(isolate),
- storage_(Handle<FixedArray>::cast(
- isolate->global_handles()->Create(*storage))),
- index_offset_(0u),
- bit_field_(FastElementsField::encode(fast_elements) |
- ExceedsLimitField::encode(false)) {}
-
- ~ArrayConcatVisitor() { clear_storage(); }
-
- void visit(uint32_t i, Handle<Object> elm) {
- if (i >= JSObject::kMaxElementCount - index_offset_) {
- set_exceeds_array_limit(true);
- return;
- }
- uint32_t index = index_offset_ + i;
-
- if (fast_elements()) {
- if (index < static_cast<uint32_t>(storage_->length())) {
- storage_->set(index, *elm);
- return;
- }
- // Our initial estimate of length was foiled, possibly by
- // getters on the arrays increasing the length of later arrays
- // during iteration.
- // This shouldn't happen in anything but pathological cases.
- SetDictionaryMode();
- // Fall-through to dictionary mode.
- }
- DCHECK(!fast_elements());
- Handle<SeededNumberDictionary> dict(
- SeededNumberDictionary::cast(*storage_));
- // The object holding this backing store has just been allocated, so
- // it cannot yet be used as a prototype.
- Handle<SeededNumberDictionary> result =
- SeededNumberDictionary::AtNumberPut(dict, index, elm, false);
- if (!result.is_identical_to(dict)) {
- // Dictionary needed to grow.
- clear_storage();
- set_storage(*result);
- }
- }
-
- void increase_index_offset(uint32_t delta) {
- if (JSObject::kMaxElementCount - index_offset_ < delta) {
- index_offset_ = JSObject::kMaxElementCount;
- } else {
- index_offset_ += delta;
- }
- // If the initial length estimate was off (see special case in visit()),
- // but the array blowing the limit didn't contain elements beyond the
- // provided-for index range, go to dictionary mode now.
- if (fast_elements() &&
- index_offset_ >
- static_cast<uint32_t>(FixedArrayBase::cast(*storage_)->length())) {
- SetDictionaryMode();
- }
- }
-
- bool exceeds_array_limit() const {
- return ExceedsLimitField::decode(bit_field_);
- }
-
- Handle<JSArray> ToArray() {
- Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
- Handle<Object> length =
- isolate_->factory()->NewNumber(static_cast<double>(index_offset_));
- Handle<Map> map = JSObject::GetElementsTransitionMap(
- array, fast_elements() ? FAST_HOLEY_ELEMENTS : DICTIONARY_ELEMENTS);
- array->set_map(*map);
- array->set_length(*length);
- array->set_elements(*storage_);
- return array;
- }
-
- private:
- // Convert storage to dictionary mode.
- void SetDictionaryMode() {
- DCHECK(fast_elements());
- Handle<FixedArray> current_storage(*storage_);
- Handle<SeededNumberDictionary> slow_storage(
- SeededNumberDictionary::New(isolate_, current_storage->length()));
- uint32_t current_length = static_cast<uint32_t>(current_storage->length());
- for (uint32_t i = 0; i < current_length; i++) {
- HandleScope loop_scope(isolate_);
- Handle<Object> element(current_storage->get(i), isolate_);
- if (!element->IsTheHole()) {
- // The object holding this backing store has just been allocated, so
- // it cannot yet be used as a prototype.
- Handle<SeededNumberDictionary> new_storage =
- SeededNumberDictionary::AtNumberPut(slow_storage, i, element,
- false);
- if (!new_storage.is_identical_to(slow_storage)) {
- slow_storage = loop_scope.CloseAndEscape(new_storage);
- }
- }
- }
- clear_storage();
- set_storage(*slow_storage);
- set_fast_elements(false);
- }
-
- inline void clear_storage() {
- GlobalHandles::Destroy(Handle<Object>::cast(storage_).location());
- }
-
- inline void set_storage(FixedArray* storage) {
- storage_ =
- Handle<FixedArray>::cast(isolate_->global_handles()->Create(storage));
- }
-
- class FastElementsField : public BitField<bool, 0, 1> {};
- class ExceedsLimitField : public BitField<bool, 1, 1> {};
-
- bool fast_elements() const { return FastElementsField::decode(bit_field_); }
- void set_fast_elements(bool fast) {
- bit_field_ = FastElementsField::update(bit_field_, fast);
- }
- void set_exceeds_array_limit(bool exceeds) {
- bit_field_ = ExceedsLimitField::update(bit_field_, exceeds);
- }
-
- Isolate* isolate_;
- Handle<FixedArray> storage_; // Always a global handle.
- // Index after last seen index. Always less than or equal to
- // JSObject::kMaxElementCount.
- uint32_t index_offset_;
- uint32_t bit_field_;
-};
-
-
-static uint32_t EstimateElementCount(Handle<JSArray> array) {
- uint32_t length = static_cast<uint32_t>(array->length()->Number());
- int element_count = 0;
- switch (array->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- // Fast elements can't have lengths that are not representable by
- // a 32-bit signed integer.
- DCHECK(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
- int fast_length = static_cast<int>(length);
- Handle<FixedArray> elements(FixedArray::cast(array->elements()));
- for (int i = 0; i < fast_length; i++) {
- if (!elements->get(i)->IsTheHole()) element_count++;
- }
- break;
- }
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- // Fast elements can't have lengths that are not representable by
- // a 32-bit signed integer.
- DCHECK(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
- int fast_length = static_cast<int>(length);
- if (array->elements()->IsFixedArray()) {
- DCHECK(FixedArray::cast(array->elements())->length() == 0);
- break;
- }
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(array->elements()));
- for (int i = 0; i < fast_length; i++) {
- if (!elements->is_the_hole(i)) element_count++;
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> dictionary(
- SeededNumberDictionary::cast(array->elements()));
- int capacity = dictionary->Capacity();
- for (int i = 0; i < capacity; i++) {
- Handle<Object> key(dictionary->KeyAt(i), array->GetIsolate());
- if (dictionary->IsKey(*key)) {
- element_count++;
- }
- }
- break;
- }
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS:
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- // External arrays are always dense.
- return length;
- }
- // As an estimate, we assume that the prototype doesn't contain any
- // inherited elements.
- return element_count;
-}
-
-
-template <class ExternalArrayClass, class ElementType>
-static void IterateTypedArrayElements(Isolate* isolate,
- Handle<JSObject> receiver,
- bool elements_are_ints,
- bool elements_are_guaranteed_smis,
- ArrayConcatVisitor* visitor) {
- Handle<ExternalArrayClass> array(
- ExternalArrayClass::cast(receiver->elements()));
- uint32_t len = static_cast<uint32_t>(array->length());
-
- DCHECK(visitor != NULL);
- if (elements_are_ints) {
- if (elements_are_guaranteed_smis) {
- for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope(isolate);
- Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get_scalar(j))),
- isolate);
- visitor->visit(j, e);
- }
- } else {
- for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope(isolate);
- int64_t val = static_cast<int64_t>(array->get_scalar(j));
- if (Smi::IsValid(static_cast<intptr_t>(val))) {
- Handle<Smi> e(Smi::FromInt(static_cast<int>(val)), isolate);
- visitor->visit(j, e);
- } else {
- Handle<Object> e =
- isolate->factory()->NewNumber(static_cast<ElementType>(val));
- visitor->visit(j, e);
- }
- }
- }
- } else {
- for (uint32_t j = 0; j < len; j++) {
- HandleScope loop_scope(isolate);
- Handle<Object> e = isolate->factory()->NewNumber(array->get_scalar(j));
- visitor->visit(j, e);
- }
- }
-}
-
-
-// Used for sorting indices in a List<uint32_t>.
-static int compareUInt32(const uint32_t* ap, const uint32_t* bp) {
- uint32_t a = *ap;
- uint32_t b = *bp;
- return (a == b) ? 0 : (a < b) ? -1 : 1;
-}
-
-
-static void CollectElementIndices(Handle<JSObject> object, uint32_t range,
- List<uint32_t>* indices) {
- Isolate* isolate = object->GetIsolate();
- ElementsKind kind = object->GetElementsKind();
- switch (kind) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- Handle<FixedArray> elements(FixedArray::cast(object->elements()));
- uint32_t length = static_cast<uint32_t>(elements->length());
- if (range < length) length = range;
- for (uint32_t i = 0; i < length; i++) {
- if (!elements->get(i)->IsTheHole()) {
- indices->Add(i);
- }
- }
- break;
- }
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- if (object->elements()->IsFixedArray()) {
- DCHECK(object->elements()->length() == 0);
- break;
- }
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(object->elements()));
- uint32_t length = static_cast<uint32_t>(elements->length());
- if (range < length) length = range;
- for (uint32_t i = 0; i < length; i++) {
- if (!elements->is_the_hole(i)) {
- indices->Add(i);
- }
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> dict(
- SeededNumberDictionary::cast(object->elements()));
- uint32_t capacity = dict->Capacity();
- for (uint32_t j = 0; j < capacity; j++) {
- HandleScope loop_scope(isolate);
- Handle<Object> k(dict->KeyAt(j), isolate);
- if (dict->IsKey(*k)) {
- DCHECK(k->IsNumber());
- uint32_t index = static_cast<uint32_t>(k->Number());
- if (index < range) {
- indices->Add(index);
- }
- }
- }
- break;
- }
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
- case TYPE##_ELEMENTS: \
-
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
- {
- uint32_t length = static_cast<uint32_t>(
- FixedArrayBase::cast(object->elements())->length());
- if (range <= length) {
- length = range;
- // We will add all indices, so we might as well clear it first
- // and avoid duplicates.
- indices->Clear();
- }
- for (uint32_t i = 0; i < length; i++) {
- indices->Add(i);
- }
- if (length == range) return; // All indices accounted for already.
- break;
- }
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
- ElementsAccessor* accessor = object->GetElementsAccessor();
- for (uint32_t i = 0; i < range; i++) {
- if (accessor->HasElement(object, i)) {
- indices->Add(i);
- }
- }
- break;
- }
- }
-
- PrototypeIterator iter(isolate, object);
- if (!iter.IsAtEnd()) {
- // The prototype will usually have no inherited element indices,
- // but we have to check.
- CollectElementIndices(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), range,
- indices);
- }
-}
-
-
-static bool IterateElementsSlow(Isolate* isolate, Handle<JSObject> receiver,
- uint32_t length, ArrayConcatVisitor* visitor) {
- for (uint32_t i = 0; i < length; ++i) {
- HandleScope loop_scope(isolate);
- Maybe<bool> maybe = JSReceiver::HasElement(receiver, i);
- if (!maybe.IsJust()) return false;
- if (maybe.FromJust()) {
- Handle<Object> element_value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_value,
- Object::GetElement(isolate, receiver, i),
- false);
- visitor->visit(i, element_value);
- }
- }
- visitor->increase_index_offset(length);
- return true;
-}
-
-
-/**
- * A helper function that visits elements of a JSObject in numerical
- * order.
- *
- * The visitor argument called for each existing element in the array
- * with the element index and the element's value.
- * Afterwards it increments the base-index of the visitor by the array
- * length.
- * Returns false if any access threw an exception, otherwise true.
- */
-static bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
- ArrayConcatVisitor* visitor) {
- uint32_t length = 0;
-
- if (receiver->IsJSArray()) {
- Handle<JSArray> array(Handle<JSArray>::cast(receiver));
- length = static_cast<uint32_t>(array->length()->Number());
- } else {
- Handle<Object> val;
- Handle<Object> key(isolate->heap()->length_string(), isolate);
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, val,
- Runtime::GetObjectProperty(isolate, receiver, key), false);
- // TODO(caitp): Support larger element indexes (up to 2^53-1).
- if (!val->ToUint32(&length)) {
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, val,
- Execution::ToLength(isolate, val), false);
- val->ToUint32(&length);
- }
- }
-
- if (!(receiver->IsJSArray() || receiver->IsJSTypedArray())) {
- // For classes which are not known to be safe to access via elements alone,
- // use the slow case.
- return IterateElementsSlow(isolate, receiver, length, visitor);
- }
-
- switch (receiver->GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- // Run through the elements FixedArray and use HasElement and GetElement
- // to check the prototype for missing elements.
- Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
- int fast_length = static_cast<int>(length);
- DCHECK(fast_length <= elements->length());
- for (int j = 0; j < fast_length; j++) {
- HandleScope loop_scope(isolate);
- Handle<Object> element_value(elements->get(j), isolate);
- if (!element_value->IsTheHole()) {
- visitor->visit(j, element_value);
- } else {
- Maybe<bool> maybe = JSReceiver::HasElement(receiver, j);
- if (!maybe.IsJust()) return false;
- if (maybe.FromJust()) {
- // Call GetElement on receiver, not its prototype, or getters won't
- // have the correct receiver.
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_value,
- Object::GetElement(isolate, receiver, j), false);
- visitor->visit(j, element_value);
- }
- }
- }
- break;
- }
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- // Empty array is FixedArray but not FixedDoubleArray.
- if (length == 0) break;
- // Run through the elements FixedArray and use HasElement and GetElement
- // to check the prototype for missing elements.
- if (receiver->elements()->IsFixedArray()) {
- DCHECK(receiver->elements()->length() == 0);
- break;
- }
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(receiver->elements()));
- int fast_length = static_cast<int>(length);
- DCHECK(fast_length <= elements->length());
- for (int j = 0; j < fast_length; j++) {
- HandleScope loop_scope(isolate);
- if (!elements->is_the_hole(j)) {
- double double_value = elements->get_scalar(j);
- Handle<Object> element_value =
- isolate->factory()->NewNumber(double_value);
- visitor->visit(j, element_value);
- } else {
- Maybe<bool> maybe = JSReceiver::HasElement(receiver, j);
- if (!maybe.IsJust()) return false;
- if (maybe.FromJust()) {
- // Call GetElement on receiver, not its prototype, or getters won't
- // have the correct receiver.
- Handle<Object> element_value;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element_value,
- Object::GetElement(isolate, receiver, j), false);
- visitor->visit(j, element_value);
- }
- }
- }
- break;
- }
- case DICTIONARY_ELEMENTS: {
- Handle<SeededNumberDictionary> dict(receiver->element_dictionary());
- List<uint32_t> indices(dict->Capacity() / 2);
- // Collect all indices in the object and the prototypes less
- // than length. This might introduce duplicates in the indices list.
- CollectElementIndices(receiver, length, &indices);
- indices.Sort(&compareUInt32);
- int j = 0;
- int n = indices.length();
- while (j < n) {
- HandleScope loop_scope(isolate);
- uint32_t index = indices[j];
- Handle<Object> element;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element, Object::GetElement(isolate, receiver, index),
- false);
- visitor->visit(index, element);
- // Skip to next different index (i.e., omit duplicates).
- do {
- j++;
- } while (j < n && indices[j] == index);
- }
- break;
- }
- case UINT8_CLAMPED_ELEMENTS: {
- Handle<FixedUint8ClampedArray> pixels(
- FixedUint8ClampedArray::cast(receiver->elements()));
- for (uint32_t j = 0; j < length; j++) {
- Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
- visitor->visit(j, e);
- }
- break;
- }
- case INT8_ELEMENTS: {
- IterateTypedArrayElements<FixedInt8Array, int8_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
- case UINT8_ELEMENTS: {
- IterateTypedArrayElements<FixedUint8Array, uint8_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
- case INT16_ELEMENTS: {
- IterateTypedArrayElements<FixedInt16Array, int16_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
- case UINT16_ELEMENTS: {
- IterateTypedArrayElements<FixedUint16Array, uint16_t>(
- isolate, receiver, true, true, visitor);
- break;
- }
- case INT32_ELEMENTS: {
- IterateTypedArrayElements<FixedInt32Array, int32_t>(
- isolate, receiver, true, false, visitor);
- break;
- }
- case UINT32_ELEMENTS: {
- IterateTypedArrayElements<FixedUint32Array, uint32_t>(
- isolate, receiver, true, false, visitor);
- break;
- }
- case FLOAT32_ELEMENTS: {
- IterateTypedArrayElements<FixedFloat32Array, float>(
- isolate, receiver, false, false, visitor);
- break;
- }
- case FLOAT64_ELEMENTS: {
- IterateTypedArrayElements<FixedFloat64Array, double>(
- isolate, receiver, false, false, visitor);
- break;
- }
- case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
- case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
- for (uint32_t index = 0; index < length; index++) {
- HandleScope loop_scope(isolate);
- Handle<Object> element;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, element, Object::GetElement(isolate, receiver, index),
- false);
- visitor->visit(index, element);
- }
- break;
- }
- }
- visitor->increase_index_offset(length);
- return true;
-}
-
-
-static bool IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
- HandleScope handle_scope(isolate);
- if (!obj->IsSpecObject()) return false;
- if (FLAG_harmony_concat_spreadable) {
- Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
- Handle<Object> value;
- MaybeHandle<Object> maybeValue =
- i::Runtime::GetObjectProperty(isolate, obj, key);
- if (maybeValue.ToHandle(&value)) {
- if (!value->IsUndefined()) {
- return value->BooleanValue();
- }
- }
- }
- return obj->IsJSArray();
-}
-
-
-/**
- * Array::concat implementation.
- * See ECMAScript 262, 15.4.4.4.
- * TODO(581): Fix non-compliance for very large concatenations and update to
- * following the ECMAScript 5 specification.
- */
-RUNTIME_FUNCTION(Runtime_ArrayConcat) {
- HandleScope handle_scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 0);
- int argument_count = static_cast<int>(arguments->length()->Number());
- RUNTIME_ASSERT(arguments->HasFastObjectElements());
- Handle<FixedArray> elements(FixedArray::cast(arguments->elements()));
-
- // Pass 1: estimate the length and number of elements of the result.
- // The actual length can be larger if any of the arguments have getters
- // that mutate other arguments (but will otherwise be precise).
- // The number of elements is precise if there are no inherited elements.
-
- ElementsKind kind = FAST_SMI_ELEMENTS;
-
- uint32_t estimate_result_length = 0;
- uint32_t estimate_nof_elements = 0;
- for (int i = 0; i < argument_count; i++) {
- HandleScope loop_scope(isolate);
- Handle<Object> obj(elements->get(i), isolate);
- uint32_t length_estimate;
- uint32_t element_estimate;
- if (obj->IsJSArray()) {
- Handle<JSArray> array(Handle<JSArray>::cast(obj));
- length_estimate = static_cast<uint32_t>(array->length()->Number());
- if (length_estimate != 0) {
- ElementsKind array_kind =
- GetPackedElementsKind(array->map()->elements_kind());
- if (IsMoreGeneralElementsKindTransition(kind, array_kind)) {
- kind = array_kind;
- }
- }
- element_estimate = EstimateElementCount(array);
- } else {
- if (obj->IsHeapObject()) {
- if (obj->IsNumber()) {
- if (IsMoreGeneralElementsKindTransition(kind, FAST_DOUBLE_ELEMENTS)) {
- kind = FAST_DOUBLE_ELEMENTS;
- }
- } else if (IsMoreGeneralElementsKindTransition(kind, FAST_ELEMENTS)) {
- kind = FAST_ELEMENTS;
- }
- }
- length_estimate = 1;
- element_estimate = 1;
- }
- // Avoid overflows by capping at kMaxElementCount.
- if (JSObject::kMaxElementCount - estimate_result_length < length_estimate) {
- estimate_result_length = JSObject::kMaxElementCount;
- } else {
- estimate_result_length += length_estimate;
- }
- if (JSObject::kMaxElementCount - estimate_nof_elements < element_estimate) {
- estimate_nof_elements = JSObject::kMaxElementCount;
- } else {
- estimate_nof_elements += element_estimate;
- }
- }
-
- // If estimated number of elements is more than half of length, a
- // fixed array (fast case) is more time and space-efficient than a
- // dictionary.
- bool fast_case = (estimate_nof_elements * 2) >= estimate_result_length;
-
- if (fast_case && kind == FAST_DOUBLE_ELEMENTS) {
- Handle<FixedArrayBase> storage =
- isolate->factory()->NewFixedDoubleArray(estimate_result_length);
- int j = 0;
- bool failure = false;
- if (estimate_result_length > 0) {
- Handle<FixedDoubleArray> double_storage =
- Handle<FixedDoubleArray>::cast(storage);
- for (int i = 0; i < argument_count; i++) {
- Handle<Object> obj(elements->get(i), isolate);
- if (obj->IsSmi()) {
- double_storage->set(j, Smi::cast(*obj)->value());
- j++;
- } else if (obj->IsNumber()) {
- double_storage->set(j, obj->Number());
- j++;
- } else {
- JSArray* array = JSArray::cast(*obj);
- uint32_t length = static_cast<uint32_t>(array->length()->Number());
- switch (array->map()->elements_kind()) {
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- // Empty array is FixedArray but not FixedDoubleArray.
- if (length == 0) break;
- FixedDoubleArray* elements =
- FixedDoubleArray::cast(array->elements());
- for (uint32_t i = 0; i < length; i++) {
- if (elements->is_the_hole(i)) {
- // TODO(jkummerow/verwaest): We could be a bit more clever
- // here: Check if there are no elements/getters on the
- // prototype chain, and if so, allow creation of a holey
- // result array.
- // Same thing below (holey smi case).
- failure = true;
- break;
- }
- double double_value = elements->get_scalar(i);
- double_storage->set(j, double_value);
- j++;
- }
- break;
- }
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
- FixedArray* elements(FixedArray::cast(array->elements()));
- for (uint32_t i = 0; i < length; i++) {
- Object* element = elements->get(i);
- if (element->IsTheHole()) {
- failure = true;
- break;
- }
- int32_t int_value = Smi::cast(element)->value();
- double_storage->set(j, int_value);
- j++;
- }
- break;
- }
- case FAST_HOLEY_ELEMENTS:
- case FAST_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- DCHECK_EQ(0u, length);
- break;
- default:
- UNREACHABLE();
- }
- }
- if (failure) break;
- }
- }
- if (!failure) {
- Handle<JSArray> array = isolate->factory()->NewJSArray(0);
- Smi* length = Smi::FromInt(j);
- Handle<Map> map;
- map = JSObject::GetElementsTransitionMap(array, kind);
- array->set_map(*map);
- array->set_length(length);
- array->set_elements(*storage);
- return *array;
- }
- // In case of failure, fall through.
- }
-
- Handle<FixedArray> storage;
- if (fast_case) {
- // The backing storage array must have non-existing elements to preserve
- // holes across concat operations.
- storage =
- isolate->factory()->NewFixedArrayWithHoles(estimate_result_length);
- } else {
- // TODO(126): move 25% pre-allocation logic into Dictionary::Allocate
- uint32_t at_least_space_for =
- estimate_nof_elements + (estimate_nof_elements >> 2);
- storage = Handle<FixedArray>::cast(
- SeededNumberDictionary::New(isolate, at_least_space_for));
- }
-
- ArrayConcatVisitor visitor(isolate, storage, fast_case);
-
- for (int i = 0; i < argument_count; i++) {
- Handle<Object> obj(elements->get(i), isolate);
- bool spreadable = IsConcatSpreadable(isolate, obj);
- if (isolate->has_pending_exception()) return isolate->heap()->exception();
- if (spreadable) {
- Handle<JSObject> object = Handle<JSObject>::cast(obj);
- if (!IterateElements(isolate, object, &visitor)) {
- return isolate->heap()->exception();
- }
- } else {
- visitor.visit(0, obj);
- visitor.increase_index_offset(1);
- }
- }
-
- if (visitor.exceeds_array_limit()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kInvalidArrayLength));
- }
- return *visitor.ToArray();
-}
-
-
// Moves all own elements of an object, that are below a limit, to positions
// starting at zero. All undefined values are placed after non-undefined values,
// and are followed by non-existing element. Does not change the length
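The roughly 780 lines removed above are the entire C++ side of Array.prototype.concat in this file: ArrayConcatVisitor, the element-count estimation, typed-array iteration, and Runtime_ArrayConcat itself, dropped together with the "concat" builtin installation earlier in the diff. The visitor's central trick is writing into a flat preallocated store and migrating to sparse dictionary storage the first time an index lands beyond the initial estimate. A minimal sketch of that fallback, with std::map standing in for SeededNumberDictionary:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <map>
#include <vector>

// Sketch of ArrayConcatVisitor's storage strategy: write into a flat,
// preallocated vector while indices fit, and switch to a sparse map (the
// dictionary mode) the first time an index exceeds the initial estimate.
class ConcatVisitor {
 public:
  explicit ConcatVisitor(std::size_t estimated_length)
      : fast_(estimated_length, 0.0) {}

  void Visit(uint32_t i, double value) {
    uint32_t index = index_offset_ + i;
    if (fast_elements_) {
      if (index < fast_.size()) {
        fast_[index] = value;
        return;
      }
      SetDictionaryMode();  // estimate was foiled; migrate existing values
    }
    slow_[index] = value;
  }

  // Called after each source array, mirroring increase_index_offset().
  void IncreaseIndexOffset(uint32_t delta) { index_offset_ += delta; }

  void Dump() const {
    if (fast_elements_) {
      for (double v : fast_) std::cout << v << ' ';
    } else {
      for (auto& [k, v] : slow_) std::cout << k << ':' << v << ' ';
    }
    std::cout << '\n';
  }

 private:
  void SetDictionaryMode() {
    for (uint32_t i = 0; i < fast_.size(); ++i) slow_[i] = fast_[i];
    fast_.clear();
    fast_elements_ = false;
  }

  std::vector<double> fast_;
  std::map<uint32_t, double> slow_;  // the dictionary-mode store
  uint32_t index_offset_ = 0;
  bool fast_elements_ = true;
};

int main() {
  ConcatVisitor v(4);  // estimate: 4 elements total
  v.Visit(0, 1); v.Visit(1, 2);
  v.IncreaseIndexOffset(2);
  v.Visit(0, 3); v.Visit(1, 4);
  v.IncreaseIndexOffset(2);
  v.Visit(0, 5);  // index 4: beyond the estimate -> dictionary mode
  v.Dump();       // 0:1 1:2 2:3 3:4 4:5
}
```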
@@ -975,39 +197,39 @@ RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
- if (array->elements()->IsDictionary()) {
- Handle<FixedArray> keys = isolate->factory()->empty_fixed_array();
- for (PrototypeIterator iter(isolate, array,
- PrototypeIterator::START_AT_RECEIVER);
- !iter.IsAtEnd(); iter.Advance()) {
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy() ||
- JSObject::cast(*PrototypeIterator::GetCurrent(iter))
- ->HasIndexedInterceptor()) {
-        // Bail out if we find a proxy or interceptor; it's likely not worth
-        // collecting keys in that case.
- return *isolate->factory()->NewNumberFromUint(length);
- }
- Handle<JSObject> current =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
- Handle<FixedArray> current_keys =
- isolate->factory()->NewFixedArray(current->NumberOfOwnElements(NONE));
- current->GetOwnElementKeys(*current_keys, NONE);
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, keys, FixedArray::UnionOfKeys(keys, current_keys));
- }
- // Erase any keys >= length.
- // TODO(adamk): Remove this step when the contract of %GetArrayKeys
- // is changed to let this happen on the JS side.
- for (int i = 0; i < keys->length(); i++) {
- if (NumberToUint32(keys->get(i)) >= length) keys->set_undefined(i);
- }
- return *isolate->factory()->NewJSArrayWithElements(keys);
- } else {
+
+ if (!array->elements()->IsDictionary()) {
RUNTIME_ASSERT(array->HasFastSmiOrObjectElements() ||
array->HasFastDoubleElements());
uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
return *isolate->factory()->NewNumberFromUint(Min(actual_length, length));
}
+
+ KeyAccumulator accumulator(isolate);
+ for (PrototypeIterator iter(isolate, array,
+ PrototypeIterator::START_AT_RECEIVER);
+ !iter.IsAtEnd(); iter.Advance()) {
+ if (PrototypeIterator::GetCurrent(iter)->IsJSProxy() ||
+ PrototypeIterator::GetCurrent<JSObject>(iter)
+ ->HasIndexedInterceptor()) {
+      // Bail out if we find a proxy or interceptor; it's likely not worth
+      // collecting keys in that case.
+ return *isolate->factory()->NewNumberFromUint(length);
+ }
+ Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
+ Handle<FixedArray> current_keys =
+ isolate->factory()->NewFixedArray(current->NumberOfOwnElements(NONE));
+ current->GetOwnElementKeys(*current_keys, NONE);
+ accumulator.AddKeys(current_keys, FixedArray::ALL_KEYS);
+ }
+ // Erase any keys >= length.
+ // TODO(adamk): Remove this step when the contract of %GetArrayKeys
+ // is changed to let this happen on the JS side.
+ Handle<FixedArray> keys = accumulator.GetKeys();
+ for (int i = 0; i < keys->length(); i++) {
+ if (NumberToUint32(keys->get(i)) >= length) keys->set_undefined(i);
+ }
+ return *isolate->factory()->NewJSArrayWithElements(keys);
}
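The rewritten body replaces the old pairwise FixedArray::UnionOfKeys folding with a single KeyAccumulator fed the own element keys of each object along the prototype chain. The presumed contract, as used here (the key handles below are hypothetical placeholders):

KeyAccumulator accumulator(isolate);
accumulator.AddKeys(keys_from_receiver, FixedArray::ALL_KEYS);
accumulator.AddKeys(keys_from_prototype, FixedArray::ALL_KEYS);  // deduplicated
Handle<FixedArray> keys = accumulator.GetKeys();  // union of everything added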
@@ -1232,8 +454,7 @@ RUNTIME_FUNCTION(Runtime_HasComplexElements) {
if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
return isolate->heap()->true_value();
}
- Handle<JSObject> current =
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+ Handle<JSObject> current = PrototypeIterator::GetCurrent<JSObject>(iter);
if (current->HasIndexedInterceptor()) {
return isolate->heap()->true_value();
}
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 9b9fa0b12d..84eab2ce11 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -12,13 +12,17 @@
// Implement Atomic accesses to SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here
-// https://docs.google.com/document/d/1NDGA_gZJ7M7w1Bh8S0AoDyEqwDdRh4uSoTPSNn77PFk
+// https://github.com/lars-t-hansen/ecmascript_sharedmem
namespace v8 {
namespace internal {
namespace {
+inline bool AtomicIsLockFree(uint32_t size) {
+ return size == 1 || size == 2 || size == 4;
+}
+
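The new AtomicIsLockFree helper is a fixed whitelist: 1-, 2- and 4-byte accesses are lock-free on every supported target, and this is what ultimately backs Atomics.isLockFree below. As a modern cross-check (not part of the patch), C++17 exposes the compiler's own guarantee:

#include <atomic>
#include <cstdint>
#include <cstdio>

int main() {
  // Expected to print 1 for the first three on mainstream targets, matching
  // the hard-coded whitelist; 8-byte support varies across platforms, which
  // is why the whitelist stops at 4.
  std::printf("1: %d\n", std::atomic<std::uint8_t>::is_always_lock_free);
  std::printf("2: %d\n", std::atomic<std::uint16_t>::is_always_lock_free);
  std::printf("4: %d\n", std::atomic<std::uint32_t>::is_always_lock_free);
  std::printf("8: %d\n", std::atomic<std::uint64_t>::is_always_lock_free);
}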
#if V8_CC_GNU
template <typename T>
@@ -70,37 +74,6 @@ inline T ExchangeSeqCst(T* p, T value) {
return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}
-#if ATOMICS_REQUIRE_LOCK_64_BIT
-
-// We only need to implement the following functions, because the rest of the
-// atomic operations only work on integer types, and the only 64-bit type is
-// float64. Similarly, because the values are being bit_cast from double ->
-// uint64_t, we don't need to implement these functions for int64_t either.
-
-static base::LazyMutex atomic_mutex = LAZY_MUTEX_INITIALIZER;
-
-inline uint64_t CompareExchangeSeqCst(uint64_t* p, uint64_t oldval,
- uint64_t newval) {
- base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
- uint64_t result = *p;
- if (result == oldval) *p = newval;
- return result;
-}
-
-
-inline uint64_t LoadSeqCst(uint64_t* p) {
- base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
- return *p;
-}
-
-
-inline void StoreSeqCst(uint64_t* p, uint64_t value) {
- base::LockGuard<base::Mutex> lock_guard(atomic_mutex.Pointer());
- *p = value;
-}
-
-#endif // ATOMICS_REQUIRE_LOCK_64_BIT
-
#elif V8_CC_MSVC
#define InterlockedCompareExchange32 _InterlockedCompareExchange
@@ -113,33 +86,32 @@ inline void StoreSeqCst(uint64_t* p, uint64_t value) {
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
-#define ATOMIC_OPS_INTEGER(type, suffix, vctype) \
- inline type AddSeqCst(type* p, type value) { \
- return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
- bit_cast<vctype>(value)); \
- } \
- inline type SubSeqCst(type* p, type value) { \
- return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
- -bit_cast<vctype>(value)); \
- } \
- inline type AndSeqCst(type* p, type value) { \
- return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \
- bit_cast<vctype>(value)); \
- } \
- inline type OrSeqCst(type* p, type value) { \
- return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \
- bit_cast<vctype>(value)); \
- } \
- inline type XorSeqCst(type* p, type value) { \
- return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
- bit_cast<vctype>(value)); \
- } \
- inline type ExchangeSeqCst(type* p, type value) { \
- return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
- bit_cast<vctype>(value)); \
- }
-
-#define ATOMIC_OPS_FLOAT(type, suffix, vctype) \
+#define ATOMIC_OPS(type, suffix, vctype) \
+ inline type AddSeqCst(type* p, type value) { \
+ return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ inline type SubSeqCst(type* p, type value) { \
+ return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
+ -bit_cast<vctype>(value)); \
+ } \
+ inline type AndSeqCst(type* p, type value) { \
+ return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ inline type OrSeqCst(type* p, type value) { \
+ return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ inline type XorSeqCst(type* p, type value) { \
+ return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ inline type ExchangeSeqCst(type* p, type value) { \
+ return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
+ bit_cast<vctype>(value)); \
+ } \
+ \
inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \
return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
bit_cast<vctype>(newval), \
@@ -151,20 +123,14 @@ inline void StoreSeqCst(uint64_t* p, uint64_t value) {
bit_cast<vctype>(value)); \
}
-#define ATOMIC_OPS(type, suffix, vctype) \
- ATOMIC_OPS_INTEGER(type, suffix, vctype) \
- ATOMIC_OPS_FLOAT(type, suffix, vctype)
-
ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
-ATOMIC_OPS_FLOAT(uint64_t, 64, LONGLONG)
#undef ATOMIC_OPS_INTEGER
-#undef ATOMIC_OPS_FLOAT
#undef ATOMIC_OPS
#undef InterlockedCompareExchange32
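Hand-expanding one instantiation makes the consolidated macro easier to audit. For ATOMIC_OPS(int32_t, 32, long), AddSeqCst becomes roughly the following (assuming an InterlockedExchangeAdd32 define that parallels the InterlockedCompareExchange32 one visible above but is elided from this hunk):

inline int32_t AddSeqCst(int32_t* p, int32_t value) {
  // `long` is the vctype because MSVC's Interlocked intrinsics traffic in
  // `long` for 32-bit operands; bit_cast preserves the bit pattern.
  return InterlockedExchangeAdd32(reinterpret_cast<long*>(p),
                                  bit_cast<long>(value));
}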
@@ -196,46 +162,16 @@ inline int32_t FromObject<int32_t>(Handle<Object> number) {
return NumberToInt32(*number);
}
-template <>
-inline float FromObject<float>(Handle<Object> number) {
- return static_cast<float>(number->Number());
-}
-
-template <>
-inline double FromObject<double>(Handle<Object> number) {
- return number->Number();
-}
-
template <typename T, typename F>
inline T ToAtomic(F from) {
return static_cast<T>(from);
}
-template <>
-inline uint32_t ToAtomic<uint32_t, float>(float from) {
- return bit_cast<uint32_t, float>(from);
-}
-
-template <>
-inline uint64_t ToAtomic<uint64_t, double>(double from) {
- return bit_cast<uint64_t, double>(from);
-}
-
template <typename T, typename F>
inline T FromAtomic(F from) {
return static_cast<T>(from);
}
-template <>
-inline float FromAtomic<float, uint32_t>(uint32_t from) {
- return bit_cast<float, uint32_t>(from);
-}
-
-template <>
-inline double FromAtomic<double, uint64_t>(uint64_t from) {
- return bit_cast<double, uint64_t>(from);
-}
-
template <typename T>
inline Object* ToObject(Isolate* isolate, T t);
@@ -269,16 +205,6 @@ inline Object* ToObject<uint32_t>(Isolate* isolate, uint32_t t) {
return *isolate->factory()->NewNumber(t);
}
-template <>
-inline Object* ToObject<float>(Isolate* isolate, float t) {
- return *isolate->factory()->NewNumber(t);
-}
-
-template <>
-inline Object* ToObject<double>(Isolate* isolate, double t) {
- return *isolate->factory()->NewNumber(t);
-}
-
template <typename T>
struct FromObjectTraits {};
@@ -318,18 +244,6 @@ struct FromObjectTraits<uint32_t> {
typedef uint32_t atomic_type;
};
-template <>
-struct FromObjectTraits<float> {
- typedef float convert_type;
- typedef uint32_t atomic_type;
-};
-
-template <>
-struct FromObjectTraits<double> {
- typedef double convert_type;
- typedef uint64_t atomic_type;
-};
-
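The deleted specializations existed only to pun float/double values through same-size integers so the integer atomics could move them; with the float paths gone, the generic static_cast identities suffice. For reference, the underlying trick is the standard memcpy bit-cast (V8 has its own bit_cast helper):

#include <cstdint>
#include <cstring>

// Same-size type punning without undefined behavior: copy the bytes.
inline uint32_t BitCastFloatToUint32(float from) {
  uint32_t to;
  std::memcpy(&to, &from, sizeof to);
  return to;
}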
template <typename T>
inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
@@ -540,12 +454,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
- case kExternalFloat32Array:
- return DoCompareExchange<float>(isolate, buffer, index, oldobj, newobj);
-
- case kExternalFloat64Array:
- return DoCompareExchange<double>(isolate, buffer, index, oldobj, newobj);
-
case kExternalUint8ClampedArray:
return DoCompareExchangeUint8Clamped(isolate, buffer, index, oldobj,
newobj);
@@ -574,9 +482,12 @@ RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
case kExternal##Type##Array: \
return DoLoad<ctype>(isolate, buffer, index);
- TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
+ case kExternalUint8ClampedArray:
+ return DoLoad<uint8_t>(isolate, buffer, index);
+
default:
break;
}
@@ -605,12 +516,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsStore) {
INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE
- case kExternalFloat32Array:
- return DoStore<float>(isolate, buffer, index, value);
-
- case kExternalFloat64Array:
- return DoStore<double>(isolate, buffer, index, value);
-
case kExternalUint8ClampedArray:
return DoStoreUint8Clamped(isolate, buffer, index, value);
@@ -645,8 +550,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
case kExternalUint8ClampedArray:
return DoAddUint8Clamped(isolate, buffer, index, value);
- case kExternalFloat32Array:
- case kExternalFloat64Array:
default:
break;
}
@@ -678,8 +581,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsSub) {
case kExternalUint8ClampedArray:
return DoSubUint8Clamped(isolate, buffer, index, value);
- case kExternalFloat32Array:
- case kExternalFloat64Array:
default:
break;
}
@@ -711,8 +612,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
case kExternalUint8ClampedArray:
return DoAndUint8Clamped(isolate, buffer, index, value);
- case kExternalFloat32Array:
- case kExternalFloat64Array:
default:
break;
}
@@ -744,8 +643,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsOr) {
case kExternalUint8ClampedArray:
return DoOrUint8Clamped(isolate, buffer, index, value);
- case kExternalFloat32Array:
- case kExternalFloat64Array:
default:
break;
}
@@ -777,8 +674,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) {
case kExternalUint8ClampedArray:
return DoXorUint8Clamped(isolate, buffer, index, value);
- case kExternalFloat32Array:
- case kExternalFloat64Array:
default:
break;
}
@@ -810,8 +705,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
case kExternalUint8ClampedArray:
return DoExchangeUint8Clamped(isolate, buffer, index, value);
- case kExternalFloat32Array:
- case kExternalFloat64Array:
default:
break;
}
@@ -826,9 +719,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
DCHECK(args.length() == 1);
CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
uint32_t usize = NumberToUint32(*size);
-
- return Runtime::AtomicIsLockFree(usize) ? isolate->heap()->true_value()
- : isolate->heap()->false_value();
+ return isolate->heap()->ToBoolean(AtomicIsLockFree(usize));
}
}
} // namespace v8::internal
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 8692b9b800..51e682f325 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -10,6 +10,7 @@
#include "src/arguments.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/runtime/runtime.h"
@@ -73,19 +74,6 @@ RUNTIME_FUNCTION(Runtime_ThrowIfStaticPrototype) {
}
-RUNTIME_FUNCTION(Runtime_ToMethod) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
- Handle<JSFunction> clone = JSFunction::CloneClosure(fun);
- Handle<Symbol> home_object_symbol(isolate->heap()->home_object_symbol());
- JSObject::SetOwnPropertyIgnoreAttributes(clone, home_object_symbol,
- home_object, DONT_ENUM).Assert();
- return *clone;
-}
-
-
RUNTIME_FUNCTION(Runtime_HomeObjectSymbol) {
DCHECK(args.length() == 0);
return isolate->heap()->home_object_symbol();
@@ -104,8 +92,9 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
} else {
if (super_class->IsNull()) {
prototype_parent = isolate->factory()->null_value();
- } else if (super_class->IsSpecFunction()) {
- if (Handle<JSFunction>::cast(super_class)->shared()->is_generator()) {
+ } else if (super_class->IsConstructor()) {
+ if (super_class->IsJSFunction() &&
+ Handle<JSFunction>::cast(super_class)->shared()->is_generator()) {
THROW_NEW_ERROR(
isolate,
NewTypeError(MessageTemplate::kExtendsValueGenerator, super_class),
@@ -125,7 +114,6 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
}
constructor_parent = super_class;
} else {
- // TODO(arv): Should be IsConstructor.
THROW_NEW_ERROR(
isolate,
NewTypeError(MessageTemplate::kExtendsValueNotFunction, super_class),
@@ -137,6 +125,11 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate, Handle<Object> name,
isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
if (constructor->map()->is_strong()) {
map->set_is_strong();
+ if (super_class->IsNull()) {
+ // Strong class is not permitted to extend null.
+ THROW_NEW_ERROR(isolate, NewTypeError(MessageTemplate::kStrongExtendNull),
+ Object);
+ }
}
Map::SetPrototype(map, prototype_parent);
map->SetConstructor(*constructor);
@@ -211,28 +204,6 @@ RUNTIME_FUNCTION(Runtime_DefineClass) {
}
-RUNTIME_FUNCTION(Runtime_DefineClassStrong) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 5);
- CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 1);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 2);
- CONVERT_SMI_ARG_CHECKED(start_position, 3);
- CONVERT_SMI_ARG_CHECKED(end_position, 4);
-
- if (super_class->IsNull()) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kStrongExtendNull));
- }
-
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, DefineClass(isolate, name, super_class, constructor,
- start_position, end_position));
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_DefineClassMethod) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
@@ -380,7 +351,7 @@ RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
Handle<Name> name;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Runtime::ToName(isolate, key));
+ Object::ToName(isolate, key));
// TODO(verwaest): Unify using LookupIterator.
if (name->AsArrayIndex(&index)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -477,7 +448,7 @@ static Object* StoreKeyedToSuper(Isolate* isolate, Handle<JSObject> home_object,
}
Handle<Name> name;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
- Runtime::ToName(isolate, key));
+ Object::ToName(isolate, key));
// TODO(verwaest): Unify using LookupIterator.
if (name->AsArrayIndex(&index)) {
return StoreElementToSuper(isolate, home_object, receiver, index, value,
@@ -527,46 +498,22 @@ RUNTIME_FUNCTION(Runtime_DefaultConstructorCallSuper) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, original_constructor, 0);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, actual_constructor, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, super_constructor, 1);
JavaScriptFrameIterator it(isolate);
- // Prepare the callee to the super call. The super constructor is stored as
- // the prototype of the constructor we are currently executing.
- Handle<Object> super_constructor;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, super_constructor,
- Runtime::GetPrototype(isolate, actual_constructor));
-
- // Find the frame that holds the actual arguments passed to the function.
- it.AdvanceToArgumentsFrame();
- JavaScriptFrame* frame = it.frame();
-
- // Prepare the array containing all passed arguments.
- int argument_count = frame->GetArgumentsLength();
- Handle<FixedArray> elements =
- isolate->factory()->NewUninitializedFixedArray(argument_count);
- for (int i = 0; i < argument_count; ++i) {
- elements->set(i, frame->GetParameter(i));
- }
- Handle<JSArray> arguments = isolate->factory()->NewJSArrayWithElements(
- elements, FAST_ELEMENTS, argument_count);
+ // Determine the actual arguments passed to the function.
+ int argument_count = 0;
+ base::SmartArrayPointer<Handle<Object>> arguments =
+ Runtime::GetCallerArguments(isolate, 0, &argument_count);
- // Call $reflectConstruct(<super>, <args>, <new.target>) now.
- Handle<Object> reflect;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, reflect,
- Object::GetProperty(isolate,
- handle(isolate->native_context()->builtins()),
- "$reflectConstruct"));
- RUNTIME_ASSERT(reflect->IsJSFunction()); // Depends on --harmony-reflect.
- Handle<Object> argv[] = {super_constructor, arguments, original_constructor};
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Execution::Call(isolate, reflect, isolate->factory()->undefined_value(),
- arraysize(argv), argv));
+ Execution::New(isolate, super_constructor, original_constructor,
+ argument_count, arguments.get()));
return *result;
}
+
} // namespace internal
} // namespace v8
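The rewrite above drops the detour through the $reflectConstruct JS builtin: it reads the caller's actual arguments directly off the stack and constructs in one step, passing original_constructor through as the new-target so the allocated instance still gets the subclass's prototype. Distilled to its call shape (bookkeeping elided):

int argc = 0;
base::SmartArrayPointer<Handle<Object>> argv =
    Runtime::GetCallerArguments(isolate, 0, &argc);
// new.target = original_constructor, callee = super_constructor.
MaybeHandle<Object> result =
    Execution::New(isolate, super_constructor, original_constructor,
                   argc, argv.get());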
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 3450fca0e2..32340e5acb 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -45,17 +45,11 @@ RUNTIME_FUNCTION(Runtime_GenericHash) {
}
-void Runtime::JSSetInitialize(Isolate* isolate, Handle<JSSet> set) {
- Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet();
- set->set_table(*table);
-}
-
-
RUNTIME_FUNCTION(Runtime_SetInitialize) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Runtime::JSSetInitialize(isolate, holder);
+ JSSet::Initialize(holder, isolate);
return *holder;
}
@@ -82,18 +76,11 @@ RUNTIME_FUNCTION(Runtime_SetShrink) {
}
-void Runtime::JSSetClear(Isolate* isolate, Handle<JSSet> set) {
- Handle<OrderedHashSet> table(OrderedHashSet::cast(set->table()));
- table = OrderedHashSet::Clear(table);
- set->set_table(*table);
-}
-
-
RUNTIME_FUNCTION(Runtime_SetClear) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Runtime::JSSetClear(isolate, holder);
+ JSSet::Clear(holder);
return isolate->heap()->undefined_value();
}
@@ -153,17 +140,11 @@ RUNTIME_FUNCTION(Runtime_SetIteratorDetails) {
}
-void Runtime::JSMapInitialize(Isolate* isolate, Handle<JSMap> map) {
- Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap();
- map->set_table(*table);
-}
-
-
RUNTIME_FUNCTION(Runtime_MapInitialize) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Runtime::JSMapInitialize(isolate, holder);
+ JSMap::Initialize(holder, isolate);
return *holder;
}
@@ -179,18 +160,11 @@ RUNTIME_FUNCTION(Runtime_MapShrink) {
}
-void Runtime::JSMapClear(Isolate* isolate, Handle<JSMap> map) {
- Handle<OrderedHashMap> table(OrderedHashMap::cast(map->table()));
- table = OrderedHashMap::Clear(table);
- map->set_table(*table);
-}
-
-
RUNTIME_FUNCTION(Runtime_MapClear) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Runtime::JSMapClear(isolate, holder);
+ JSMap::Clear(holder);
return isolate->heap()->undefined_value();
}
@@ -297,19 +271,11 @@ RUNTIME_FUNCTION(Runtime_MapIteratorNext) {
}
-void Runtime::WeakCollectionInitialize(
- Isolate* isolate, Handle<JSWeakCollection> weak_collection) {
- DCHECK_EQ(0, weak_collection->map()->GetInObjectProperties());
- Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 0);
- weak_collection->set_table(*table);
-}
-
-
RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
- Runtime::WeakCollectionInitialize(isolate, weak_collection);
+ JSWeakCollection::Initialize(weak_collection, isolate);
return *weak_collection;
}
@@ -344,32 +310,6 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionHas) {
}
-bool Runtime::WeakCollectionDelete(Handle<JSWeakCollection> weak_collection,
- Handle<Object> key) {
- int32_t hash =
- Object::GetOrCreateHash(weak_collection->GetIsolate(), key)->value();
- return WeakCollectionDelete(weak_collection, key, hash);
-}
-
-
-bool Runtime::WeakCollectionDelete(Handle<JSWeakCollection> weak_collection,
- Handle<Object> key, int32_t hash) {
- DCHECK(key->IsJSReceiver() || key->IsSymbol());
- Handle<ObjectHashTable> table(
- ObjectHashTable::cast(weak_collection->table()));
- DCHECK(table->IsKey(*key));
- bool was_present = false;
- Handle<ObjectHashTable> new_table =
- ObjectHashTable::Remove(table, key, &was_present, hash);
- weak_collection->set_table(*new_table);
- if (*table != *new_table) {
- // Zap the old table since we didn't record slots for its elements.
- table->FillWithHoles(0, table->length());
- }
- return was_present;
-}
-
-
RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
@@ -380,28 +320,11 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
RUNTIME_ASSERT(table->IsKey(*key));
- bool was_present = Runtime::WeakCollectionDelete(weak_collection, key, hash);
+ bool was_present = JSWeakCollection::Delete(weak_collection, key, hash);
return isolate->heap()->ToBoolean(was_present);
}
-void Runtime::WeakCollectionSet(Handle<JSWeakCollection> weak_collection,
- Handle<Object> key, Handle<Object> value,
- int32_t hash) {
- DCHECK(key->IsJSReceiver() || key->IsSymbol());
- Handle<ObjectHashTable> table(
- ObjectHashTable::cast(weak_collection->table()));
- DCHECK(table->IsKey(*key));
- Handle<ObjectHashTable> new_table =
- ObjectHashTable::Put(table, key, value, hash);
- weak_collection->set_table(*new_table);
- if (*table != *new_table) {
- // Zap the old table since we didn't record slots for its elements.
- table->FillWithHoles(0, table->length());
- }
-}
-
-
RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
@@ -413,7 +336,7 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
Handle<ObjectHashTable> table(
ObjectHashTable::cast(weak_collection->table()));
RUNTIME_ASSERT(table->IsKey(*key));
- Runtime::WeakCollectionSet(weak_collection, key, value, hash);
+ JSWeakCollection::Set(weak_collection, key, value, hash);
return *weak_collection;
}
@@ -451,7 +374,7 @@ RUNTIME_FUNCTION(Runtime_ObservationWeakMapCreate) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
Handle<JSWeakMap> weakmap = isolate->factory()->NewJSWeakMap();
- Runtime::WeakCollectionInitialize(isolate, weakmap);
+ JSWeakCollection::Initialize(weakmap, isolate);
return *weakmap;
}
} // namespace internal
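Every hunk in this file follows the same shape: a free Runtime:: helper becomes a static method on the collection object itself, so non-runtime callers no longer have to route through the runtime layer. Judging from the deleted bodies, the relocated methods are presumably near-verbatim moves; e.g. JSSet::Initialize would read:

// Presumed shape of the relocated helper (the deleted Runtime::JSSetInitialize
// body, re-homed; the actual definition lives outside this diff).
// static
void JSSet::Initialize(Handle<JSSet> set, Isolate* isolate) {
  Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet();
  set->set_table(*table);
}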
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index e7f567f885..8790da05e3 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -9,6 +9,7 @@
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
@@ -27,6 +28,8 @@ RUNTIME_FUNCTION(Runtime_CompileLazy) {
PrintF("]\n");
}
#endif
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
// Compile the target function.
DCHECK(function->shared()->allows_lazy_compilation());
@@ -47,6 +50,9 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1);
+ StackLimitCheck check(isolate);
+ if (check.JsHasOverflowed(1 * KB)) return isolate->StackOverflow();
+
Compiler::ConcurrencyMode mode =
concurrent ? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT;
Handle<Code> code;
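Both compile entry points now re-check the JS stack limit with a 1 KB margin before recursing into the compiler, so a deeply nested lazy compile surfaces as a catchable stack-overflow exception rather than smashing the native stack. A self-contained analogue of the guard (V8's StackLimitCheck reads the limit from the isolate; this is only a sketch):

#include <cstddef>
#include <cstdint>

// True if the current stack pointer is still `margin_bytes` above the limit;
// assumes a downward-growing stack, as on all V8 targets.
bool HasStackHeadroom(uintptr_t stack_limit, size_t margin_bytes) {
  int probe;
  uintptr_t sp = reinterpret_cast<uintptr_t>(&probe);  // approximate SP
  return sp > stack_limit + margin_bytes;
}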
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index 4231d82c34..614b4a9ede 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -9,6 +9,7 @@
#include "src/date.h"
#include "src/dateparser-inl.h"
#include "src/factory.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
namespace v8 {
@@ -38,7 +39,6 @@ RUNTIME_FUNCTION(Runtime_DateSetValue) {
DateCache* date_cache = isolate->date_cache();
Handle<Object> value;
- ;
bool is_value_nan = false;
if (std::isnan(time)) {
value = isolate->factory()->nan_value();
@@ -99,8 +99,8 @@ RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
RUNTIME_FUNCTION(Runtime_DateParseString) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1);
RUNTIME_ASSERT(output->HasFastElements());
@@ -109,6 +109,10 @@ RUNTIME_FUNCTION(Runtime_DateParseString) {
Handle<FixedArray> output_array(FixedArray::cast(output->elements()));
RUNTIME_ASSERT(output_array->length() >= DateParser::OUTPUT_SIZE);
+ Handle<String> str;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, str,
+ Object::ToString(isolate, input));
+
str = String::Flatten(str);
DisallowHeapAllocation no_gc;
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 1cd524f17c..9f49e4d5d2 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -10,6 +10,7 @@
#include "src/debug/debug-frames.h"
#include "src/debug/debug-scopes.h"
#include "src/frames-inl.h"
+#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"
namespace v8 {
@@ -221,7 +222,7 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
Handle<JSObject> promise = Handle<JSObject>::cast(object);
Handle<Object> status_obj =
- DebugGetProperty(promise, isolate->promise_status());
+ DebugGetProperty(promise, isolate->factory()->promise_status_symbol());
RUNTIME_ASSERT_HANDLIFIED(status_obj->IsSmi(), JSArray);
const char* status = "rejected";
int status_val = Handle<Smi>::cast(status_obj)->value();
@@ -244,7 +245,7 @@ MaybeHandle<JSArray> Runtime::GetInternalProperties(Isolate* isolate,
result->set(1, *status_str);
Handle<Object> value_obj =
- DebugGetProperty(promise, isolate->promise_value());
+ DebugGetProperty(promise, isolate->factory()->promise_value_symbol());
Handle<String> promise_value =
factory->NewStringFromAsciiChecked("[[PromiseValue]]");
result->set(2, *promise_value);
@@ -533,6 +534,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// Get scope info and read from it for local variable information.
Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
+ RUNTIME_ASSERT(function->IsSubjectToDebugging());
Handle<SharedFunctionInfo> shared(function->shared());
Handle<ScopeInfo> scope_info(shared->scope_info());
DCHECK(*scope_info != ScopeInfo::Empty(isolate));
@@ -566,13 +568,11 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
if (scope_info->LocalIsSynthetic(i)) continue;
Handle<String> name(scope_info->LocalName(i));
VariableMode mode;
- VariableLocation location;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
locals->set(local * 2, *name);
int context_slot_index = ScopeInfo::ContextSlotIndex(
- scope_info, name, &mode, &location, &init_flag, &maybe_assigned_flag);
- DCHECK(VariableLocation::CONTEXT == location);
+ scope_info, name, &mode, &init_flag, &maybe_assigned_flag);
Object* value = context->get(context_slot_index);
locals->set(local * 2 + 1, value);
local++;
@@ -712,8 +712,8 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// THIS MUST BE DONE LAST SINCE WE MIGHT ADVANCE
// THE FRAME ITERATOR TO WRAP THE RECEIVER.
Handle<Object> receiver(it.frame()->receiver(), isolate);
- if (!receiver->IsJSObject() && is_sloppy(shared->language_mode()) &&
- !function->IsBuiltin()) {
+ DCHECK(!function->IsBuiltin());
+ if (!receiver->IsJSObject() && is_sloppy(shared->language_mode())) {
     // If the receiver is not a JSObject and the function is not a
     // builtin or strict mode, we have hit an optimization where a
     // value object is not converted into a wrapped JS object. To
@@ -1602,13 +1602,10 @@ RUNTIME_FUNCTION(Runtime_GetScript) {
CONVERT_ARG_HANDLE_CHECKED(String, script_name, 0);
Handle<Script> found;
- Heap* heap = isolate->heap();
{
- HeapIterator iterator(heap);
- HeapObject* obj = NULL;
- while ((obj = iterator.next()) != NULL) {
- if (!obj->IsScript()) continue;
- Script* script = Script::cast(obj);
+ Script::Iterator iterator(isolate);
+ Script* script = NULL;
+ while ((script = iterator.Next()) != NULL) {
if (!script->name()->IsString()) continue;
String* name = String::cast(script->name());
if (name->Equals(*script_name)) {
@@ -1618,7 +1615,7 @@ RUNTIME_FUNCTION(Runtime_GetScript) {
}
}
- if (found.is_null()) return heap->undefined_value();
+ if (found.is_null()) return isolate->heap()->undefined_value();
return *Script::GetWrapper(found);
}
@@ -1637,7 +1634,7 @@ RUNTIME_FUNCTION(Runtime_DebugCallbackSupportsStepping) {
// or not even a function.
return isolate->heap()->ToBoolean(
callback->IsJSFunction() &&
- (!JSFunction::cast(callback)->IsBuiltin() ||
+ (JSFunction::cast(callback)->IsSubjectToDebugging() ||
JSFunction::cast(callback)->shared()->bound()));
}
diff --git a/deps/v8/src/runtime/runtime-forin.cc b/deps/v8/src/runtime/runtime-forin.cc
index ecd55d172a..ff6804c8fb 100644
--- a/deps/v8/src/runtime/runtime-forin.cc
+++ b/deps/v8/src/runtime/runtime-forin.cc
@@ -28,7 +28,7 @@ RUNTIME_FUNCTION(Runtime_ForInFilter) {
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
// TODO(turbofan): Fast case for array indices.
Handle<Name> name;
- if (!Runtime::ToName(isolate, key).ToHandle(&name)) {
+ if (!Object::ToName(isolate, key).ToHandle(&name)) {
return isolate->heap()->exception();
}
Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
@@ -53,7 +53,7 @@ RUNTIME_FUNCTION(Runtime_ForInNext) {
}
// TODO(turbofan): Fast case for array indices.
Handle<Name> name;
- if (!Runtime::ToName(isolate, key).ToHandle(&name)) {
+ if (!Object::ToName(isolate, key).ToHandle(&name)) {
return isolate->heap()->exception();
}
Maybe<bool> result = JSReceiver::HasProperty(receiver, name);
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index a368f1b14c..18a0865f27 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -7,32 +7,15 @@
#include "src/accessors.h"
#include "src/arguments.h"
#include "src/compiler.h"
-#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/frames-inl.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/profiler/cpu-profiler.h"
namespace v8 {
namespace internal {
-RUNTIME_FUNCTION(Runtime_IsSloppyModeFunction) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
- if (!callable->IsJSFunction()) {
- HandleScope scope(isolate);
- Handle<Object> delegate;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, delegate, Execution::TryGetFunctionDelegate(
- isolate, Handle<JSReceiver>(callable)));
- callable = JSFunction::cast(*delegate);
- }
- JSFunction* function = JSFunction::cast(callable);
- SharedFunctionInfo* shared = function->shared();
- return isolate->heap()->ToBoolean(is_sloppy(shared->language_mode()));
-}
-
-
RUNTIME_FUNCTION(Runtime_FunctionGetName) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -176,7 +159,7 @@ RUNTIME_FUNCTION(Runtime_FunctionSetPrototype) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
- RUNTIME_ASSERT(fun->should_have_prototype());
+ RUNTIME_ASSERT(fun->IsConstructor());
RETURN_FAILURE_ON_EXCEPTION(isolate,
Accessors::FunctionSetPrototype(fun, value));
return args[0]; // return TOS
@@ -192,12 +175,15 @@ RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
}
-RUNTIME_FUNCTION(Runtime_FunctionIsBuiltin) {
+RUNTIME_FUNCTION(Runtime_FunctionHidesSource) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
-
CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return isolate->heap()->ToBoolean(f->IsBuiltin());
+
+ SharedFunctionInfo* shared = f->shared();
+ bool hide_source = !shared->script()->IsScript() ||
+ Script::cast(shared->script())->hide_source();
+ return isolate->heap()->ToBoolean(hide_source);
}
@@ -212,7 +198,7 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
Handle<SharedFunctionInfo> source_shared(source->shared());
RUNTIME_ASSERT(!source_shared->bound());
- if (!Compiler::EnsureCompiled(source, KEEP_EXCEPTION)) {
+ if (!Compiler::Compile(source, KEEP_EXCEPTION)) {
return isolate->heap()->exception();
}
@@ -250,10 +236,12 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
// Make sure we get a fresh copy of the literal vector to avoid cross
// context contamination.
Handle<Context> context(source->context());
- int number_of_literals = source->NumberOfLiterals();
- Handle<FixedArray> literals =
- isolate->factory()->NewFixedArray(number_of_literals, TENURED);
target->set_context(*context);
+
+ int number_of_literals = source->NumberOfLiterals();
+ Handle<LiteralsArray> literals =
+ LiteralsArray::New(isolate, handle(target_shared->feedback_vector()),
+ number_of_literals, TENURED);
target->set_literals(*literals);
if (isolate->logger()->is_logging_code_events() ||
@@ -284,30 +272,10 @@ RUNTIME_FUNCTION(Runtime_SetNativeFlag) {
RUNTIME_FUNCTION(Runtime_IsConstructor) {
- HandleScope handles(isolate);
- RUNTIME_ASSERT(args.length() == 1);
-
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
-
- // TODO(caitp): implement this in a better/simpler way, allow inlining via TF
- if (object->IsJSFunction()) {
- Handle<JSFunction> func = Handle<JSFunction>::cast(object);
- bool should_have_prototype = func->should_have_prototype();
- if (func->shared()->bound()) {
- Handle<FixedArray> bound_args =
- Handle<FixedArray>(FixedArray::cast(func->function_bindings()));
- Handle<Object> bound_function(
- JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)),
- isolate);
- if (bound_function->IsJSFunction()) {
- Handle<JSFunction> bound = Handle<JSFunction>::cast(bound_function);
- DCHECK(!bound->shared()->bound());
- should_have_prototype = bound->should_have_prototype();
- }
- }
- return isolate->heap()->ToBoolean(should_have_prototype);
- }
- return isolate->heap()->false_value();
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, object, 0);
+ return isolate->heap()->ToBoolean(object->IsConstructor());
}
@@ -327,7 +295,7 @@ RUNTIME_FUNCTION(Runtime_SetForceInlineFlag) {
// Find the arguments of the JavaScript function invocation that called
// into C++ code. Collect these in a newly allocated array of handles (possibly
// prefixed by a number of empty handles).
-static base::SmartArrayPointer<Handle<Object> > GetCallerArguments(
+base::SmartArrayPointer<Handle<Object>> Runtime::GetCallerArguments(
Isolate* isolate, int prefix_argc, int* total_argc) {
// Find frame containing arguments passed to the caller.
JavaScriptFrameIterator it(isolate);
@@ -399,8 +367,8 @@ RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
bound_function->shared()->set_inferred_name(isolate->heap()->empty_string());
// Get all arguments of calling function (Function.prototype.bind).
int argc = 0;
- base::SmartArrayPointer<Handle<Object> > arguments =
- GetCallerArguments(isolate, 0, &argc);
+ base::SmartArrayPointer<Handle<Object>> arguments =
+ Runtime::GetCallerArguments(isolate, 0, &argc);
// Don't count the this-arg.
if (argc > 0) {
RUNTIME_ASSERT(arguments[0].is_identical_to(this_object));
@@ -443,8 +411,10 @@ RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
RUNTIME_ASSERT(bound_function->RemovePrototype());
// The new function should have the same [[Prototype]] as the bindee.
- Handle<Map> bound_function_map(
- isolate->native_context()->bound_function_map());
+ Handle<Map> bound_function_map =
+ bindee->IsConstructor()
+ ? isolate->bound_function_with_constructor_map()
+ : isolate->bound_function_without_constructor_map();
PrototypeIterator iter(isolate, bindee);
Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
if (bound_function_map->prototype() != *proto) {
@@ -452,6 +422,7 @@ RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
REGULAR_PROTOTYPE);
}
JSObject::MigrateToMap(bound_function, bound_function_map);
+ DCHECK_EQ(bindee->IsConstructor(), bound_function->IsConstructor());
Handle<String> length_string = isolate->factory()->length_string();
// These attributes must be kept in sync with how the bootstrapper
@@ -501,23 +472,16 @@ RUNTIME_FUNCTION(Runtime_NewObjectFromBound) {
!Handle<JSFunction>::cast(bound_function)->shared()->bound());
int total_argc = 0;
- base::SmartArrayPointer<Handle<Object> > param_data =
- GetCallerArguments(isolate, bound_argc, &total_argc);
+ base::SmartArrayPointer<Handle<Object>> param_data =
+ Runtime::GetCallerArguments(isolate, bound_argc, &total_argc);
for (int i = 0; i < bound_argc; i++) {
param_data[i] = Handle<Object>(
bound_args->get(JSFunction::kBoundArgumentsStartIndex + i), isolate);
}
- if (!bound_function->IsJSFunction()) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, bound_function,
- Execution::TryGetConstructorDelegate(isolate, bound_function));
- }
- DCHECK(bound_function->IsJSFunction());
-
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, Execution::New(Handle<JSFunction>::cast(bound_function),
+ isolate, result, Execution::New(isolate, bound_function, bound_function,
total_argc, param_data.get()));
return *result;
}
@@ -525,32 +489,18 @@ RUNTIME_FUNCTION(Runtime_NewObjectFromBound) {
RUNTIME_FUNCTION(Runtime_Call) {
HandleScope scope(isolate);
- DCHECK(args.length() >= 2);
- int argc = args.length() - 2;
- CONVERT_ARG_CHECKED(JSReceiver, fun, argc + 1);
- Object* receiver = args[0];
-
- // If there are too many arguments, allocate argv via malloc.
- const int argv_small_size = 10;
- Handle<Object> argv_small_buffer[argv_small_size];
- base::SmartArrayPointer<Handle<Object> > argv_large_buffer;
- Handle<Object>* argv = argv_small_buffer;
- if (argc > argv_small_size) {
- argv = new Handle<Object>[argc];
- if (argv == NULL) return isolate->StackOverflow();
- argv_large_buffer = base::SmartArrayPointer<Handle<Object> >(argv);
- }
-
+ DCHECK_LE(2, args.length());
+ int const argc = args.length() - 2;
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, target, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
+ ScopedVector<Handle<Object>> argv(argc);
for (int i = 0; i < argc; ++i) {
- argv[i] = Handle<Object>(args[1 + i], isolate);
+ argv[i] = args.at<Object>(2 + i);
}
-
- Handle<JSReceiver> hfun(fun);
- Handle<Object> hreceiver(receiver, isolate);
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Execution::Call(isolate, hfun, hreceiver, argc, argv, true));
+ Execution::Call(isolate, target, receiver, argc, argv.start()));
return *result;
}
@@ -587,30 +537,11 @@ RUNTIME_FUNCTION(Runtime_Apply) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- Execution::Call(isolate, fun, receiver, argc, argv, true));
+ isolate, result, Execution::Call(isolate, fun, receiver, argc, argv));
return *result;
}
-RUNTIME_FUNCTION(Runtime_GetFunctionDelegate) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- RUNTIME_ASSERT(!object->IsJSFunction());
- return *Execution::GetFunctionDelegate(isolate, object);
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetConstructorDelegate) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- RUNTIME_ASSERT(!object->IsJSFunction());
- return *Execution::GetConstructorDelegate(isolate, object);
-}
-
-
RUNTIME_FUNCTION(Runtime_GetOriginalConstructor) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
@@ -621,9 +552,36 @@ RUNTIME_FUNCTION(Runtime_GetOriginalConstructor) {
}
+// TODO(bmeurer): Kill %_CallFunction ASAP as it is almost never used
+// correctly because of the weird semantics underneath.
RUNTIME_FUNCTION(Runtime_CallFunction) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_Call(args, isolate);
+ HandleScope scope(isolate);
+ DCHECK(args.length() >= 2);
+ int argc = args.length() - 2;
+ CONVERT_ARG_CHECKED(JSReceiver, fun, argc + 1);
+ Object* receiver = args[0];
+
+ // If there are too many arguments, allocate argv via malloc.
+ const int argv_small_size = 10;
+ Handle<Object> argv_small_buffer[argv_small_size];
+ base::SmartArrayPointer<Handle<Object>> argv_large_buffer;
+ Handle<Object>* argv = argv_small_buffer;
+ if (argc > argv_small_size) {
+ argv = new Handle<Object>[argc];
+ if (argv == NULL) return isolate->StackOverflow();
+ argv_large_buffer = base::SmartArrayPointer<Handle<Object>>(argv);
+ }
+
+ for (int i = 0; i < argc; ++i) {
+ argv[i] = Handle<Object>(args[1 + i], isolate);
+ }
+
+ Handle<JSReceiver> hfun(fun);
+ Handle<Object> hreceiver(receiver, isolate);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Execution::Call(isolate, hfun, hreceiver, argc, argv));
+ return *result;
}
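The retained %_CallFunction body keeps the classic small-size optimization that the rewritten Runtime_Call above traded for ScopedVector: ten handles on the stack for the common case, a heap allocation owned by a smart pointer otherwise. The generic form of the pattern:

#include <cstddef>
#include <memory>

// Stack storage for small element counts, heap storage otherwise; the smart
// pointer frees the heap case automatically, mirroring SmartArrayPointer.
template <typename T, size_t kSmallSize = 10>
class SmallBuffer {
 public:
  explicit SmallBuffer(size_t n)
      : heap_(n > kSmallSize ? new T[n] : nullptr) {}
  T* data() { return heap_ ? heap_.get() : small_; }

 private:
  T small_[kSmallSize];
  std::unique_ptr<T[]> heap_;
};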
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-i18n.cc
index 73d511074a..8b0c98f161 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-i18n.cc
@@ -11,6 +11,7 @@
#include "src/arguments.h"
#include "src/factory.h"
#include "src/i18n.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "unicode/brkiter.h"
@@ -351,8 +352,7 @@ RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Execution::ToNumber(isolate, date));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(date));
icu::SimpleDateFormat* date_format =
DateFormat::UnpackDateFormat(isolate, date_format_holder);
@@ -445,8 +445,7 @@ RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
Handle<Object> value;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value,
- Execution::ToNumber(isolate, number));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, Object::ToNumber(number));
icu::DecimalFormat* number_format =
NumberFormat::UnpackNumberFormat(isolate, number_format_holder);
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 01e3e913af..90d5532af3 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -9,6 +9,7 @@
#include "src/conversions.h"
#include "src/debug/debug.h"
#include "src/frames-inl.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/parser.h"
#include "src/prettyprinter.h"
@@ -24,32 +25,53 @@ RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
}
-RUNTIME_FUNCTION(Runtime_ImportToRuntime) {
+RUNTIME_FUNCTION(Runtime_ExportFromRuntime) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
- Bootstrapper::ImportNatives(isolate, container);
- return isolate->heap()->undefined_value();
+ JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
+ "ExportFromRuntime");
+ Bootstrapper::ExportFromRuntime(isolate, container);
+ JSObject::MigrateSlowToFast(container, 0, "ExportFromRuntime");
+ return *container;
}
-RUNTIME_FUNCTION(Runtime_ImportExperimentalToRuntime) {
+RUNTIME_FUNCTION(Runtime_ExportExperimentalFromRuntime) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
- Bootstrapper::ImportExperimentalNatives(isolate, container);
- return isolate->heap()->undefined_value();
+ JSObject::NormalizeProperties(container, KEEP_INOBJECT_PROPERTIES, 10,
+ "ExportExperimentalFromRuntime");
+ Bootstrapper::ExportExperimentalFromRuntime(isolate, container);
+ JSObject::MigrateSlowToFast(container, 0, "ExportExperimentalFromRuntime");
+ return *container;
}
-RUNTIME_FUNCTION(Runtime_InstallJSBuiltins) {
+RUNTIME_FUNCTION(Runtime_InstallToContext) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, container, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+ RUNTIME_ASSERT(array->HasFastElements());
RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
- Bootstrapper::InstallJSBuiltins(isolate, container);
+ Handle<Context> native_context = isolate->native_context();
+ Handle<FixedArray> fixed_array(FixedArray::cast(array->elements()));
+ int length = Smi::cast(array->length())->value();
+ for (int i = 0; i < length; i += 2) {
+ RUNTIME_ASSERT(fixed_array->get(i)->IsString());
+ Handle<String> name(String::cast(fixed_array->get(i)));
+ RUNTIME_ASSERT(fixed_array->get(i + 1)->IsJSObject());
+ Handle<JSObject> object(JSObject::cast(fixed_array->get(i + 1)));
+ int index = Context::ImportedFieldIndexForName(name);
+ if (index == Context::kNotFound) {
+ index = Context::IntrinsicIndexForName(name);
+ }
+ RUNTIME_ASSERT(index != Context::kNotFound);
+ native_context->set(index, *object);
+ }
return isolate->heap()->undefined_value();
}
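%InstallToContext consumes a flat [name0, object0, name1, object1, ...] array and writes each object into the native-context slot its name resolves to, trying imported fields first and intrinsics second. A hypothetical C++-side construction of such a pairs array (in practice the JS natives build it; the name must resolve to a context slot or the RUNTIME_ASSERT fires):

Handle<FixedArray> MakeInstallPairs(Isolate* isolate, const char* name,
                                    Handle<JSObject> object) {
  Handle<FixedArray> pairs = isolate->factory()->NewFixedArray(2);
  pairs->set(0, *isolate->factory()->InternalizeUtf8String(name));
  pairs->set(1, *object);
  return pairs;
}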
@@ -68,6 +90,13 @@ RUNTIME_FUNCTION(Runtime_ReThrow) {
}
+RUNTIME_FUNCTION(Runtime_ThrowStackOverflow) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(0, args.length());
+ return isolate->StackOverflow();
+}
+
+
RUNTIME_FUNCTION(Runtime_UnwindAndFindExceptionHandler) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
@@ -172,12 +201,6 @@ RUNTIME_FUNCTION(Runtime_PromiseRevokeReject) {
}
-RUNTIME_FUNCTION(Runtime_PromiseHasHandlerSymbol) {
- DCHECK(args.length() == 0);
- return isolate->heap()->promise_has_handler_symbol();
-}
-
-
RUNTIME_FUNCTION(Runtime_StackGuard) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
@@ -245,29 +268,6 @@ RUNTIME_FUNCTION(Runtime_CollectStackTrace) {
}
-RUNTIME_FUNCTION(Runtime_RenderCallSite) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 0);
- MessageLocation location;
- isolate->ComputeLocation(&location);
- if (location.start_pos() == -1) return isolate->heap()->empty_string();
-
- Zone zone;
- base::SmartPointer<ParseInfo> info(
- location.function()->shared()->is_function()
- ? new ParseInfo(&zone, location.function())
- : new ParseInfo(&zone, location.script()));
-
- if (!Parser::ParseStatic(info.get())) {
- isolate->clear_pending_exception();
- return isolate->heap()->empty_string();
- }
- CallPrinter printer(isolate, &zone);
- const char* string = printer.Print(info->literal(), location.start_pos());
- return *isolate->factory()->NewStringFromAsciiChecked(string);
-}
-
-
RUNTIME_FUNCTION(Runtime_MessageGetStartPosition) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -311,16 +311,14 @@ RUNTIME_FUNCTION(Runtime_FormatMessageString) {
}
-#define CALLSITE_GET(NAME, RETURN) \
- RUNTIME_FUNCTION(Runtime_CallSite##NAME##RT) { \
- HandleScope scope(isolate); \
- DCHECK(args.length() == 3); \
- CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 0); \
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 1); \
- CONVERT_INT32_ARG_CHECKED(pos, 2); \
- Handle<String> result; \
- CallSite call_site(receiver, fun, pos); \
- return RETURN(call_site.NAME(isolate), isolate); \
+#define CALLSITE_GET(NAME, RETURN) \
+ RUNTIME_FUNCTION(Runtime_CallSite##NAME##RT) { \
+ HandleScope scope(isolate); \
+ DCHECK(args.length() == 1); \
+ CONVERT_ARG_HANDLE_CHECKED(JSObject, call_site_obj, 0); \
+ Handle<String> result; \
+ CallSite call_site(isolate, call_site_obj); \
+ return RETURN(call_site.NAME(), isolate); \
}
static inline Object* ReturnDereferencedHandle(Handle<Object> obj,
@@ -413,5 +411,39 @@ RUNTIME_FUNCTION(Runtime_GetCodeStubExportsObject) {
return isolate->heap()->code_stub_exports_object();
}
+
+namespace {
+
+Handle<String> RenderCallSite(Isolate* isolate, Handle<Object> object) {
+ MessageLocation location;
+ if (isolate->ComputeLocation(&location)) {
+ Zone zone;
+ base::SmartPointer<ParseInfo> info(
+ location.function()->shared()->is_function()
+ ? new ParseInfo(&zone, location.function())
+ : new ParseInfo(&zone, location.script()));
+ if (Parser::ParseStatic(info.get())) {
+ CallPrinter printer(isolate, &zone);
+ const char* string = printer.Print(info->literal(), location.start_pos());
+ return isolate->factory()->NewStringFromAsciiChecked(string);
+ } else {
+ isolate->clear_pending_exception();
+ }
+ }
+ return Object::TypeOf(isolate, object);
+}
+
+} // namespace
+
+
+RUNTIME_FUNCTION(Runtime_ThrowCalledNonCallable) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<String> callsite = RenderCallSite(isolate, object);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError(MessageTemplate::kCalledNonCallable, callsite));
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
new file mode 100644
index 0000000000..e0a171267f
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -0,0 +1,125 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/runtime/runtime-utils.h"
+
+#include "src/arguments.h"
+#include "src/isolate-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterEquals) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Maybe<bool> result = Object::Equals(x, y);
+ if (result.IsJust()) {
+ return isolate->heap()->ToBoolean(result.FromJust());
+ } else {
+ return isolate->heap()->exception();
+ }
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterNotEquals) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Maybe<bool> result = Object::Equals(x, y);
+ if (result.IsJust()) {
+ return isolate->heap()->ToBoolean(!result.FromJust());
+ } else {
+ return isolate->heap()->exception();
+ }
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterLessThan) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Maybe<bool> result = Object::LessThan(x, y);
+ if (result.IsJust()) {
+ return isolate->heap()->ToBoolean(result.FromJust());
+ } else {
+ return isolate->heap()->exception();
+ }
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterGreaterThan) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Maybe<bool> result = Object::GreaterThan(x, y);
+ if (result.IsJust()) {
+ return isolate->heap()->ToBoolean(result.FromJust());
+ } else {
+ return isolate->heap()->exception();
+ }
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterLessThanOrEqual) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Maybe<bool> result = Object::LessThanOrEqual(x, y);
+ if (result.IsJust()) {
+ return isolate->heap()->ToBoolean(result.FromJust());
+ } else {
+ return isolate->heap()->exception();
+ }
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterGreaterThanOrEqual) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Maybe<bool> result = Object::GreaterThanOrEqual(x, y);
+ if (result.IsJust()) {
+ return isolate->heap()->ToBoolean(result.FromJust());
+ } else {
+ return isolate->heap()->exception();
+ }
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterStrictEquals) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(Object, x, 0);
+ CONVERT_ARG_CHECKED(Object, y, 1);
+ return isolate->heap()->ToBoolean(x->StrictEquals(y));
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterStrictNotEquals) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(Object, x, 0);
+ CONVERT_ARG_CHECKED(Object, y, 1);
+ return isolate->heap()->ToBoolean(!x->StrictEquals(y));
+}
+
+
+RUNTIME_FUNCTION(Runtime_InterpreterToBoolean) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, x, 0);
+ return isolate->heap()->ToBoolean(x->BooleanValue());
+}
+
+
+} // namespace internal
+} // namespace v8
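The six relational entry points in this new file are identical except for which Object:: predicate they call. An editorial sketch (not part of the patch) of how they could share one helper:

typedef Maybe<bool> (*CompareFn)(Handle<Object> x, Handle<Object> y);

Object* CompareHelper(Isolate* isolate, Handle<Object> x, Handle<Object> y,
                      CompareFn compare, bool negate) {
  Maybe<bool> result = compare(x, y);
  if (!result.IsJust()) return isolate->heap()->exception();
  // `negate` flips the answer for the NotEquals flavor.
  return isolate->heap()->ToBoolean(result.FromJust() != negate);
}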
diff --git a/deps/v8/src/runtime/runtime-json.cc b/deps/v8/src/runtime/runtime-json.cc
index 64a42bfede..07232d59b8 100644
--- a/deps/v8/src/runtime/runtime-json.cc
+++ b/deps/v8/src/runtime/runtime-json.cc
@@ -6,6 +6,7 @@
#include "src/arguments.h"
#include "src/char-predicates-inl.h"
+#include "src/isolate-inl.h"
#include "src/json-parser.h"
#include "src/json-stringifier.h"
#include "src/objects-inl.h"
@@ -38,9 +39,11 @@ RUNTIME_FUNCTION(Runtime_BasicJSONStringify) {
RUNTIME_FUNCTION(Runtime_ParseJson) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
-
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ Handle<String> source;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, source,
+ Object::ToString(isolate, object));
source = String::Flatten(source);
// Optimized fast case where we only have Latin1 characters.
Handle<Object> result;
@@ -50,5 +53,6 @@ RUNTIME_FUNCTION(Runtime_ParseJson) {
: JsonParser<false>::Parse(source));
return *result;
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index f434747e28..903e2feb53 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -7,6 +7,7 @@
#include "src/allocation-site-scopes.h"
#include "src/arguments.h"
#include "src/ast.h"
+#include "src/isolate-inl.h"
#include "src/parser.h"
#include "src/runtime/runtime.h"
@@ -33,12 +34,12 @@ static Handle<Map> ComputeObjectLiteralMap(
}
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
- Isolate* isolate, Handle<FixedArray> literals,
+ Isolate* isolate, Handle<LiteralsArray> literals,
Handle<FixedArray> constant_properties, bool is_strong);
MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
- Isolate* isolate, Handle<FixedArray> literals,
+ Isolate* isolate, Handle<LiteralsArray> literals,
Handle<FixedArray> constant_properties, bool should_have_fast_elements,
bool has_function_literal, bool is_strong) {
Handle<Context> context = isolate->native_context();
@@ -138,7 +139,7 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
- Isolate* isolate, Handle<FixedArray> literals,
+ Isolate* isolate, Handle<LiteralsArray> literals,
Handle<FixedArray> elements, bool is_strong) {
// Create the JSArray.
Handle<JSFunction> constructor = isolate->array_function();
@@ -214,7 +215,7 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
- Isolate* isolate, Handle<FixedArray> literals, Handle<FixedArray> array,
+ Isolate* isolate, Handle<LiteralsArray> literals, Handle<FixedArray> array,
bool is_strong) {
Handle<FixedArray> elements = CompileTimeValue::GetElements(array);
const bool kHasNoFunctionLiteral = false;
@@ -238,7 +239,7 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
+ CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
@@ -247,10 +248,11 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
bool is_strong = (flags & ObjectLiteral::kIsStrong) != 0;
- RUNTIME_ASSERT(literals_index >= 0 && literals_index < literals->length());
+ RUNTIME_ASSERT(literals_index >= 0 &&
+ literals_index < literals->literals_count());
// Check if boilerplate exists. If not, create it first.
- Handle<Object> literal_site(literals->get(literals_index), isolate);
+ Handle<Object> literal_site(literals->literal(literals_index), isolate);
Handle<AllocationSite> site;
Handle<JSObject> boilerplate;
if (*literal_site == isolate->heap()->undefined_value()) {
@@ -269,7 +271,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
creation_context.ExitScope(site, boilerplate);
// Update the functions literal and return the boilerplate.
- literals->set(literals_index, *site);
+ literals->set_literal(literals_index, *site);
} else {
site = Handle<AllocationSite>::cast(literal_site);
boilerplate =
@@ -288,10 +290,10 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
- Isolate* isolate, Handle<FixedArray> literals, int literals_index,
+ Isolate* isolate, Handle<LiteralsArray> literals, int literals_index,
Handle<FixedArray> elements, bool is_strong) {
// Check if boilerplate exists. If not, create it first.
- Handle<Object> literal_site(literals->get(literals_index), isolate);
+ Handle<Object> literal_site(literals->literal(literals_index), isolate);
Handle<AllocationSite> site;
if (*literal_site == isolate->heap()->undefined_value()) {
DCHECK(*elements != isolate->heap()->empty_fixed_array());
@@ -310,7 +312,7 @@ MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
}
creation_context.ExitScope(site, Handle<JSObject>::cast(boilerplate));
- literals->set(literals_index, *site);
+ literals->set_literal(literals_index, *site);
} else {
site = Handle<AllocationSite>::cast(literal_site);
}
@@ -319,13 +321,12 @@ MUST_USE_RESULT static MaybeHandle<AllocationSite> GetLiteralAllocationSite(
}
-static MaybeHandle<JSObject> CreateArrayLiteralImpl(Isolate* isolate,
- Handle<FixedArray> literals,
- int literals_index,
- Handle<FixedArray> elements,
- int flags) {
+static MaybeHandle<JSObject> CreateArrayLiteralImpl(
+ Isolate* isolate, Handle<LiteralsArray> literals, int literals_index,
+ Handle<FixedArray> elements, int flags) {
RUNTIME_ASSERT_HANDLIFIED(
- literals_index >= 0 && literals_index < literals->length(), JSObject);
+ literals_index >= 0 && literals_index < literals->literals_count(),
+ JSObject);
Handle<AllocationSite> site;
bool is_strong = (flags & ArrayLiteral::kIsStrong) != 0;
ASSIGN_RETURN_ON_EXCEPTION(
@@ -351,7 +352,7 @@ static MaybeHandle<JSObject> CreateArrayLiteralImpl(Isolate* isolate,
RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
+ CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
@@ -367,7 +368,7 @@ RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
RUNTIME_FUNCTION(Runtime_CreateArrayLiteralStubBailout) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
+ CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 0);
CONVERT_SMI_ARG_CHECKED(literals_index, 1);
CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
@@ -386,10 +387,10 @@ RUNTIME_FUNCTION(Runtime_StoreArrayLiteralElement) {
CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
CONVERT_SMI_ARG_CHECKED(store_index, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 3);
+ CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 3);
CONVERT_SMI_ARG_CHECKED(literal_index, 4);
- Object* raw_literal_cell = literals->get(literal_index);
+ Object* raw_literal_cell = literals->literal(literal_index);
JSArray* boilerplate = NULL;
if (raw_literal_cell->IsAllocationSite()) {
AllocationSite* site = AllocationSite::cast(raw_literal_cell);
@@ -423,10 +424,8 @@ RUNTIME_FUNCTION(Runtime_StoreArrayLiteralElement) {
? FAST_HOLEY_ELEMENTS
: FAST_ELEMENTS;
JSObject::TransitionElementsKind(object, transitioned_kind);
- ElementsKind boilerplate_elements_kind =
- boilerplate_object->GetElementsKind();
- if (IsMoreGeneralElementsKindTransition(boilerplate_elements_kind,
- transitioned_kind)) {
+ if (IsMoreGeneralElementsKindTransition(
+ boilerplate_object->GetElementsKind(), transitioned_kind)) {
JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
}
}
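
The recurring edit throughout runtime-literals.cc swaps the untyped FixedArray interface (get/set/length) for the typed LiteralsArray one (literal/set_literal/literals_count), so literal slots are only reachable through bounds-checked, purpose-named accessors. A minimal sketch of such a typed wrapper, with std::vector and void* standing in for the real heap array and AllocationSite pointers:

#include <cassert>
#include <vector>

class LiteralsArray {
 public:
  explicit LiteralsArray(int count) : slots_(count, nullptr) {}
  int literals_count() const { return static_cast<int>(slots_.size()); }
  void* literal(int index) const {
    assert(index >= 0 && index < literals_count());
    return slots_[index];
  }
  void set_literal(int index, void* site) {
    assert(index >= 0 && index < literals_count());
    slots_[index] = site;
  }

 private:
  std::vector<void*> slots_;  // boilerplates / AllocationSites in V8 proper
};

int main() {
  LiteralsArray literals(4);
  int boilerplate = 0;
  literals.set_literal(2, &boilerplate);  // cf. literals->set_literal(index, *site)
  assert(literals.literal(2) == &boilerplate);
}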
diff --git a/deps/v8/src/runtime/runtime-liveedit.cc b/deps/v8/src/runtime/runtime-liveedit.cc
index 3b8dad9b5e..947ef2c29b 100644
--- a/deps/v8/src/runtime/runtime-liveedit.cc
+++ b/deps/v8/src/runtime/runtime-liveedit.cc
@@ -9,6 +9,7 @@
#include "src/debug/debug-frames.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
+#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"
namespace v8 {
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index 49734ba8dd..177b3ff584 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -9,13 +9,6 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
-
-#ifndef _STLP_VENDOR_CSTD
-// STLPort doesn't import fpclassify and isless into the std namespace.
-using std::fpclassify;
-using std::isless;
-#endif
-
namespace v8 {
namespace internal {
@@ -112,81 +105,11 @@ RUNTIME_FUNCTION(Runtime_IsValidSmi) {
}
-static bool AreDigits(const uint8_t* s, int from, int to) {
- for (int i = from; i < to; i++) {
- if (s[i] < '0' || s[i] > '9') return false;
- }
-
- return true;
-}
-
-
-static int ParseDecimalInteger(const uint8_t* s, int from, int to) {
- DCHECK(to - from < 10); // Overflow is not possible.
- DCHECK(from < to);
- int d = s[from] - '0';
-
- for (int i = from + 1; i < to; i++) {
- d = 10 * d + (s[i] - '0');
- }
-
- return d;
-}
-
-
RUNTIME_FUNCTION(Runtime_StringToNumber) {
HandleScope handle_scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
- subject = String::Flatten(subject);
-
- // Fast case: short integer or some sorts of junk values.
- if (subject->IsSeqOneByteString()) {
- int len = subject->length();
- if (len == 0) return Smi::FromInt(0);
-
- DisallowHeapAllocation no_gc;
- uint8_t const* data = Handle<SeqOneByteString>::cast(subject)->GetChars();
- bool minus = (data[0] == '-');
- int start_pos = (minus ? 1 : 0);
-
- if (start_pos == len) {
- return isolate->heap()->nan_value();
- } else if (data[start_pos] > '9') {
- // Fast check for a junk value. A valid string may start from a
- // whitespace, a sign ('+' or '-'), the decimal point, a decimal digit
- // or the 'I' character ('Infinity'). All of that have codes not greater
- // than '9' except 'I' and &nbsp;.
- if (data[start_pos] != 'I' && data[start_pos] != 0xa0) {
- return isolate->heap()->nan_value();
- }
- } else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) {
- // The maximal/minimal smi has 10 digits. If the string has less digits
- // we know it will fit into the smi-data type.
- int d = ParseDecimalInteger(data, start_pos, len);
- if (minus) {
- if (d == 0) return isolate->heap()->minus_zero_value();
- d = -d;
- } else if (!subject->HasHashCode() && len <= String::kMaxArrayIndexSize &&
- (len == 1 || data[0] != '0')) {
- // String hash is not calculated yet but all the data are present.
- // Update the hash field to speed up sequential convertions.
- uint32_t hash = StringHasher::MakeArrayIndexHash(d, len);
-#ifdef DEBUG
- subject->Hash(); // Force hash calculation.
- DCHECK_EQ(static_cast<int>(subject->hash_field()),
- static_cast<int>(hash));
-#endif
- subject->set_hash_field(hash);
- }
- return Smi::FromInt(d);
- }
- }
-
- // Slower case.
- int flags = ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY;
- return *isolate->factory()->NewNumber(
- StringToDouble(isolate->unicode_cache(), subject, flags));
+ return *String::ToNumber(subject);
}
@@ -248,15 +171,6 @@ RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
}
-RUNTIME_FUNCTION(Runtime_NumberToInteger) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(number, 0);
- return *isolate->factory()->NewNumber(DoubleToInteger(number));
-}
-
-
RUNTIME_FUNCTION(Runtime_NumberToIntegerMapMinusZero) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -290,65 +204,6 @@ RUNTIME_FUNCTION(Runtime_NumberToSmi) {
}
-RUNTIME_FUNCTION(Runtime_NumberAdd) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return *isolate->factory()->NewNumber(x + y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberSub) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return *isolate->factory()->NewNumber(x - y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberMul) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return *isolate->factory()->NewNumber(x * y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberUnaryMinus) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return *isolate->factory()->NewNumber(-x);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberDiv) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return *isolate->factory()->NewNumber(x / y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberMod) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return *isolate->factory()->NewNumber(modulo(x, y));
-}
-
-
RUNTIME_FUNCTION(Runtime_NumberImul) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -362,100 +217,6 @@ RUNTIME_FUNCTION(Runtime_NumberImul) {
}
-RUNTIME_FUNCTION(Runtime_NumberOr) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return *isolate->factory()->NewNumberFromInt(x | y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberAnd) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return *isolate->factory()->NewNumberFromInt(x & y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberXor) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return *isolate->factory()->NewNumberFromInt(x ^ y);
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberShl) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return *isolate->factory()->NewNumberFromInt(x << (y & 0x1f));
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberShr) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return *isolate->factory()->NewNumberFromUint(x >> (y & 0x1f));
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberSar) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
- CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
- return *isolate->factory()->NewNumberFromInt(
- ArithmeticShiftRight(x, y & 0x1f));
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberEquals) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- if (std::isnan(x)) return Smi::FromInt(NOT_EQUAL);
- if (std::isnan(y)) return Smi::FromInt(NOT_EQUAL);
- if (x == y) return Smi::FromInt(EQUAL);
- Object* result;
- if ((fpclassify(x) == FP_ZERO) && (fpclassify(y) == FP_ZERO)) {
- result = Smi::FromInt(EQUAL);
- } else {
- result = Smi::FromInt(NOT_EQUAL);
- }
- return result;
-}
-
-
-RUNTIME_FUNCTION(Runtime_NumberCompare) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 3);
-
- CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, uncomparable_result, 2)
- if (std::isnan(x) || std::isnan(y)) return *uncomparable_result;
- if (x == y) return Smi::FromInt(EQUAL);
- if (isless(x, y)) return Smi::FromInt(LESS);
- return Smi::FromInt(GREATER);
-}
-
-
// Compare two Smis as if they were converted to strings and then
// compared lexicographically.
RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
@@ -547,19 +308,11 @@ RUNTIME_FUNCTION(Runtime_IsSmi) {
}
-RUNTIME_FUNCTION(Runtime_IsNonNegativeSmi) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- return isolate->heap()->ToBoolean(obj->IsSmi() &&
- Smi::cast(obj)->value() >= 0);
-}
-
-
RUNTIME_FUNCTION(Runtime_GetRootNaN) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
return isolate->heap()->nan_value();
}
+
} // namespace internal
} // namespace v8
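
The large block deleted from Runtime_StringToNumber is not gone; it now lives behind String::ToNumber. Its core trick is that a decimal string of fewer than 10 digits always fits a 31-bit smi, so it can be parsed with plain integer arithmetic and no overflow checks. A standalone sketch of that fast path (V8 additionally special-cases "-0", which must yield minus zero, caches array-index hashes, and falls back to a slow path on anything non-trivial):

#include <cstdint>
#include <iostream>
#include <optional>
#include <string>

std::optional<int32_t> FastStringToSmi(const std::string& s) {
  size_t start = (!s.empty() && s[0] == '-') ? 1 : 0;
  size_t len = s.size() - start;
  if (len == 0 || len >= 10) return std::nullopt;  // empty or too long: slow path
  int32_t d = 0;
  for (size_t i = start; i < s.size(); ++i) {
    if (s[i] < '0' || s[i] > '9') return std::nullopt;  // junk: slow path
    d = 10 * d + (s[i] - '0');  // at most 9 digits: cannot overflow int32_t
  }
  // V8 also handles "-0" specially here, returning the minus-zero heap value.
  return start ? -d : d;
}

int main() {
  std::cout << FastStringToSmi("123456789").value_or(-1) << "\n";  // 123456789
  std::cout << FastStringToSmi("9999999999").has_value() << "\n";  // 0: slow path
}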
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 26f74efd15..4782a31430 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -7,6 +7,7 @@
#include "src/arguments.h"
#include "src/bootstrapper.h"
#include "src/debug/debug.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/runtime/runtime.h"
@@ -14,18 +15,6 @@ namespace v8 {
namespace internal {
-MaybeHandle<Name> Runtime::ToName(Isolate* isolate, Handle<Object> key) {
- if (key->IsName()) {
- return Handle<Name>::cast(key);
- } else {
- Handle<Object> converted;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, converted,
- Execution::ToString(isolate, key), Name);
- return Handle<Name>::cast(converted);
- }
-}
-
-
MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
@@ -45,7 +34,8 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
// Convert the key to a name - possibly by calling back into JavaScript.
Handle<Name> name;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, key), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
+ Object);
// Check if the name is trivially convertible to an index and get
// the element if so.
@@ -59,9 +49,10 @@ MaybeHandle<Object> Runtime::GetObjectProperty(Isolate* isolate,
}
-MaybeHandle<Object> Runtime::KeyedGetObjectProperty(
- Isolate* isolate, Handle<Object> receiver_obj, Handle<Object> key_obj,
- LanguageMode language_mode) {
+static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
+ Handle<Object> receiver_obj,
+ Handle<Object> key_obj,
+ LanguageMode language_mode) {
// Fast cases for getting named properties of the receiver JSObject
// itself.
//
@@ -135,7 +126,8 @@ MaybeHandle<Object> Runtime::KeyedGetObjectProperty(
}
// Fall back to GetObjectProperty.
- return GetObjectProperty(isolate, receiver_obj, key_obj, language_mode);
+ return Runtime::GetObjectProperty(isolate, receiver_obj, key_obj,
+ language_mode);
}
@@ -150,7 +142,8 @@ MaybeHandle<Object> Runtime::DeleteObjectProperty(Isolate* isolate,
}
Handle<Name> name;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, key), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
+ Object);
return JSReceiver::DeletePropertyOrElement(receiver, name, language_mode);
}
@@ -175,7 +168,8 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
}
Handle<Name> name;
- ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, key), Object);
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, name, Object::ToName(isolate, key),
+ Object);
LookupIterator it = LookupIterator::PropertyOrElement(isolate, object, name);
return Object::SetProperty(&it, value, language_mode,
@@ -183,34 +177,24 @@ MaybeHandle<Object> Runtime::SetObjectProperty(Isolate* isolate,
}
-MaybeHandle<Object> Runtime::GetPrototype(Isolate* isolate,
- Handle<Object> obj) {
+RUNTIME_FUNCTION(Runtime_GetPrototype) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
// We don't expect access checks to be needed on JSProxy objects.
DCHECK(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
do {
if (PrototypeIterator::GetCurrent(iter)->IsAccessCheckNeeded() &&
- !isolate->MayAccess(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)))) {
- return isolate->factory()->null_value();
+ !isolate->MayAccess(PrototypeIterator::GetCurrent<JSObject>(iter))) {
+ return isolate->heap()->null_value();
}
iter.AdvanceIgnoringProxies();
if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- return PrototypeIterator::GetCurrent(iter);
+ return *PrototypeIterator::GetCurrent(iter);
}
} while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
- return PrototypeIterator::GetCurrent(iter);
-}
-
-
-RUNTIME_FUNCTION(Runtime_GetPrototype) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
- Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
- Runtime::GetPrototype(isolate, obj));
- return *result;
+ return *PrototypeIterator::GetCurrent(iter);
}
@@ -262,16 +246,6 @@ RUNTIME_FUNCTION(Runtime_SetPrototype) {
}
-RUNTIME_FUNCTION(Runtime_IsInPrototypeChain) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 2);
- // See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
- CONVERT_ARG_CHECKED(Object, O, 0);
- CONVERT_ARG_CHECKED(Object, V, 1);
- return isolate->heap()->ToBoolean(V->HasInPrototypeChain(isolate, O));
-}
-
-
// Enumerator used as indices into the array returned from GetOwnProperty
enum PropertyDescriptorIndices {
IS_ACCESSOR_INDEX,
@@ -423,8 +397,7 @@ RUNTIME_FUNCTION(Runtime_LoadGlobalViaContext) {
DCHECK(script_context->get(slot)->IsPropertyCell());
// Lookup the named property on the global object.
- Handle<ScopeInfo> scope_info(ScopeInfo::cast(script_context->extension()),
- isolate);
+ Handle<ScopeInfo> scope_info(script_context->scope_info(), isolate);
Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
Handle<GlobalObject> global_object(script_context->global_object(), isolate);
LookupIterator it(global_object, name, LookupIterator::HIDDEN);
@@ -458,8 +431,7 @@ Object* StoreGlobalViaContext(Isolate* isolate, int slot, Handle<Object> value,
DCHECK(script_context->get(slot)->IsPropertyCell());
// Lookup the named property on the global object.
- Handle<ScopeInfo> scope_info(ScopeInfo::cast(script_context->extension()),
- isolate);
+ Handle<ScopeInfo> scope_info(script_context->scope_info(), isolate);
Handle<Name> name(scope_info->ContextSlotName(slot), isolate);
Handle<GlobalObject> global_object(script_context->global_object(), isolate);
LookupIterator it(global_object, name, LookupIterator::HIDDEN);
@@ -549,7 +521,7 @@ RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::KeyedGetObjectProperty(isolate, receiver_obj, key_obj, SLOPPY));
+ KeyedGetObjectProperty(isolate, receiver_obj, key_obj, SLOPPY));
return *result;
}
@@ -564,7 +536,7 @@ RUNTIME_FUNCTION(Runtime_KeyedGetPropertyStrong) {
Handle<Object> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
- Runtime::KeyedGetObjectProperty(isolate, receiver_obj, key_obj, STRONG));
+ KeyedGetObjectProperty(isolate, receiver_obj, key_obj, STRONG));
return *result;
}
@@ -773,25 +745,28 @@ RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
}
+// ES6 section 12.9.3, operator in.
RUNTIME_FUNCTION(Runtime_HasProperty) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
-
- Maybe<bool> maybe = JSReceiver::HasProperty(receiver, key);
- if (!maybe.IsJust()) return isolate->heap()->exception();
- return isolate->heap()->ToBoolean(maybe.FromJust());
-}
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, key, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 1);
+ // Check that {object} is actually a receiver.
+ if (!object->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidInOperatorUse, key, object));
+ }
+ Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-RUNTIME_FUNCTION(Runtime_HasElement) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
- CONVERT_SMI_ARG_CHECKED(index, 1);
+ // Convert the {key} to a name.
+ Handle<Name> name;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, name,
+ Object::ToName(isolate, key));
- Maybe<bool> maybe = JSReceiver::HasElement(receiver, index);
+ // Lookup the {name} on {receiver}.
+ Maybe<bool> maybe = JSReceiver::HasProperty(receiver, name);
if (!maybe.IsJust()) return isolate->heap()->exception();
return isolate->heap()->ToBoolean(maybe.FromJust());
}
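
The rewritten Runtime_HasProperty above now implements the full ES6 'in' operator: the key comes first, a non-object right-hand side raises a TypeError, and the key is coerced with ToName before the lookup. A toy standalone model of those semantics (a std::map plays the receiver and a thrown exception plays the TypeError; none of this is V8 API):

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

using JSObject = std::map<std::string, int>;

bool HasProperty(const std::string& key, const JSObject* object) {
  if (object == nullptr) {
    // e.g. "'x' in 42" -> TypeError: invalid 'in' operand
    throw std::runtime_error("invalid 'in' operand");
  }
  return object->count(key) > 0;  // ToName is a no-op for string keys here
}

int main() {
  JSObject o{{"x", 1}};
  std::cout << HasProperty("x", &o) << "\n";  // 1
  try {
    HasProperty("x", nullptr);
  } catch (const std::exception& e) {
    std::cout << e.what() << "\n";
  }
}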
@@ -1021,17 +996,24 @@ RUNTIME_FUNCTION(Runtime_OwnKeys) {
// a fresh clone on each invocation.
int length = contents->length();
Handle<FixedArray> copy = isolate->factory()->NewFixedArray(length);
- for (int i = 0; i < length; i++) {
- Object* entry = contents->get(i);
- if (entry->IsString()) {
- copy->set(i, entry);
- } else {
- DCHECK(entry->IsNumber());
- HandleScope scope(isolate);
- Handle<Object> entry_handle(entry, isolate);
- Handle<Object> entry_str =
- isolate->factory()->NumberToString(entry_handle);
- copy->set(i, *entry_str);
+ int offset = 0;
+ // Use an outer loop to avoid creating too many handles in the current
+ // handle scope.
+ while (offset < length) {
+ HandleScope scope(isolate);
+ offset += 100;
+ int end = Min(offset, length);
+ for (int i = offset - 100; i < end; i++) {
+ Object* entry = contents->get(i);
+ if (entry->IsString()) {
+ copy->set(i, entry);
+ } else {
+ DCHECK(entry->IsNumber());
+ Handle<Object> entry_handle(entry, isolate);
+ Handle<Object> entry_str =
+ isolate->factory()->NumberToString(entry_handle);
+ copy->set(i, *entry_str);
+ }
}
}
return *isolate->factory()->NewJSArrayWithElements(copy);
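
The Runtime_OwnKeys rewrite above batches the copy loop in chunks of 100, opening a fresh HandleScope per chunk so the number of live temporary handles stays bounded no matter how long the array is. The same pattern in isolation, with a toy Scope type standing in for HandleScope:

#include <algorithm>
#include <iostream>
#include <vector>

struct Scope {  // stands in for HandleScope
  ~Scope() { /* all handles created inside are released here */ }
};

void ProcessInBatches(const std::vector<int>& contents) {
  int length = static_cast<int>(contents.size());
  int offset = 0;
  while (offset < length) {
    Scope scope;  // fresh scope per batch of 100
    offset += 100;
    int end = std::min(offset, length);
    for (int i = offset - 100; i < end; i++) {
      // per-element work that may allocate handles goes here
    }
  }
}

int main() {
  ProcessInBatches(std::vector<int>(250));  // runs 3 batches: 100, 100, 50
  std::cout << "done\n";
}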
@@ -1050,14 +1032,6 @@ RUNTIME_FUNCTION(Runtime_ToFastProperties) {
}
-RUNTIME_FUNCTION(Runtime_NewStringWrapper) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, value, 0);
- return *Object::ToObject(isolate, value).ToHandleChecked();
-}
-
-
RUNTIME_FUNCTION(Runtime_AllocateHeapNumber) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
@@ -1082,9 +1056,8 @@ static Object* Runtime_NewObjectHelper(Isolate* isolate,
Handle<JSFunction>::cast(original_constructor);
- // If function should not have prototype, construction is not allowed. In this
- // case generated code bailouts here, since function has no initial_map.
- if (!function->should_have_prototype() && !function->shared()->bound()) {
+ // Check that function is a constructor.
+ if (!function->IsConstructor()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotConstructor, constructor));
}
@@ -1112,7 +1085,7 @@ static Object* Runtime_NewObjectHelper(Isolate* isolate,
// The function should be compiled for the optimization hints to be
// available.
- Compiler::EnsureCompiled(function, CLEAR_EXCEPTION);
+ Compiler::Compile(function, CLEAR_EXCEPTION);
Handle<JSObject> result;
if (site.is_null()) {
@@ -1150,22 +1123,6 @@ RUNTIME_FUNCTION(Runtime_NewObject) {
}
-RUNTIME_FUNCTION(Runtime_NewObjectWithAllocationSite) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(Object, original_constructor, 2);
- CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 1);
- CONVERT_ARG_HANDLE_CHECKED(Object, feedback, 0);
- Handle<AllocationSite> site;
- if (feedback->IsAllocationSite()) {
- // The feedback can be an AllocationSite or undefined.
- site = Handle<AllocationSite>::cast(feedback);
- }
- return Runtime_NewObjectHelper(isolate, constructor, original_constructor,
- site);
-}
-
-
RUNTIME_FUNCTION(Runtime_FinalizeInstanceSize) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
@@ -1246,7 +1203,7 @@ RUNTIME_FUNCTION(Runtime_IsJSGlobalProxy) {
static bool IsValidAccessor(Handle<Object> obj) {
- return obj->IsUndefined() || obj->IsSpecFunction() || obj->IsNull();
+ return obj->IsUndefined() || obj->IsCallable() || obj->IsNull();
}
@@ -1375,21 +1332,6 @@ RUNTIME_FUNCTION(Runtime_ObjectEquals) {
}
-RUNTIME_FUNCTION(Runtime_IsObject) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(Object, obj, 0);
- if (!obj->IsHeapObject()) return isolate->heap()->false_value();
- if (obj->IsNull()) return isolate->heap()->true_value();
- if (obj->IsUndetectableObject()) return isolate->heap()->false_value();
- Map* map = HeapObject::cast(obj)->map();
- bool is_non_callable_spec_object =
- map->instance_type() >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE &&
- map->instance_type() <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE;
- return isolate->heap()->ToBoolean(is_non_callable_spec_object);
-}
-
-
RUNTIME_FUNCTION(Runtime_IsSpecObject) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -1461,6 +1403,105 @@ RUNTIME_FUNCTION(Runtime_ToObject) {
}
+RUNTIME_FUNCTION(Runtime_ToPrimitive) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::ToPrimitive(input));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ToPrimitive_Number) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::ToPrimitive(input, ToPrimitiveHint::kNumber));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ToPrimitive_String) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::ToPrimitive(input, ToPrimitiveHint::kString));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ToNumber) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Object::ToNumber(input));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ToInteger) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::ToInteger(isolate, input));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ToLength) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::ToLength(isolate, input));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ToString) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::ToString(isolate, input));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ToName) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::ToName(isolate, input));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_Equals) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ Maybe<bool> result = Object::Equals(x, y);
+ if (!result.IsJust()) return isolate->heap()->exception();
+ // TODO(bmeurer): Change this at some point to return true/false instead.
+ return Smi::FromInt(result.FromJust() ? EQUAL : NOT_EQUAL);
+}
+
+
RUNTIME_FUNCTION(Runtime_StrictEquals) {
SealHandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -1471,11 +1512,129 @@ RUNTIME_FUNCTION(Runtime_StrictEquals) {
}
+// TODO(bmeurer): Kill this special wrapper and use TF compatible LessThan,
+// GreaterThan, etc. which return true or false.
+RUNTIME_FUNCTION(Runtime_Compare) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, ncr, 2);
+ Maybe<ComparisonResult> result = Object::Compare(x, y);
+ if (result.IsJust()) {
+ switch (result.FromJust()) {
+ case ComparisonResult::kLessThan:
+ return Smi::FromInt(LESS);
+ case ComparisonResult::kEqual:
+ return Smi::FromInt(EQUAL);
+ case ComparisonResult::kGreaterThan:
+ return Smi::FromInt(GREATER);
+ case ComparisonResult::kUndefined:
+ return *ncr;
+ }
+ UNREACHABLE();
+ }
+ return isolate->heap()->exception();
+}
+
+
+// TODO(bmeurer): Kill this special wrapper and use TF compatible LessThan,
+// GreaterThan, etc. which return true or false.
+RUNTIME_FUNCTION(Runtime_Compare_Strong) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, x, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, y, 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, ncr, 2);
+ Maybe<ComparisonResult> result = Object::Compare(x, y, Strength::STRONG);
+ if (result.IsJust()) {
+ switch (result.FromJust()) {
+ case ComparisonResult::kLessThan:
+ return Smi::FromInt(LESS);
+ case ComparisonResult::kEqual:
+ return Smi::FromInt(EQUAL);
+ case ComparisonResult::kGreaterThan:
+ return Smi::FromInt(GREATER);
+ case ComparisonResult::kUndefined:
+ return *ncr;
+ }
+ UNREACHABLE();
+ }
+ return isolate->heap()->exception();
+}
+
+
+RUNTIME_FUNCTION(Runtime_InstanceOf) {
+ // ECMA-262, section 11.8.6, page 54.
+ HandleScope shs(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, callable, 1);
+ // {callable} must have a [[Call]] internal method.
+ if (!callable->IsCallable()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kInstanceofFunctionExpected, callable));
+ }
+ // If {object} is not a receiver, return false.
+ if (!object->IsJSReceiver()) {
+ return isolate->heap()->false_value();
+ }
+  // Check if {callable} is bound; if so, get [[BoundFunction]] from it and use
+  // that instead of {callable}.
+ if (callable->IsJSFunction()) {
+ Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
+ if (function->shared()->bound()) {
+ Handle<FixedArray> bindings(function->function_bindings(), isolate);
+ callable =
+ handle(bindings->get(JSFunction::kBoundFunctionIndex), isolate);
+ }
+ }
+ DCHECK(callable->IsCallable());
+ // Get the "prototype" of {callable}; raise an error if it's not a receiver.
+ Handle<Object> prototype;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, prototype,
+ Object::GetProperty(callable, isolate->factory()->prototype_string()));
+ if (!prototype->IsJSReceiver()) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kInstanceofNonobjectProto, prototype));
+ }
+ // Return whether or not {prototype} is in the prototype chain of {object}.
+ return isolate->heap()->ToBoolean(
+ object->HasInPrototypeChain(isolate, *prototype));
+}
+
+
+RUNTIME_FUNCTION(Runtime_HasInPrototypeChain) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_CHECKED(Object, object, 0);
+ CONVERT_ARG_CHECKED(Object, prototype, 1);
+ return isolate->heap()->ToBoolean(
+ object->HasInPrototypeChain(isolate, prototype));
+}
+
+
+// ES6 section 7.4.7 CreateIterResultObject ( value, done )
+RUNTIME_FUNCTION(Runtime_CreateIterResultObject) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, done, 1);
+ return *isolate->factory()->NewJSIteratorResult(value, done);
+}
+
+
RUNTIME_FUNCTION(Runtime_IsAccessCheckNeeded) {
SealHandleScope shs(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(Object, object, 0);
return isolate->heap()->ToBoolean(object->IsAccessCheckNeeded());
}
+
+
} // namespace internal
} // namespace v8
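
The new Runtime_InstanceOf above follows the ES6 ordinary instanceof algorithm: require a callable, unwrap bound functions, read the callable's "prototype" property (throwing if it is not a receiver), then walk the object's prototype chain looking for it. A standalone sketch of the chain walk at the end of that algorithm, with toy structs in place of heap objects:

#include <iostream>

struct Object {
  const Object* proto = nullptr;  // the [[Prototype]] slot
};

struct Callable {
  const Object* prototype = nullptr;  // the "prototype" property
};

bool InstanceOf(const Object* object, const Callable& callable) {
  if (object == nullptr) return false;  // non-receivers yield false, no throw
  for (const Object* p = object->proto; p != nullptr; p = p->proto) {
    if (p == callable.prototype) return true;  // cf. HasInPrototypeChain
  }
  return false;
}

int main() {
  Object proto;             // plays callable.prototype
  Object instance{&proto};  // instance ---> proto
  Callable ctor{&proto};
  std::cout << InstanceOf(&instance, ctor) << "\n";  // 1
  std::cout << InstanceOf(&proto, ctor) << "\n";     // 0: chain starts above it
}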
diff --git a/deps/v8/src/runtime/runtime-observe.cc b/deps/v8/src/runtime/runtime-observe.cc
index e4ce23f87e..df0b2a330c 100644
--- a/deps/v8/src/runtime/runtime-observe.cc
+++ b/deps/v8/src/runtime/runtime-observe.cc
@@ -6,6 +6,7 @@
#include "src/arguments.h"
#include "src/debug/debug.h"
+#include "src/isolate-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/runtime/runtime-operators.cc b/deps/v8/src/runtime/runtime-operators.cc
new file mode 100644
index 0000000000..b5e92af8f6
--- /dev/null
+++ b/deps/v8/src/runtime/runtime-operators.cc
@@ -0,0 +1,277 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/arguments.h"
+#include "src/isolate-inl.h"
+#include "src/runtime/runtime-utils.h"
+
+namespace v8 {
+namespace internal {
+
+RUNTIME_FUNCTION(Runtime_Multiply) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::Multiply(isolate, lhs, rhs));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_Multiply_Strong) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::Multiply(isolate, lhs, rhs, Strength::STRONG));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_Divide) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::Divide(isolate, lhs, rhs));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_Divide_Strong) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::Divide(isolate, lhs, rhs, Strength::STRONG));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_Modulus) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::Modulus(isolate, lhs, rhs));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_Modulus_Strong) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::Modulus(isolate, lhs, rhs, Strength::STRONG));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_Add) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::Add(isolate, lhs, rhs));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_Add_Strong) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::Add(isolate, lhs, rhs, Strength::STRONG));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_Subtract) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::Subtract(isolate, lhs, rhs));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_Subtract_Strong) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::Subtract(isolate, lhs, rhs, Strength::STRONG));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ShiftLeft) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::ShiftLeft(isolate, lhs, rhs));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ShiftLeft_Strong) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::ShiftLeft(isolate, lhs, rhs, Strength::STRONG));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ShiftRight) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::ShiftRight(isolate, lhs, rhs));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ShiftRight_Strong) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::ShiftRight(isolate, lhs, rhs, Strength::STRONG));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ShiftRightLogical) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::ShiftRightLogical(isolate, lhs, rhs));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_ShiftRightLogical_Strong) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Object::ShiftRightLogical(isolate, lhs, rhs, Strength::STRONG));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_BitwiseAnd) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::BitwiseAnd(isolate, lhs, rhs));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_BitwiseAnd_Strong) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::BitwiseAnd(isolate, lhs, rhs, Strength::STRONG));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_BitwiseOr) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::BitwiseOr(isolate, lhs, rhs));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_BitwiseOr_Strong) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::BitwiseOr(isolate, lhs, rhs, Strength::STRONG));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_BitwiseXor) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+ Object::BitwiseXor(isolate, lhs, rhs));
+ return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_BitwiseXor_Strong) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(2, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, lhs, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Object, rhs, 1);
+ Handle<Object> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, Object::BitwiseXor(isolate, lhs, rhs, Strength::STRONG));
+ return *result;
+}
+
+} // namespace internal
+} // namespace v8
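
Every function in the new runtime-operators.cc has the same shape; the bodies differ only in which Object:: method they call and whether Strength::STRONG is passed. V8 writes them out longhand, but the shared shape is easy to see as a macro. A toy sketch of that shape only (doubles in place of tagged values, no error path, and not something V8 itself does here):

#include <iostream>

#define DEFINE_BINARY_OP(Name, op)                     \
  double Runtime_##Name(double lhs, double rhs) {      \
    return lhs op rhs; /* Object::Name in V8 proper */ \
  }

DEFINE_BINARY_OP(Add, +)
DEFINE_BINARY_OP(Subtract, -)
DEFINE_BINARY_OP(Multiply, *)

int main() {
  std::cout << Runtime_Add(2, 3) << " " << Runtime_Subtract(3, 2) << " "
            << Runtime_Multiply(2, 3) << "\n";  // 5 1 6
}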
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index 03af691cf3..4699647b80 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -25,7 +25,7 @@ RUNTIME_FUNCTION(Runtime_CreateJSFunctionProxy) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSReceiver, handler, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, call_trap, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, call_trap, 1);
RUNTIME_ASSERT(call_trap->IsJSFunction() || call_trap->IsJSFunctionProxy());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, construct_trap, 2);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 3);
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index de671f5783..48154ea275 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -6,10 +6,10 @@
#include "src/arguments.h"
#include "src/conversions-inl.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/jsregexp.h"
-#include "src/runtime/runtime-utils.h"
#include "src/string-builder.h"
#include "src/string-search.h"
@@ -738,20 +738,20 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
DCHECK(result->HasFastObjectElements());
- if (part_count == 1 && indices.at(0) == subject_length) {
- FixedArray::cast(result->elements())->set(0, *subject);
- return *result;
- }
-
Handle<FixedArray> elements(FixedArray::cast(result->elements()));
- int part_start = 0;
- for (int i = 0; i < part_count; i++) {
- HandleScope local_loop_handle(isolate);
- int part_end = indices.at(i);
- Handle<String> substring =
- isolate->factory()->NewProperSubString(subject, part_start, part_end);
- elements->set(i, *substring);
- part_start = part_end + pattern_length;
+
+ if (part_count == 1 && indices.at(0) == subject_length) {
+ elements->set(0, *subject);
+ } else {
+ int part_start = 0;
+ for (int i = 0; i < part_count; i++) {
+ HandleScope local_loop_handle(isolate);
+ int part_end = indices.at(i);
+ Handle<String> substring =
+ isolate->factory()->NewProperSubString(subject, part_start, part_end);
+ elements->set(i, *substring);
+ part_start = part_end + pattern_length;
+ }
}
if (limit == 0xffffffffu) {
@@ -985,7 +985,7 @@ RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
RUNTIME_FUNCTION(Runtime_MaterializeRegExpLiteral) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
+ CONVERT_ARG_HANDLE_CHECKED(LiteralsArray, literals, 0);
CONVERT_SMI_ARG_CHECKED(index, 1);
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
CONVERT_ARG_HANDLE_CHECKED(String, flags, 3);
@@ -996,7 +996,7 @@ RUNTIME_FUNCTION(Runtime_MaterializeRegExpLiteral) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, regexp,
RegExpImpl::CreateRegExpLiteral(constructor, pattern, flags));
- literals->set(index, *regexp);
+ literals->set_literal(index, *regexp);
return *regexp;
}
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 01c828bf40..c3928a7703 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -7,6 +7,7 @@
#include "src/accessors.h"
#include "src/arguments.h"
#include "src/frames-inl.h"
+#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/scopeinfo.h"
#include "src/scopes.h"
@@ -206,8 +207,9 @@ namespace {
Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
Handle<Object> initial_value,
PropertyAttributes attr) {
- // Declarations are always made in a function, eval or script context. In
- // the case of eval code, the context passed is the context of the caller,
+ // Declarations are always made in a function, eval or script context, or
+ // a declaration block scope.
+ // In the case of eval code, the context passed is the context of the caller,
// which may be some nested context and not the declaration context.
Handle<Context> context_arg(isolate->context(), isolate);
Handle<Context> context(context_arg->declaration_context(), isolate);
@@ -241,8 +243,7 @@ Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
return DeclareGlobals(isolate, Handle<JSGlobalObject>::cast(holder), name,
value, attr, is_var, is_const, is_function);
}
- if (context_arg->has_extension() &&
- context_arg->extension()->IsJSGlobalObject()) {
+ if (context_arg->extension()->IsJSGlobalObject()) {
Handle<JSGlobalObject> global(
JSGlobalObject::cast(context_arg->extension()), isolate);
return DeclareGlobals(isolate, global, name, value, attr, is_var, is_const,
@@ -265,7 +266,7 @@ Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
if (is_var) return isolate->heap()->undefined_value();
DCHECK(is_function);
- if (index >= 0) {
+ if (index != Context::kNotFound) {
DCHECK(holder.is_identical_to(context));
context->set(index, *initial_value);
return isolate->heap()->undefined_value();
@@ -274,7 +275,19 @@ Object* DeclareLookupSlot(Isolate* isolate, Handle<String> name,
object = Handle<JSObject>::cast(holder);
} else if (context->has_extension()) {
- object = handle(JSObject::cast(context->extension()));
+ // Sloppy varblock contexts might not have an extension object yet,
+ // in which case their extension is a ScopeInfo.
+ if (context->extension()->IsScopeInfo()) {
+ DCHECK(context->IsBlockContext());
+ object = isolate->factory()->NewJSObject(
+ isolate->context_extension_function());
+ Handle<Object> extension =
+ isolate->factory()->NewSloppyBlockWithEvalContextExtension(
+ handle(context->scope_info()), object);
+ context->set_extension(*extension);
+ } else {
+ object = handle(context->extension_object(), isolate);
+ }
DCHECK(object->IsJSContextExtensionObject() || object->IsJSGlobalObject());
} else {
DCHECK(context->IsFunctionContext());
@@ -334,7 +347,7 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
if (isolate->has_pending_exception()) return isolate->heap()->exception();
}
- if (index >= 0) {
+ if (index != Context::kNotFound) {
DCHECK(holder->IsContext());
// Property was found in a context. Perform the assignment if the constant
// was uninitialized.
@@ -357,8 +370,8 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
if (declaration_context->IsScriptContext()) {
holder = handle(declaration_context->global_object(), isolate);
} else {
- DCHECK(declaration_context->has_extension());
- holder = handle(declaration_context->extension(), isolate);
+ holder = handle(declaration_context->extension_object(), isolate);
+ DCHECK(!holder.is_null());
}
CHECK(holder->IsJSObject());
} else {
@@ -391,10 +404,11 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
}
-static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
- Handle<JSFunction> callee,
- Object** parameters,
- int argument_count) {
+namespace {
+
+template <typename T>
+Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
+ T parameters, int argument_count) {
CHECK(!IsSubclassConstructor(callee->shared()->kind()));
DCHECK(callee->has_simple_parameters());
Handle<JSObject> result =
@@ -424,7 +438,7 @@ static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
while (index >= mapped_count) {
// These go directly in the arguments array and have no
// corresponding slot in the parameter map.
- arguments->set(index, *(parameters - index - 1));
+ arguments->set(index, parameters[index]);
--index;
}
@@ -444,7 +458,7 @@ static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
if (duplicate) {
// This goes directly in the arguments array with a hole in the
// parameter map.
- arguments->set(index, *(parameters - index - 1));
+ arguments->set(index, parameters[index]);
parameter_map->set_the_hole(index + 2);
} else {
// The context index goes in the parameter map with a hole in the
@@ -473,7 +487,7 @@ static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
isolate->factory()->NewFixedArray(argument_count, NOT_TENURED);
result->set_elements(*elements);
for (int i = 0; i < argument_count; ++i) {
- elements->set(i, *(parameters - i - 1));
+ elements->set(i, parameters[i]);
}
}
}
@@ -481,10 +495,9 @@ static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
}
-static Handle<JSObject> NewStrictArguments(Isolate* isolate,
- Handle<JSFunction> callee,
- Object** parameters,
- int argument_count) {
+template <typename T>
+Handle<JSObject> NewStrictArguments(Isolate* isolate, Handle<JSFunction> callee,
+ T parameters, int argument_count) {
Handle<JSObject> result =
isolate->factory()->NewArgumentsObject(callee, argument_count);
@@ -494,7 +507,7 @@ static Handle<JSObject> NewStrictArguments(Isolate* isolate,
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
for (int i = 0; i < argument_count; i++) {
- array->set(i, *--parameters, mode);
+ array->set(i, parameters[i], mode);
}
result->set_elements(*array);
}
@@ -502,24 +515,53 @@ static Handle<JSObject> NewStrictArguments(Isolate* isolate,
}
-RUNTIME_FUNCTION(Runtime_NewArguments) {
+class HandleArguments BASE_EMBEDDED {
+ public:
+ explicit HandleArguments(Handle<Object>* array) : array_(array) {}
+ Object* operator[](int index) { return *array_[index]; }
+
+ private:
+ Handle<Object>* array_;
+};
+
+
+class ParameterArguments BASE_EMBEDDED {
+ public:
+ explicit ParameterArguments(Object** parameters) : parameters_(parameters) {}
+ Object*& operator[](int index) { return *(parameters_ - index - 1); }
+
+ private:
+ Object** parameters_;
+};
+
+} // namespace
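
The two adaptor classes above exist so the templated argument builders only ever see operator[]: HandleArguments indexes a materialized array of handles forward, while ParameterArguments walks raw stack slots that grow downward, hence the *(parameters_ - index - 1) arithmetic. The same trick in standalone toy form:

#include <cassert>

class ForwardArguments {  // cf. HandleArguments
 public:
  explicit ForwardArguments(int* array) : array_(array) {}
  int operator[](int index) const { return array_[index]; }

 private:
  int* array_;
};

class DownwardArguments {  // cf. ParameterArguments
 public:
  explicit DownwardArguments(int* top) : top_(top) {}
  int operator[](int index) const { return *(top_ - index - 1); }

 private:
  int* top_;  // points one past the first argument slot
};

template <typename T>
int Sum(T args, int count) {  // cf. NewSloppyArguments / NewStrictArguments
  int sum = 0;
  for (int i = 0; i < count; ++i) sum += args[i];
  return sum;
}

int main() {
  int stack[3] = {30, 20, 10};  // pushed so that argument 0 is nearest the top
  assert(Sum(DownwardArguments(stack + 3), 3) == 60);  // reads 10, 20, 30
  int heap[3] = {10, 20, 30};
  assert(Sum(ForwardArguments(heap), 3) == 60);
}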
+
+
+RUNTIME_FUNCTION(Runtime_NewSloppyArguments_Generic) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
- JavaScriptFrameIterator it(isolate);
-
- // Find the frame that holds the actual arguments passed to the function.
- it.AdvanceToArgumentsFrame();
- JavaScriptFrame* frame = it.frame();
+  // This generic runtime function can also be used when the caller has been
+  // inlined, so we use the slow but accurate {Runtime::GetCallerArguments}.
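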
+ int argument_count = 0;
+ base::SmartArrayPointer<Handle<Object>> arguments =
+ Runtime::GetCallerArguments(isolate, 0, &argument_count);
+ HandleArguments argument_getter(arguments.get());
+ return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
+}
- // Determine parameter location on the stack and dispatch on language mode.
- int argument_count = frame->GetArgumentsLength();
- Object** parameters = reinterpret_cast<Object**>(frame->GetParameterSlot(-1));
- return (is_strict(callee->shared()->language_mode()) ||
- !callee->has_simple_parameters())
- ? *NewStrictArguments(isolate, callee, parameters, argument_count)
- : *NewSloppyArguments(isolate, callee, parameters, argument_count);
+RUNTIME_FUNCTION(Runtime_NewStrictArguments_Generic) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
+  // This generic runtime function can also be used when the caller has been
+  // inlined, so we use the slow but accurate {Runtime::GetCallerArguments}.
+ int argument_count = 0;
+ base::SmartArrayPointer<Handle<Object>> arguments =
+ Runtime::GetCallerArguments(isolate, 0, &argument_count);
+ HandleArguments argument_getter(arguments.get());
+ return *NewStrictArguments(isolate, callee, argument_getter, argument_count);
}
@@ -529,7 +571,14 @@ RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
Object** parameters = reinterpret_cast<Object**>(args[1]);
CONVERT_SMI_ARG_CHECKED(argument_count, 2);
- return *NewSloppyArguments(isolate, callee, parameters, argument_count);
+#ifdef DEBUG
+  // This runtime function does not materialize the correct arguments when the
+  // caller has been inlined; better make sure we are not hitting that case.
+ JavaScriptFrameIterator it(isolate);
+ DCHECK(!it.frame()->HasInlinedFrames());
+#endif // DEBUG
+ ParameterArguments argument_getter(parameters);
+ return *NewSloppyArguments(isolate, callee, argument_getter, argument_count);
}
@@ -539,81 +588,36 @@ RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
Object** parameters = reinterpret_cast<Object**>(args[1]);
CONVERT_SMI_ARG_CHECKED(argument_count, 2);
- return *NewStrictArguments(isolate, callee, parameters, argument_count);
-}
-
-
-static Handle<JSArray> NewRestParam(Isolate* isolate, Object** parameters,
- int num_params, int rest_index,
- LanguageMode language_mode) {
- parameters -= rest_index;
- int num_elements = std::max(0, num_params - rest_index);
- Handle<FixedArray> elements =
- isolate->factory()->NewUninitializedFixedArray(num_elements);
- for (int i = 0; i < num_elements; ++i) {
- elements->set(i, *--parameters);
- }
- return isolate->factory()->NewJSArrayWithElements(
- elements, FAST_ELEMENTS, num_elements, strength(language_mode));
-}
-
-
-RUNTIME_FUNCTION(Runtime_NewRestParam) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- Object** parameters = reinterpret_cast<Object**>(args[0]);
- CONVERT_SMI_ARG_CHECKED(num_params, 1);
- CONVERT_SMI_ARG_CHECKED(rest_index, 2);
- CONVERT_SMI_ARG_CHECKED(language_mode, 3);
-
- return *NewRestParam(isolate, parameters, num_params, rest_index,
- static_cast<LanguageMode>(language_mode));
-}
-
-
-RUNTIME_FUNCTION(Runtime_NewRestParamSlow) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_SMI_ARG_CHECKED(rest_index, 0);
- CONVERT_SMI_ARG_CHECKED(language_mode, 1);
-
+#ifdef DEBUG
+  // This runtime function does not materialize the correct arguments when the
+  // caller has been inlined; better make sure we are not hitting that case.
JavaScriptFrameIterator it(isolate);
-
- // Find the frame that holds the actual arguments passed to the function.
- it.AdvanceToArgumentsFrame();
- JavaScriptFrame* frame = it.frame();
-
- int argument_count = frame->GetArgumentsLength();
- Object** parameters = reinterpret_cast<Object**>(frame->GetParameterSlot(-1));
-
- return *NewRestParam(isolate, parameters, argument_count, rest_index,
- static_cast<LanguageMode>(language_mode));
+ DCHECK(!it.frame()->HasInlinedFrames());
+#endif // DEBUG
+ ParameterArguments argument_getter(parameters);
+ return *NewStrictArguments(isolate, callee, argument_getter, argument_count);
}
-RUNTIME_FUNCTION(Runtime_NewClosureFromStubFailure) {
+RUNTIME_FUNCTION(Runtime_NewClosure) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
- Handle<Context> context(isolate->context());
- PretenureFlag pretenure_flag = NOT_TENURED;
+ Handle<Context> context(isolate->context(), isolate);
return *isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
- pretenure_flag);
+ NOT_TENURED);
}
-RUNTIME_FUNCTION(Runtime_NewClosure) {
+RUNTIME_FUNCTION(Runtime_NewClosure_Tenured) {
HandleScope scope(isolate);
- DCHECK(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
- CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 1);
- CONVERT_BOOLEAN_ARG_CHECKED(pretenure, 2);
-
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
+ Handle<Context> context(isolate->context(), isolate);
// The caller ensures that we pretenure closures that are assigned
// directly to properties.
- PretenureFlag pretenure_flag = pretenure ? TENURED : NOT_TENURED;
return *isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
- pretenure_flag);
+ TENURED);
}
static Object* FindNameClash(Handle<ScopeInfo> scope_info,
@@ -938,8 +942,7 @@ static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate,
return MakePair(isolate->heap()->exception(), NULL);
}
- // If the index is non-negative, the slot has been found in a context.
- if (index >= 0) {
+ if (index != Context::kNotFound) {
DCHECK(holder->IsContext());
// If the "property" we were looking for is a local variable, the
// receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
@@ -1041,7 +1044,7 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
}
// The property was found in a context slot.
- if (index >= 0) {
+ if (index != Context::kNotFound) {
if ((binding_flags == MUTABLE_CHECK_INITIALIZED ||
binding_flags == IMMUTABLE_CHECK_INITIALIZED_HARMONY) &&
Handle<Context>::cast(holder)->is_the_hole(index)) {
@@ -1082,35 +1085,32 @@ RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
RUNTIME_FUNCTION(Runtime_ArgumentsLength) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
DCHECK(args.length() == 0);
- JavaScriptFrameIterator it(isolate);
- JavaScriptFrame* frame = it.frame();
- return Smi::FromInt(frame->GetArgumentsLength());
+ int argument_count = 0;
+ Runtime::GetCallerArguments(isolate, 0, &argument_count);
+ return Smi::FromInt(argument_count);
}
RUNTIME_FUNCTION(Runtime_Arguments) {
- SealHandleScope shs(isolate);
+ HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(Object, raw_key, 0);
- // Compute the frame holding the arguments.
- JavaScriptFrameIterator it(isolate);
- it.AdvanceToArgumentsFrame();
- JavaScriptFrame* frame = it.frame();
-
- // Get the actual number of provided arguments.
- const uint32_t n = frame->ComputeParametersCount();
+ // Determine the actual arguments passed to the function.
+ int argument_count_signed = 0;
+ base::SmartArrayPointer<Handle<Object>> arguments =
+ Runtime::GetCallerArguments(isolate, 0, &argument_count_signed);
+ const uint32_t argument_count = argument_count_signed;
// Try to convert the key to an index. If successful and within
// range, return the corresponding argument.
uint32_t index = 0;
- if (raw_key->ToArrayIndex(&index) && index < n) {
- return frame->GetParameter(index);
+ if (raw_key->ToArrayIndex(&index) && index < argument_count) {
+ return *arguments[index];
}
- HandleScope scope(isolate);
if (raw_key->IsSymbol()) {
Handle<Symbol> symbol = Handle<Symbol>::cast(raw_key);
if (Name::Equals(symbol, isolate->factory()->iterator_symbol())) {
@@ -1128,13 +1128,13 @@ RUNTIME_FUNCTION(Runtime_Arguments) {
// Convert the key to a string.
Handle<Object> converted;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, converted,
- Execution::ToString(isolate, raw_key));
+ Object::ToString(isolate, raw_key));
Handle<String> key = Handle<String>::cast(converted);
// Try to convert the string key into an array index.
if (key->AsArrayIndex(&index)) {
- if (index < n) {
- return frame->GetParameter(index);
+ if (index < argument_count) {
+ return *arguments[index];
} else {
Handle<Object> initial_prototype(isolate->initial_object_prototype());
Handle<Object> result;
@@ -1147,10 +1147,11 @@ RUNTIME_FUNCTION(Runtime_Arguments) {
// Handle special arguments properties.
if (String::Equals(isolate->factory()->length_string(), key)) {
- return Smi::FromInt(n);
+ return Smi::FromInt(argument_count);
}
if (String::Equals(isolate->factory()->callee_string(), key)) {
- JSFunction* function = frame->function();
+ JavaScriptFrameIterator it(isolate);
+ JSFunction* function = it.frame()->function();
if (is_strict(function->shared()->language_mode())) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kStrictPoisonPill));
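
Runtime_Arguments above resolves a key against the materialized argument list in a fixed order: a numeric index within range wins, then the special "length" and "callee" names, and everything else falls through to the prototype chain. A plain C++ stand-in for that lookup order, with illustrative names and strings in place of JS values:

    #include <cstdlib>
    #include <iostream>
    #include <string>
    #include <vector>

    std::string ArgumentsLookup(const std::vector<std::string>& args,
                                const std::string& key) {
      // Try the key as an array index first.
      char* end = nullptr;
      unsigned long index = std::strtoul(key.c_str(), &end, 10);
      if (end != key.c_str() && *end == '\0' && index < args.size())
        return args[index];
      if (key == "length") return std::to_string(args.size());
      return "undefined";  // the real code consults the prototype chain here
    }

    int main() {
      std::vector<std::string> args = {"a", "b"};
      std::cout << ArgumentsLookup(args, "1") << "\n";       // b
      std::cout << ArgumentsLookup(args, "length") << "\n";  // 2
    }
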
diff --git a/deps/v8/src/runtime/runtime-simd.cc b/deps/v8/src/runtime/runtime-simd.cc
index ce9512e8da..0a1034dfc2 100644
--- a/deps/v8/src/runtime/runtime-simd.cc
+++ b/deps/v8/src/runtime/runtime-simd.cc
@@ -21,6 +21,29 @@ namespace {
// Functions to convert Numbers to SIMD component types.
+template <typename T, typename F>
+static bool CanCast(F from) {
+ // A float can't represent 2^31 - 1 or 2^32 - 1 exactly, so promote the limits
+ // to double. Otherwise, the limit is truncated and numbers like 2^31 or 2^32
+ // get through, causing any static_cast to be undefined.
+ return from >= static_cast<double>(std::numeric_limits<T>::min()) &&
+ from <= static_cast<double>(std::numeric_limits<T>::max());
+}
+
+
+// Explicitly specialize for conversions to float, which always succeed.
+template <>
+bool CanCast<float>(int32_t from) {
+ return true;
+}
+
+
+template <>
+bool CanCast<float>(uint32_t from) {
+ return true;
+}
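
The comment above is the whole point of CanCast: float cannot represent INT32_MAX exactly and rounds it up to 2^31, so a float-typed limit would admit 2^31 and make the subsequent static_cast undefined behavior. A standalone illustration in plain C++ (not V8 internals):

    #include <cstdint>
    #include <iostream>
    #include <limits>

    template <typename T, typename F>
    bool CanCast(F from) {
      // Promote the limits to double so they are represented exactly.
      return from >= static_cast<double>(std::numeric_limits<T>::min()) &&
             from <= static_cast<double>(std::numeric_limits<T>::max());
    }

    int main() {
      // 2^31, which is also what static_cast<float>(INT32_MAX) rounds up to.
      float two_to_31 = 2147483648.0f;
      std::cout << CanCast<int32_t>(two_to_31) << "\n";   // 0: correctly rejected
      std::cout << CanCast<uint32_t>(two_to_31) << "\n";  // 1: in range for uint32_t
    }
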
+
+
template <typename T>
static T ConvertNumber(double number);
@@ -38,17 +61,35 @@ int32_t ConvertNumber<int32_t>(double number) {
template <>
+uint32_t ConvertNumber<uint32_t>(double number) {
+ return DoubleToUint32(number);
+}
+
+
+template <>
int16_t ConvertNumber<int16_t>(double number) {
return static_cast<int16_t>(DoubleToInt32(number));
}
template <>
+uint16_t ConvertNumber<uint16_t>(double number) {
+ return static_cast<uint16_t>(DoubleToUint32(number));
+}
+
+
+template <>
int8_t ConvertNumber<int8_t>(double number) {
return static_cast<int8_t>(DoubleToInt32(number));
}
+template <>
+uint8_t ConvertNumber<uint8_t>(double number) {
+ return static_cast<uint8_t>(DoubleToUint32(number));
+}
+
+
// TODO(bbudge): Make this consistent with SIMD instruction results.
inline float RecipApprox(float a) { return 1.0f / a; }
@@ -110,15 +151,6 @@ inline float MaxNumber(float a, float b) {
return Max(a, b);
}
-
-inline bool CanCast(int32_t a) { return true; }
-
-
-inline bool CanCast(float a) {
- return a > std::numeric_limits<int32_t>::min() &&
- a < std::numeric_limits<int32_t>::max();
-}
-
} // namespace
//-------------------------------------------------------------------
@@ -132,23 +164,6 @@ RUNTIME_FUNCTION(Runtime_IsSimdValue) {
}
-RUNTIME_FUNCTION(Runtime_SimdToObject) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(Simd128Value, value, 0);
- return *Object::ToObject(isolate, value).ToHandleChecked();
-}
-
-
-RUNTIME_FUNCTION(Runtime_SimdEquals) {
- SealHandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
- CONVERT_ARG_CHECKED(Simd128Value, x, 0);
- CONVERT_ARG_CHECKED(Simd128Value, y, 1);
- return Smi::FromInt(x->Equals(y) ? EQUAL : NOT_EQUAL);
-}
-
-
RUNTIME_FUNCTION(Runtime_SimdSameValue) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -240,13 +255,16 @@ RUNTIME_FUNCTION(Runtime_SimdSameValueZero) {
#define GET_BOOLEAN_ARG(lane_type, name, index) \
name = args[index]->BooleanValue();
-#define SIMD_ALL_TYPES(FUNCTION) \
- FUNCTION(Float32x4, float, 4, NewNumber, GET_NUMERIC_ARG) \
- FUNCTION(Int32x4, int32_t, 4, NewNumber, GET_NUMERIC_ARG) \
- FUNCTION(Bool32x4, bool, 4, ToBoolean, GET_BOOLEAN_ARG) \
- FUNCTION(Int16x8, int16_t, 8, NewNumber, GET_NUMERIC_ARG) \
- FUNCTION(Bool16x8, bool, 8, ToBoolean, GET_BOOLEAN_ARG) \
- FUNCTION(Int8x16, int8_t, 16, NewNumber, GET_NUMERIC_ARG) \
+#define SIMD_ALL_TYPES(FUNCTION) \
+ FUNCTION(Float32x4, float, 4, NewNumber, GET_NUMERIC_ARG) \
+ FUNCTION(Int32x4, int32_t, 4, NewNumber, GET_NUMERIC_ARG) \
+ FUNCTION(Uint32x4, uint32_t, 4, NewNumber, GET_NUMERIC_ARG) \
+ FUNCTION(Bool32x4, bool, 4, ToBoolean, GET_BOOLEAN_ARG) \
+ FUNCTION(Int16x8, int16_t, 8, NewNumber, GET_NUMERIC_ARG) \
+ FUNCTION(Uint16x8, uint16_t, 8, NewNumber, GET_NUMERIC_ARG) \
+ FUNCTION(Bool16x8, bool, 8, ToBoolean, GET_BOOLEAN_ARG) \
+ FUNCTION(Int8x16, int8_t, 16, NewNumber, GET_NUMERIC_ARG) \
+ FUNCTION(Uint8x16, uint8_t, 16, NewNumber, GET_NUMERIC_ARG) \
FUNCTION(Bool8x16, bool, 16, ToBoolean, GET_BOOLEAN_ARG)
#define SIMD_CREATE_FUNCTION(type, lane_type, lane_count, extract, replace) \
@@ -403,6 +421,11 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
FUNCTION(Int16x8, int16_t, 16, 8) \
FUNCTION(Int8x16, int8_t, 8, 16)
+#define SIMD_UINT_TYPES(FUNCTION) \
+ FUNCTION(Uint32x4, uint32_t, 32, 4) \
+ FUNCTION(Uint16x8, uint16_t, 16, 8) \
+ FUNCTION(Uint8x16, uint8_t, 8, 16)
+
#define CONVERT_SHIFT_ARG_CHECKED(name, index) \
RUNTIME_ASSERT(args[index]->IsNumber()); \
int32_t signed_shift = 0; \
@@ -427,7 +450,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
}
#define SIMD_LSR_FUNCTION(type, lane_type, lane_bits, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##ShiftRightLogicalByScalar) { \
+ RUNTIME_FUNCTION(Runtime_##type##ShiftRightByScalar) { \
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 2); \
@@ -437,7 +460,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
if (shift < lane_bits) { \
for (int i = 0; i < kLaneCount; i++) { \
lanes[i] = static_cast<lane_type>( \
- bit_cast<u##lane_type>(a->get_lane(i)) >> shift); \
+ bit_cast<lane_type>(a->get_lane(i)) >> shift); \
} \
} \
Handle<type> result = isolate->factory()->New##type(lanes); \
@@ -445,7 +468,7 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
}
#define SIMD_ASR_FUNCTION(type, lane_type, lane_bits, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##ShiftRightArithmeticByScalar) { \
+ RUNTIME_FUNCTION(Runtime_##type##ShiftRightByScalar) { \
static const int kLaneCount = lane_count; \
HandleScope scope(isolate); \
DCHECK(args.length() == 2); \
@@ -462,8 +485,9 @@ SIMD_MAXNUM_FUNCTION(Float32x4, float, 4)
}
SIMD_INT_TYPES(SIMD_LSL_FUNCTION)
-SIMD_INT_TYPES(SIMD_LSR_FUNCTION)
+SIMD_UINT_TYPES(SIMD_LSL_FUNCTION)
SIMD_INT_TYPES(SIMD_ASR_FUNCTION)
+SIMD_UINT_TYPES(SIMD_LSR_FUNCTION)
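
After this rename, both macro families emit a single Runtime_<type>ShiftRightByScalar per lane type: SIMD_ASR_FUNCTION instances (signed lanes) shift arithmetically while SIMD_LSR_FUNCTION instances (unsigned lanes) shift logically, matching what `>>` does on the corresponding C++ lane types. A plain C++ illustration:

    #include <cstdint>
    #include <iostream>

    int main() {
      int32_t signed_lane = -8;
      uint32_t unsigned_lane = 0xFFFFFFF8u;  // same bit pattern as -8
      // Arithmetic shift replicates the sign bit (guaranteed since C++20,
      // and the behavior of mainstream compilers before that).
      std::cout << (signed_lane >> 1) << "\n";    // -4
      // Logical shift fills with zeros.
      std::cout << (unsigned_lane >> 1) << "\n";  // 2147483644
    }
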
//-------------------------------------------------------------------
@@ -513,7 +537,9 @@ SIMD_BOOL_TYPES(SIMD_ALL_FUNCTION)
#define SIMD_SMALL_INT_TYPES(FUNCTION) \
FUNCTION(Int16x8, int16_t, 8) \
- FUNCTION(Int8x16, int8_t, 16)
+ FUNCTION(Uint16x8, uint16_t, 8) \
+ FUNCTION(Int8x16, int8_t, 16) \
+ FUNCTION(Uint8x16, uint8_t, 16)
#define SIMD_ADD_SATURATE_FUNCTION(type, lane_type, lane_count) \
RUNTIME_FUNCTION(Runtime_##type##AddSaturate) { \
@@ -540,15 +566,11 @@ SIMD_SMALL_INT_TYPES(SIMD_SUB_SATURATE_FUNCTION)
#define SIMD_NUMERIC_TYPES(FUNCTION) \
FUNCTION(Float32x4, float, 4) \
FUNCTION(Int32x4, int32_t, 4) \
+ FUNCTION(Uint32x4, uint32_t, 4) \
FUNCTION(Int16x8, int16_t, 8) \
- FUNCTION(Int8x16, int8_t, 16)
-
-#define SIMD_NEG_FUNCTION(type, lane_type, lane_count) \
- RUNTIME_FUNCTION(Runtime_##type##Neg) { \
- HandleScope scope(isolate); \
- SIMD_UNARY_OP(type, lane_type, lane_count, -, result); \
- return *result; \
- }
+ FUNCTION(Uint16x8, uint16_t, 8) \
+ FUNCTION(Int8x16, int8_t, 16) \
+ FUNCTION(Uint8x16, uint8_t, 16)
#define BINARY_ADD(a, b) (a) + (b)
#define SIMD_ADD_FUNCTION(type, lane_type, lane_count) \
@@ -588,7 +610,6 @@ SIMD_SMALL_INT_TYPES(SIMD_SUB_SATURATE_FUNCTION)
return *result; \
}
-SIMD_NUMERIC_TYPES(SIMD_NEG_FUNCTION)
SIMD_NUMERIC_TYPES(SIMD_ADD_FUNCTION)
SIMD_NUMERIC_TYPES(SIMD_SUB_FUNCTION)
SIMD_NUMERIC_TYPES(SIMD_MUL_FUNCTION)
@@ -602,8 +623,11 @@ SIMD_NUMERIC_TYPES(SIMD_MAX_FUNCTION)
#define SIMD_RELATIONAL_TYPES(FUNCTION) \
FUNCTION(Float32x4, Bool32x4, 4) \
FUNCTION(Int32x4, Bool32x4, 4) \
+ FUNCTION(Uint32x4, Bool32x4, 4) \
FUNCTION(Int16x8, Bool16x8, 8) \
- FUNCTION(Int8x16, Bool8x16, 16)
+ FUNCTION(Uint16x8, Bool16x8, 8) \
+ FUNCTION(Int8x16, Bool8x16, 16) \
+ FUNCTION(Uint8x16, Bool8x16, 16)
#define SIMD_EQUALITY_TYPES(FUNCTION) \
SIMD_RELATIONAL_TYPES(FUNCTION) \
@@ -665,12 +689,15 @@ SIMD_RELATIONAL_TYPES(SIMD_GREATER_THAN_OR_EQUAL_FUNCTION)
// Logical functions.
-#define SIMD_LOGICAL_TYPES(FUNCTION) \
- FUNCTION(Int32x4, int32_t, 4, _INT) \
- FUNCTION(Int16x8, int16_t, 8, _INT) \
- FUNCTION(Int8x16, int8_t, 16, _INT) \
- FUNCTION(Bool32x4, bool, 4, _BOOL) \
- FUNCTION(Bool16x8, bool, 8, _BOOL) \
+#define SIMD_LOGICAL_TYPES(FUNCTION) \
+ FUNCTION(Int32x4, int32_t, 4, _INT) \
+ FUNCTION(Uint32x4, uint32_t, 4, _INT) \
+ FUNCTION(Int16x8, int16_t, 8, _INT) \
+ FUNCTION(Uint16x8, uint16_t, 8, _INT) \
+ FUNCTION(Int8x16, int8_t, 16, _INT) \
+ FUNCTION(Uint8x16, uint8_t, 16, _INT) \
+ FUNCTION(Bool32x4, bool, 4, _BOOL) \
+ FUNCTION(Bool16x8, bool, 8, _BOOL) \
FUNCTION(Bool8x16, bool, 16, _BOOL)
#define BINARY_AND_INT(a, b) (a) & (b)
@@ -718,11 +745,14 @@ SIMD_LOGICAL_TYPES(SIMD_NOT_FUNCTION)
// Select functions.
-#define SIMD_SELECT_TYPES(FUNCTION) \
- FUNCTION(Float32x4, float, Bool32x4, 4) \
- FUNCTION(Int32x4, int32_t, Bool32x4, 4) \
- FUNCTION(Int16x8, int16_t, Bool16x8, 8) \
- FUNCTION(Int8x16, int8_t, Bool8x16, 16)
+#define SIMD_SELECT_TYPES(FUNCTION) \
+ FUNCTION(Float32x4, float, Bool32x4, 4) \
+ FUNCTION(Int32x4, int32_t, Bool32x4, 4) \
+ FUNCTION(Uint32x4, uint32_t, Bool32x4, 4) \
+ FUNCTION(Int16x8, int16_t, Bool16x8, 8) \
+ FUNCTION(Uint16x8, uint16_t, Bool16x8, 8) \
+ FUNCTION(Int8x16, int8_t, Bool8x16, 16) \
+ FUNCTION(Uint8x16, uint8_t, Bool8x16, 16)
#define SIMD_SELECT_FUNCTION(type, lane_type, bool_type, lane_count) \
RUNTIME_FUNCTION(Runtime_##type##Select) { \
@@ -744,11 +774,38 @@ SIMD_SELECT_TYPES(SIMD_SELECT_FUNCTION)
//-------------------------------------------------------------------
+// Signed / unsigned functions.
+
+#define SIMD_SIGNED_TYPES(FUNCTION) \
+ FUNCTION(Float32x4, float, 4) \
+ FUNCTION(Int32x4, int32_t, 4) \
+ FUNCTION(Int16x8, int16_t, 8) \
+ FUNCTION(Int8x16, int8_t, 16)
+
+#define SIMD_NEG_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Neg) { \
+ HandleScope scope(isolate); \
+ SIMD_UNARY_OP(type, lane_type, lane_count, -, result); \
+ return *result; \
+ }
+
+SIMD_SIGNED_TYPES(SIMD_NEG_FUNCTION)
+
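
Neg is instantiated only for the float and signed-integer lane types listed above; on an unsigned lane type, unary minus would wrap modulo 2^N instead of negating. A one-line C++ demonstration:

    #include <cstdint>
    #include <iostream>

    int main() {
      uint8_t lane = 1;
      // -1 wraps to 255 under modulo-256 arithmetic; nothing becomes "negative".
      std::cout << static_cast<int>(static_cast<uint8_t>(-lane)) << "\n";  // 255
    }
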
+//-------------------------------------------------------------------
+
// Casting functions.
-#define SIMD_FROM_TYPES(FUNCTION) \
- FUNCTION(Float32x4, float, 4, Int32x4, int32_t) \
- FUNCTION(Int32x4, int32_t, 4, Float32x4, float)
+#define SIMD_FROM_TYPES(FUNCTION) \
+ FUNCTION(Float32x4, float, 4, Int32x4, int32_t) \
+ FUNCTION(Float32x4, float, 4, Uint32x4, uint32_t) \
+ FUNCTION(Int32x4, int32_t, 4, Float32x4, float) \
+ FUNCTION(Int32x4, int32_t, 4, Uint32x4, uint32_t) \
+ FUNCTION(Uint32x4, uint32_t, 4, Float32x4, float) \
+ FUNCTION(Uint32x4, uint32_t, 4, Int32x4, int32_t) \
+ FUNCTION(Int16x8, int16_t, 8, Uint16x8, uint16_t) \
+ FUNCTION(Uint16x8, uint16_t, 8, Int16x8, int16_t) \
+ FUNCTION(Int8x16, int8_t, 16, Uint8x16, uint8_t) \
+ FUNCTION(Uint8x16, uint8_t, 16, Int8x16, int8_t)
#define SIMD_FROM_FUNCTION(type, lane_type, lane_count, from_type, from_ctype) \
RUNTIME_FUNCTION(Runtime_##type##From##from_type) { \
@@ -759,7 +816,8 @@ SIMD_SELECT_TYPES(SIMD_SELECT_FUNCTION)
lane_type lanes[kLaneCount]; \
for (int i = 0; i < kLaneCount; i++) { \
from_ctype a_value = a->get_lane(i); \
- RUNTIME_ASSERT(CanCast(a_value)); \
+ if (a_value != a_value) a_value = 0; \
+ RUNTIME_ASSERT(CanCast<lane_type>(a_value)); \
lanes[i] = static_cast<lane_type>(a_value); \
} \
Handle<type> result = isolate->factory()->New##type(lanes); \
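
The new `if (a_value != a_value) a_value = 0;` line relies on NaN being the only floating-point value that compares unequal to itself, so a NaN lane is clamped to zero before the CanCast range check instead of failing it. A minimal plain C++ demonstration of the idiom:

    #include <cmath>
    #include <iostream>

    int main() {
      float lane = std::nanf("");
      if (lane != lane) lane = 0;  // NaN -> 0; every other value passes through
      std::cout << lane << "\n";   // prints 0
    }
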
@@ -768,19 +826,49 @@ SIMD_SELECT_TYPES(SIMD_SELECT_FUNCTION)
SIMD_FROM_TYPES(SIMD_FROM_FUNCTION)
-#define SIMD_FROM_BITS_TYPES(FUNCTION) \
- FUNCTION(Float32x4, float, 4, Int32x4) \
- FUNCTION(Float32x4, float, 4, Int16x8) \
- FUNCTION(Float32x4, float, 4, Int8x16) \
- FUNCTION(Int32x4, int32_t, 4, Float32x4) \
- FUNCTION(Int32x4, int32_t, 4, Int16x8) \
- FUNCTION(Int32x4, int32_t, 4, Int8x16) \
- FUNCTION(Int16x8, int16_t, 8, Float32x4) \
- FUNCTION(Int16x8, int16_t, 8, Int32x4) \
- FUNCTION(Int16x8, int16_t, 8, Int8x16) \
- FUNCTION(Int8x16, int8_t, 16, Float32x4) \
- FUNCTION(Int8x16, int8_t, 16, Int32x4) \
- FUNCTION(Int8x16, int8_t, 16, Int16x8)
+#define SIMD_FROM_BITS_TYPES(FUNCTION) \
+ FUNCTION(Float32x4, float, 4, Int32x4) \
+ FUNCTION(Float32x4, float, 4, Uint32x4) \
+ FUNCTION(Float32x4, float, 4, Int16x8) \
+ FUNCTION(Float32x4, float, 4, Uint16x8) \
+ FUNCTION(Float32x4, float, 4, Int8x16) \
+ FUNCTION(Float32x4, float, 4, Uint8x16) \
+ FUNCTION(Int32x4, int32_t, 4, Float32x4) \
+ FUNCTION(Int32x4, int32_t, 4, Uint32x4) \
+ FUNCTION(Int32x4, int32_t, 4, Int16x8) \
+ FUNCTION(Int32x4, int32_t, 4, Uint16x8) \
+ FUNCTION(Int32x4, int32_t, 4, Int8x16) \
+ FUNCTION(Int32x4, int32_t, 4, Uint8x16) \
+ FUNCTION(Uint32x4, uint32_t, 4, Float32x4) \
+ FUNCTION(Uint32x4, uint32_t, 4, Int32x4) \
+ FUNCTION(Uint32x4, uint32_t, 4, Int16x8) \
+ FUNCTION(Uint32x4, uint32_t, 4, Uint16x8) \
+ FUNCTION(Uint32x4, uint32_t, 4, Int8x16) \
+ FUNCTION(Uint32x4, uint32_t, 4, Uint8x16) \
+ FUNCTION(Int16x8, int16_t, 8, Float32x4) \
+ FUNCTION(Int16x8, int16_t, 8, Int32x4) \
+ FUNCTION(Int16x8, int16_t, 8, Uint32x4) \
+ FUNCTION(Int16x8, int16_t, 8, Uint16x8) \
+ FUNCTION(Int16x8, int16_t, 8, Int8x16) \
+ FUNCTION(Int16x8, int16_t, 8, Uint8x16) \
+ FUNCTION(Uint16x8, uint16_t, 8, Float32x4) \
+ FUNCTION(Uint16x8, uint16_t, 8, Int32x4) \
+ FUNCTION(Uint16x8, uint16_t, 8, Uint32x4) \
+ FUNCTION(Uint16x8, uint16_t, 8, Int16x8) \
+ FUNCTION(Uint16x8, uint16_t, 8, Int8x16) \
+ FUNCTION(Uint16x8, uint16_t, 8, Uint8x16) \
+ FUNCTION(Int8x16, int8_t, 16, Float32x4) \
+ FUNCTION(Int8x16, int8_t, 16, Int32x4) \
+ FUNCTION(Int8x16, int8_t, 16, Uint32x4) \
+ FUNCTION(Int8x16, int8_t, 16, Int16x8) \
+ FUNCTION(Int8x16, int8_t, 16, Uint16x8) \
+ FUNCTION(Int8x16, int8_t, 16, Uint8x16) \
+ FUNCTION(Uint8x16, uint8_t, 16, Float32x4) \
+ FUNCTION(Uint8x16, uint8_t, 16, Int32x4) \
+ FUNCTION(Uint8x16, uint8_t, 16, Uint32x4) \
+ FUNCTION(Uint8x16, uint8_t, 16, Int16x8) \
+ FUNCTION(Uint8x16, uint8_t, 16, Uint16x8) \
+ FUNCTION(Uint8x16, uint8_t, 16, Int8x16)
#define SIMD_FROM_BITS_FUNCTION(type, lane_type, lane_count, from_type) \
RUNTIME_FUNCTION(Runtime_##type##From##from_type##Bits) { \
@@ -796,26 +884,132 @@ SIMD_FROM_TYPES(SIMD_FROM_FUNCTION)
SIMD_FROM_BITS_TYPES(SIMD_FROM_BITS_FUNCTION)
+
//-------------------------------------------------------------------
-// Unsigned extract functions.
-// TODO(bbudge): remove when spec changes to include unsigned int types.
+// Load and Store functions.
+
+#define SIMD_LOADN_STOREN_TYPES(FUNCTION) \
+ FUNCTION(Float32x4, float, 4) \
+ FUNCTION(Int32x4, int32_t, 4) \
+ FUNCTION(Uint32x4, uint32_t, 4)
+
+
+// Common Load and Store functions.
+
+#define SIMD_LOAD(type, lane_type, lane_count, count, result) \
+ static const int kLaneCount = lane_count; \
+ DCHECK(args.length() == 2); \
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, tarray, 0); \
+ CONVERT_INT32_ARG_CHECKED(index, 1) \
+ size_t bpe = tarray->element_size(); \
+ uint32_t bytes = count * sizeof(lane_type); \
+ size_t byte_length = NumberToSize(isolate, tarray->byte_length()); \
+ RUNTIME_ASSERT(index >= 0 && index * bpe + bytes <= byte_length); \
+ size_t tarray_offset = NumberToSize(isolate, tarray->byte_offset()); \
+ uint8_t* tarray_base = \
+ static_cast<uint8_t*>(tarray->GetBuffer()->backing_store()) + \
+ tarray_offset; \
+ lane_type lanes[kLaneCount] = {0}; \
+ memcpy(lanes, tarray_base + index * bpe, bytes); \
+ Handle<type> result = isolate->factory()->New##type(lanes);
-RUNTIME_FUNCTION(Runtime_Int16x8UnsignedExtractLane) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Int16x8, a, 0);
- CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, 8);
- return *isolate->factory()->NewNumber(bit_cast<uint16_t>(a->get_lane(lane)));
-}
+#define SIMD_STORE(type, lane_type, lane_count, count, a) \
+ static const int kLaneCount = lane_count; \
+ DCHECK(args.length() == 3); \
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, tarray, 0); \
+ CONVERT_INT32_ARG_CHECKED(index, 1) \
+ CONVERT_ARG_HANDLE_CHECKED(type, a, 2); \
+ size_t bpe = tarray->element_size(); \
+ uint32_t bytes = count * sizeof(lane_type); \
+ size_t byte_length = NumberToSize(isolate, tarray->byte_length()); \
+ RUNTIME_ASSERT(index >= 0 && index * bpe + bytes <= byte_length); \
+ size_t tarray_offset = NumberToSize(isolate, tarray->byte_offset()); \
+ uint8_t* tarray_base = \
+ static_cast<uint8_t*>(tarray->GetBuffer()->backing_store()) + \
+ tarray_offset; \
+ lane_type lanes[kLaneCount]; \
+ for (int i = 0; i < kLaneCount; i++) { \
+ lanes[i] = a->get_lane(i); \
+ } \
+ memcpy(tarray_base + index * bpe, lanes, bytes);
+
+
+#define SIMD_LOAD_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Load) { \
+ HandleScope scope(isolate); \
+ SIMD_LOAD(type, lane_type, lane_count, lane_count, result); \
+ return *result; \
+ }
+
+
+#define SIMD_LOAD1_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Load1) { \
+ HandleScope scope(isolate); \
+ SIMD_LOAD(type, lane_type, lane_count, 1, result); \
+ return *result; \
+ }
+
+
+#define SIMD_LOAD2_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Load2) { \
+ HandleScope scope(isolate); \
+ SIMD_LOAD(type, lane_type, lane_count, 2, result); \
+ return *result; \
+ }
+
+
+#define SIMD_LOAD3_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Load3) { \
+ HandleScope scope(isolate); \
+ SIMD_LOAD(type, lane_type, lane_count, 3, result); \
+ return *result; \
+ }
+
+
+#define SIMD_STORE_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Store) { \
+ HandleScope scope(isolate); \
+ SIMD_STORE(type, lane_type, lane_count, lane_count, a); \
+ return *a; \
+ }
+
+
+#define SIMD_STORE1_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Store1) { \
+ HandleScope scope(isolate); \
+ SIMD_STORE(type, lane_type, lane_count, 1, a); \
+ return *a; \
+ }
+
+
+#define SIMD_STORE2_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Store2) { \
+ HandleScope scope(isolate); \
+ SIMD_STORE(type, lane_type, lane_count, 2, a); \
+ return *a; \
+ }
+
+
+#define SIMD_STORE3_FUNCTION(type, lane_type, lane_count) \
+ RUNTIME_FUNCTION(Runtime_##type##Store3) { \
+ HandleScope scope(isolate); \
+ SIMD_STORE(type, lane_type, lane_count, 3, a); \
+ return *a; \
+ }
+
+
+SIMD_NUMERIC_TYPES(SIMD_LOAD_FUNCTION)
+SIMD_LOADN_STOREN_TYPES(SIMD_LOAD1_FUNCTION)
+SIMD_LOADN_STOREN_TYPES(SIMD_LOAD2_FUNCTION)
+SIMD_LOADN_STOREN_TYPES(SIMD_LOAD3_FUNCTION)
+SIMD_NUMERIC_TYPES(SIMD_STORE_FUNCTION)
+SIMD_LOADN_STOREN_TYPES(SIMD_STORE1_FUNCTION)
+SIMD_LOADN_STOREN_TYPES(SIMD_STORE2_FUNCTION)
+SIMD_LOADN_STOREN_TYPES(SIMD_STORE3_FUNCTION)
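
The SIMD_LOAD macro with count < lane_count implements the partial loads (Load1/Load2/Load3): it bounds-checks the access against the typed array's byte length, copies `count` lanes, and zero-fills the rest. A standalone sketch of that shape in plain C++, with illustrative names:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Load two float lanes from a byte buffer into a 4-lane vector,
    // zero-filling lanes 2 and 3; returns false if out of bounds.
    bool Load2(const uint8_t* base, std::size_t byte_length, int index,
               float out[4]) {
      const std::size_t bpe = sizeof(float);        // bytes per element
      const std::size_t bytes = 2 * sizeof(float);  // count == 2 lanes requested
      if (index < 0 || index * bpe + bytes > byte_length) return false;
      float lanes[4] = {0};
      std::memcpy(lanes, base + index * bpe, bytes);
      std::memcpy(out, lanes, sizeof(lanes));
      return true;
    }

    int main() {
      float buf[4] = {1.5f, 2.5f, 3.5f, 4.5f};
      float out[4];
      if (Load2(reinterpret_cast<const uint8_t*>(buf), sizeof(buf), 1, out))
        std::cout << out[0] << " " << out[1] << " " << out[2] << "\n";  // 2.5 3.5 0
    }
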
+
+//-------------------------------------------------------------------
-RUNTIME_FUNCTION(Runtime_Int8x16UnsignedExtractLane) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(Int8x16, a, 0);
- CONVERT_SIMD_LANE_ARG_CHECKED(lane, 1, 16);
- return *isolate->factory()->NewNumber(bit_cast<uint8_t>(a->get_lane(lane)));
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index bb4207f202..3ce5a58e2b 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -6,6 +6,7 @@
#include "src/arguments.h"
#include "src/conversions-inl.h"
+#include "src/isolate-inl.h"
#include "src/regexp/jsregexp-inl.h"
#include "src/regexp/jsregexp.h"
#include "src/string-builder.h"
@@ -418,70 +419,22 @@ RUNTIME_FUNCTION(Runtime_CharFromCode) {
RUNTIME_FUNCTION(Runtime_StringCompare) {
HandleScope handle_scope(isolate);
- DCHECK(args.length() == 2);
-
+ DCHECK_EQ(2, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
-
isolate->counters()->string_compare_runtime()->Increment();
-
- // A few fast case tests before we flatten.
- if (x.is_identical_to(y)) return Smi::FromInt(EQUAL);
- if (y->length() == 0) {
- if (x->length() == 0) return Smi::FromInt(EQUAL);
- return Smi::FromInt(GREATER);
- } else if (x->length() == 0) {
- return Smi::FromInt(LESS);
- }
-
- int d = x->Get(0) - y->Get(0);
- if (d < 0)
- return Smi::FromInt(LESS);
- else if (d > 0)
- return Smi::FromInt(GREATER);
-
- // Slow case.
- x = String::Flatten(x);
- y = String::Flatten(y);
-
- DisallowHeapAllocation no_gc;
- Object* equal_prefix_result = Smi::FromInt(EQUAL);
- int prefix_length = x->length();
- if (y->length() < prefix_length) {
- prefix_length = y->length();
- equal_prefix_result = Smi::FromInt(GREATER);
- } else if (y->length() > prefix_length) {
- equal_prefix_result = Smi::FromInt(LESS);
- }
- int r;
- String::FlatContent x_content = x->GetFlatContent();
- String::FlatContent y_content = y->GetFlatContent();
- if (x_content.IsOneByte()) {
- Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
- if (y_content.IsOneByte()) {
- Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- } else {
- Vector<const uc16> y_chars = y_content.ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- }
- } else {
- Vector<const uc16> x_chars = x_content.ToUC16Vector();
- if (y_content.IsOneByte()) {
- Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- } else {
- Vector<const uc16> y_chars = y_content.ToUC16Vector();
- r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
- }
- }
- Object* result;
- if (r == 0) {
- result = equal_prefix_result;
- } else {
- result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER);
+ switch (String::Compare(x, y)) {
+ case ComparisonResult::kLessThan:
+ return Smi::FromInt(LESS);
+ case ComparisonResult::kEqual:
+ return Smi::FromInt(EQUAL);
+ case ComparisonResult::kGreaterThan:
+ return Smi::FromInt(GREATER);
+ case ComparisonResult::kUndefined:
+ break;
}
- return result;
+ UNREACHABLE();
+ return Smi::FromInt(0);
}
@@ -1205,28 +1158,6 @@ RUNTIME_FUNCTION(Runtime_NewString) {
}
-RUNTIME_FUNCTION(Runtime_NewConsString) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 4);
- CONVERT_INT32_ARG_CHECKED(length, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
- CONVERT_ARG_HANDLE_CHECKED(String, left, 2);
- CONVERT_ARG_HANDLE_CHECKED(String, right, 3);
-
- Handle<String> result;
- if (is_one_byte) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- isolate->factory()->NewOneByteConsString(length, left, right));
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- isolate->factory()->NewTwoByteConsString(length, left, right));
- }
- return *result;
-}
-
-
RUNTIME_FUNCTION(Runtime_StringEquals) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 2);
@@ -1321,12 +1252,6 @@ RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
}
-RUNTIME_FUNCTION(Runtime_IsStringWrapperSafeForDefaultValueOf) {
- UNIMPLEMENTED();
- return NULL;
-}
-
-
RUNTIME_FUNCTION(Runtime_StringGetLength) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
diff --git a/deps/v8/src/runtime/runtime-symbol.cc b/deps/v8/src/runtime/runtime-symbol.cc
index 8f99a37c42..778c241709 100644
--- a/deps/v8/src/runtime/runtime-symbol.cc
+++ b/deps/v8/src/runtime/runtime-symbol.cc
@@ -5,7 +5,9 @@
#include "src/runtime/runtime-utils.h"
#include "src/arguments.h"
+#include "src/isolate-inl.h"
#include "src/objects-inl.h"
+#include "src/string-builder.h"
namespace v8 {
namespace internal {
@@ -30,27 +32,6 @@ RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
}
-RUNTIME_FUNCTION(Runtime_CreateGlobalPrivateSymbol) {
- HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
- Handle<JSObject> registry = isolate->GetSymbolRegistry();
- Handle<String> part = isolate->factory()->private_intern_string();
- Handle<Object> privates;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, privates,
- Object::GetProperty(registry, part));
- Handle<Object> symbol;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, symbol,
- Object::GetProperty(privates, name));
- if (!symbol->IsSymbol()) {
- DCHECK(symbol->IsUndefined());
- symbol = isolate->factory()->NewPrivateSymbol(name);
- JSObject::AddProperty(Handle<JSObject>::cast(privates), name, symbol, NONE);
- }
- return *symbol;
-}
-
-
RUNTIME_FUNCTION(Runtime_SymbolDescription) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
@@ -59,6 +40,22 @@ RUNTIME_FUNCTION(Runtime_SymbolDescription) {
}
+RUNTIME_FUNCTION(Runtime_SymbolDescriptiveString) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Symbol, symbol, 0);
+ IncrementalStringBuilder builder(isolate);
+ builder.AppendCString("Symbol(");
+ if (symbol->name()->IsString()) {
+ builder.AppendString(handle(String::cast(symbol->name()), isolate));
+ }
+ builder.AppendCharacter(')');
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, builder.Finish());
+ return *result;
+}
+
+
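
Runtime_SymbolDescriptiveString produces the string the spec's SymbolDescriptiveString operation defines, i.e. what String(Symbol('foo')) evaluates to. A plain C++ sketch of the assembly, with std::optional standing in for the symbol's possibly-absent name:

    #include <iostream>
    #include <optional>
    #include <string>

    std::string SymbolDescriptiveString(const std::optional<std::string>& name) {
      std::string out = "Symbol(";
      if (name) out += *name;  // anonymous symbols contribute nothing
      out += ')';
      return out;
    }

    int main() {
      std::cout << SymbolDescriptiveString("foo") << "\n";         // Symbol(foo)
      std::cout << SymbolDescriptiveString(std::nullopt) << "\n";  // Symbol()
    }
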
RUNTIME_FUNCTION(Runtime_SymbolRegistry) {
HandleScope scope(isolate);
DCHECK(args.length() == 0);
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 73f6478338..fdfa42a6af 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -400,7 +400,7 @@ RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
DCHECK(args.length() == 1);
// Get the function and make sure it is compiled.
CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) {
+ if (!Compiler::Compile(func, KEEP_EXCEPTION)) {
return isolate->heap()->exception();
}
OFStream os(stdout);
@@ -467,6 +467,14 @@ RUNTIME_FUNCTION(Runtime_HaveSameMap) {
}
+RUNTIME_FUNCTION(Runtime_InNewSpace) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ return isolate->heap()->ToBoolean(isolate->heap()->InNewSpace(obj));
+}
+
+
#define ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(Name) \
RUNTIME_FUNCTION(Runtime_Has##Name) { \
CONVERT_ARG_CHECKED(JSObject, obj, 0); \
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index ffa4120903..8a3fce0a92 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -13,64 +13,6 @@
namespace v8 {
namespace internal {
-void Runtime::SetupArrayBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer,
- bool is_external, void* data,
- size_t allocated_length, SharedFlag shared) {
- DCHECK(array_buffer->GetInternalFieldCount() ==
- v8::ArrayBuffer::kInternalFieldCount);
- for (int i = 0; i < v8::ArrayBuffer::kInternalFieldCount; i++) {
- array_buffer->SetInternalField(i, Smi::FromInt(0));
- }
- array_buffer->set_backing_store(data);
- array_buffer->set_bit_field(0);
- array_buffer->set_is_external(is_external);
- array_buffer->set_is_neuterable(shared == SharedFlag::kNotShared);
- array_buffer->set_is_shared(shared == SharedFlag::kShared);
-
- if (data && !is_external) {
- isolate->heap()->RegisterNewArrayBuffer(
- isolate->heap()->InNewSpace(*array_buffer), data, allocated_length);
- }
-
- Handle<Object> byte_length =
- isolate->factory()->NewNumberFromSize(allocated_length);
- CHECK(byte_length->IsSmi() || byte_length->IsHeapNumber());
- array_buffer->set_byte_length(*byte_length);
-}
-
-
-bool Runtime::SetupArrayBufferAllocatingData(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer,
- size_t allocated_length,
- bool initialize,
- SharedFlag shared) {
- void* data;
- CHECK(isolate->array_buffer_allocator() != NULL);
- // Prevent creating array buffers when serializing.
- DCHECK(!isolate->serializer_enabled());
- if (allocated_length != 0) {
- if (initialize) {
- data = isolate->array_buffer_allocator()->Allocate(allocated_length);
- } else {
- data = isolate->array_buffer_allocator()->AllocateUninitialized(
- allocated_length);
- }
- if (data == NULL) return false;
- } else {
- data = NULL;
- }
-
- SetupArrayBuffer(isolate, array_buffer, false, data, allocated_length,
- shared);
- return true;
-}
-
-
-void Runtime::NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer) {
- array_buffer->Neuter();
-}
-
RUNTIME_FUNCTION(Runtime_ArrayBufferInitialize) {
HandleScope scope(isolate);
@@ -87,8 +29,8 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferInitialize) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
- if (!Runtime::SetupArrayBufferAllocatingData(
- isolate, holder, allocated_length, true,
+ if (!JSArrayBuffer::SetupAllocatingData(
+ holder, isolate, allocated_length, true,
is_shared ? SharedFlag::kShared : SharedFlag::kNotShared)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
@@ -150,9 +92,8 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
void* backing_store = array_buffer->backing_store();
size_t byte_length = NumberToSize(isolate, array_buffer->byte_length());
array_buffer->set_is_external(true);
- Runtime::NeuterArrayBuffer(array_buffer);
- isolate->heap()->UnregisterArrayBuffer(
- isolate->heap()->InNewSpace(*array_buffer), backing_store);
+ isolate->heap()->UnregisterArrayBuffer(*array_buffer);
+ array_buffer->Neuter();
isolate->array_buffer_allocator()->Free(backing_store, byte_length);
return isolate->heap()->undefined_value();
}
@@ -244,8 +185,8 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
holder->set_elements(*elements);
} else {
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
- Runtime::SetupArrayBuffer(isolate, buffer, true, NULL, byte_length,
- SharedFlag::kNotShared);
+ JSArrayBuffer::Setup(buffer, isolate, true, NULL, byte_length,
+ SharedFlag::kNotShared);
holder->set_buffer(*buffer);
Handle<FixedTypedArrayBase> elements =
isolate->factory()->NewFixedTypedArray(static_cast<int>(length),
@@ -319,8 +260,8 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
//
// TODO(dslomov): revise this once we support subclassing.
- if (!Runtime::SetupArrayBufferAllocatingData(isolate, buffer, byte_length,
- false)) {
+ if (!JSArrayBuffer::SetupAllocatingData(buffer, isolate, byte_length,
+ false)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
diff --git a/deps/v8/src/runtime/runtime-uri.cc b/deps/v8/src/runtime/runtime-uri.cc
index 4f77af81cb..e64e9dcea7 100644
--- a/deps/v8/src/runtime/runtime-uri.cc
+++ b/deps/v8/src/runtime/runtime-uri.cc
@@ -6,6 +6,7 @@
#include "src/arguments.h"
#include "src/conversions.h"
+#include "src/isolate-inl.h"
#include "src/objects-inl.h"
#include "src/string-search.h"
#include "src/utils.h"
@@ -257,13 +258,15 @@ MaybeHandle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
RUNTIME_FUNCTION(Runtime_URIEscape) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- Handle<String> string = String::Flatten(source);
- DCHECK(string->IsFlat());
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<String> source;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, source,
+ Object::ToString(isolate, input));
+ source = String::Flatten(source);
Handle<String> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, string->IsOneByteRepresentationUnderneath()
+ isolate, result, source->IsOneByteRepresentationUnderneath()
? URIEscape::Escape<uint8_t>(isolate, source)
: URIEscape::Escape<uc16>(isolate, source));
return *result;
@@ -273,15 +276,18 @@ RUNTIME_FUNCTION(Runtime_URIEscape) {
RUNTIME_FUNCTION(Runtime_URIUnescape) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
- Handle<String> string = String::Flatten(source);
- DCHECK(string->IsFlat());
+ CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
+ Handle<String> source;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, source,
+ Object::ToString(isolate, input));
+ source = String::Flatten(source);
Handle<String> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, string->IsOneByteRepresentationUnderneath()
+ isolate, result, source->IsOneByteRepresentationUnderneath()
? URIUnescape::Unescape<uint8_t>(isolate, source)
: URIUnescape::Unescape<uc16>(isolate, source));
return *result;
}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index a490327af5..15451c5c6e 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -4,6 +4,7 @@
#include "src/runtime/runtime.h"
+#include "src/contexts.h"
#include "src/handles-inl.h"
#include "src/heap/heap.h"
#include "src/isolate.h"
@@ -41,9 +42,10 @@ FOR_EACH_INTRINSIC_RETURN_PAIR(P)
} \
,
-
static const Runtime::Function kIntrinsicFunctions[] = {
- FOR_EACH_INTRINSIC(F) FOR_EACH_INTRINSIC(I)};
+ FOR_EACH_INTRINSIC(F)
+ FOR_EACH_INTRINSIC(I)
+};
#undef I
#undef F
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 4545426e83..6e55d74794 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -7,6 +7,7 @@
#include "src/allocation.h"
#include "src/objects.h"
+#include "src/unicode.h"
#include "src/zone.h"
namespace v8 {
@@ -34,7 +35,6 @@ namespace internal {
F(SpecialArrayFunctions, 0, 1) \
F(TransitionElementsKind, 2, 1) \
F(PushIfAbsent, 2, 1) \
- F(ArrayConcat, 1, 1) \
F(RemoveArrayHoles, 2, 1) \
F(MoveArrayContents, 2, 1) \
F(EstimateNumberOfElements, 1, 1) \
@@ -80,10 +80,8 @@ namespace internal {
F(ThrowArrayNotSubclassableError, 0, 1) \
F(ThrowStaticPrototypeError, 0, 1) \
F(ThrowIfStaticPrototype, 1, 1) \
- F(ToMethod, 2, 1) \
F(HomeObjectSymbol, 0, 1) \
F(DefineClass, 5, 1) \
- F(DefineClassStrong, 5, 1) \
F(FinalizeClassDefinition, 2, 1) \
F(DefineClassMethod, 3, 1) \
F(ClassGetSourceCode, 1, 1) \
@@ -218,8 +216,19 @@ namespace internal {
F(ForInStep, 1, 1)
+#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
+ F(InterpreterEquals, 2, 1) \
+ F(InterpreterNotEquals, 2, 1) \
+ F(InterpreterStrictEquals, 2, 1) \
+ F(InterpreterStrictNotEquals, 2, 1) \
+ F(InterpreterLessThan, 2, 1) \
+ F(InterpreterGreaterThan, 2, 1) \
+ F(InterpreterLessThanOrEqual, 2, 1) \
+ F(InterpreterGreaterThanOrEqual, 2, 1) \
+ F(InterpreterToBoolean, 1, 1)
+
+
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
- F(IsSloppyModeFunction, 1, 1) \
F(FunctionGetName, 1, 1) \
F(FunctionSetName, 2, 1) \
F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
@@ -235,7 +244,7 @@ namespace internal {
F(FunctionSetLength, 2, 1) \
F(FunctionSetPrototype, 2, 1) \
F(FunctionIsAPIFunction, 1, 1) \
- F(FunctionIsBuiltin, 1, 1) \
+ F(FunctionHidesSource, 1, 1) \
F(SetCode, 2, 1) \
F(SetNativeFlag, 1, 1) \
F(ThrowStrongModeTooFewArguments, 0, 1) \
@@ -246,8 +255,6 @@ namespace internal {
F(NewObjectFromBound, 1, 1) \
F(Call, -1 /* >= 2 */, 1) \
F(Apply, 5, 1) \
- F(GetFunctionDelegate, 1, 1) \
- F(GetConstructorDelegate, 1, 1) \
F(GetOriginalConstructor, 0, 1) \
F(CallFunction, -1 /* receiver + n args + function */, 1) \
F(IsConstructCall, 0, 1) \
@@ -301,9 +308,9 @@ namespace internal {
#define FOR_EACH_INTRINSIC_INTERNAL(F) \
F(CheckIsBootstrapping, 0, 1) \
- F(ImportToRuntime, 1, 1) \
- F(ImportExperimentalToRuntime, 1, 1) \
- F(InstallJSBuiltins, 1, 1) \
+ F(ExportFromRuntime, 1, 1) \
+ F(ExportExperimentalFromRuntime, 1, 1) \
+ F(InstallToContext, 1, 1) \
F(Throw, 1, 1) \
F(ReThrow, 1, 1) \
F(UnwindAndFindExceptionHandler, 0, 1) \
@@ -313,30 +320,29 @@ namespace internal {
F(NewSyntaxError, 2, 1) \
F(NewReferenceError, 2, 1) \
F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(ThrowStackOverflow, 0, 1) \
F(ThrowStrongModeImplicitConversion, 0, 1) \
F(PromiseRejectEvent, 3, 1) \
F(PromiseRevokeReject, 1, 1) \
- F(PromiseHasHandlerSymbol, 0, 1) \
F(StackGuard, 0, 1) \
F(Interrupt, 0, 1) \
F(AllocateInNewSpace, 1, 1) \
F(AllocateInTargetSpace, 2, 1) \
F(CollectStackTrace, 2, 1) \
- F(RenderCallSite, 0, 1) \
F(MessageGetStartPosition, 1, 1) \
F(MessageGetScript, 1, 1) \
F(ErrorToStringRT, 1, 1) \
F(FormatMessageString, 4, 1) \
- F(CallSiteGetFileNameRT, 3, 1) \
- F(CallSiteGetFunctionNameRT, 3, 1) \
- F(CallSiteGetScriptNameOrSourceUrlRT, 3, 1) \
- F(CallSiteGetMethodNameRT, 3, 1) \
- F(CallSiteGetLineNumberRT, 3, 1) \
- F(CallSiteGetColumnNumberRT, 3, 1) \
- F(CallSiteIsNativeRT, 3, 1) \
- F(CallSiteIsToplevelRT, 3, 1) \
- F(CallSiteIsEvalRT, 3, 1) \
- F(CallSiteIsConstructorRT, 3, 1) \
+ F(CallSiteGetFileNameRT, 1, 1) \
+ F(CallSiteGetFunctionNameRT, 1, 1) \
+ F(CallSiteGetScriptNameOrSourceUrlRT, 1, 1) \
+ F(CallSiteGetMethodNameRT, 1, 1) \
+ F(CallSiteGetLineNumberRT, 1, 1) \
+ F(CallSiteGetColumnNumberRT, 1, 1) \
+ F(CallSiteIsNativeRT, 1, 1) \
+ F(CallSiteIsToplevelRT, 1, 1) \
+ F(CallSiteIsEvalRT, 1, 1) \
+ F(CallSiteIsConstructorRT, 1, 1) \
F(IS_VAR, 1, 1) \
F(IncrementStatsCounter, 1, 1) \
F(Likely, 1, 1) \
@@ -344,7 +350,8 @@ namespace internal {
F(HarmonyToString, 0, 1) \
F(GetTypeFeedbackVector, 1, 1) \
F(GetCallerJSFunction, 0, 1) \
- F(GetCodeStubExportsObject, 0, 1)
+ F(GetCodeStubExportsObject, 0, 1) \
+ F(ThrowCalledNonCallable, 1, 1)
#define FOR_EACH_INTRINSIC_JSON(F) \
@@ -406,28 +413,12 @@ namespace internal {
F(StringParseFloat, 1, 1) \
F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
- F(NumberToInteger, 1, 1) \
F(NumberToIntegerMapMinusZero, 1, 1) \
F(NumberToSmi, 1, 1) \
- F(NumberAdd, 2, 1) \
- F(NumberSub, 2, 1) \
- F(NumberMul, 2, 1) \
- F(NumberUnaryMinus, 1, 1) \
- F(NumberDiv, 2, 1) \
- F(NumberMod, 2, 1) \
F(NumberImul, 2, 1) \
- F(NumberOr, 2, 1) \
- F(NumberAnd, 2, 1) \
- F(NumberXor, 2, 1) \
- F(NumberShl, 2, 1) \
- F(NumberShr, 2, 1) \
- F(NumberSar, 2, 1) \
- F(NumberEquals, 2, 1) \
- F(NumberCompare, 3, 1) \
F(SmiLexicographicCompare, 2, 1) \
F(MaxSmi, 0, 1) \
F(IsSmi, 1, 1) \
- F(IsNonNegativeSmi, 1, 1) \
F(GetRootNaN, 0, 1)
@@ -435,7 +426,6 @@ namespace internal {
F(GetPrototype, 1, 1) \
F(InternalSetPrototype, 2, 1) \
F(SetPrototype, 2, 1) \
- F(IsInPrototypeChain, 2, 1) \
F(GetOwnProperty, 2, 1) \
F(PreventExtensions, 1, 1) \
F(IsExtensible, 1, 1) \
@@ -457,7 +447,6 @@ namespace internal {
F(DeleteProperty_Strict, 2, 1) \
F(HasOwnProperty, 2, 1) \
F(HasProperty, 2, 1) \
- F(HasElement, 2, 1) \
F(IsPropertyEnumerable, 2, 1) \
F(GetPropertyNamesFast, 1, 1) \
F(GetOwnPropertyNames, 2, 1) \
@@ -467,10 +456,8 @@ namespace internal {
F(GetIndexedInterceptorElementNames, 1, 1) \
F(OwnKeys, 1, 1) \
F(ToFastProperties, 1, 1) \
- F(NewStringWrapper, 1, 1) \
F(AllocateHeapNumber, 0, 1) \
F(NewObject, 2, 1) \
- F(NewObjectWithAllocationSite, 3, 1) \
F(FinalizeInstanceSize, 1, 1) \
F(GlobalProxy, 1, 1) \
F(LookupAccessor, 3, 1) \
@@ -487,14 +474,27 @@ namespace internal {
F(HeapObjectGetMap, 1, 1) \
F(MapGetInstanceType, 1, 1) \
F(ObjectEquals, 2, 1) \
- F(IsObject, 1, 1) \
F(IsSpecObject, 1, 1) \
F(IsStrong, 1, 1) \
F(ClassOf, 1, 1) \
F(DefineGetterPropertyUnchecked, 4, 1) \
F(DefineSetterPropertyUnchecked, 4, 1) \
F(ToObject, 1, 1) \
+ F(ToPrimitive, 1, 1) \
+ F(ToPrimitive_Number, 1, 1) \
+ F(ToPrimitive_String, 1, 1) \
+ F(ToNumber, 1, 1) \
+ F(ToInteger, 1, 1) \
+ F(ToLength, 1, 1) \
+ F(ToString, 1, 1) \
+ F(ToName, 1, 1) \
+ F(Equals, 2, 1) \
F(StrictEquals, 2, 1) \
+ F(Compare, 3, 1) \
+ F(Compare_Strong, 3, 1) \
+ F(InstanceOf, 2, 1) \
+ F(HasInPrototypeChain, 2, 1) \
+ F(CreateIterResultObject, 2, 1) \
F(IsAccessCheckNeeded, 1, 1)
@@ -512,6 +512,30 @@ namespace internal {
F(GetObjectContextNotifierPerformChange, 1, 1)
+#define FOR_EACH_INTRINSIC_OPERATORS(F) \
+ F(Multiply, 2, 1) \
+ F(Multiply_Strong, 2, 1) \
+ F(Divide, 2, 1) \
+ F(Divide_Strong, 2, 1) \
+ F(Modulus, 2, 1) \
+ F(Modulus_Strong, 2, 1) \
+ F(Add, 2, 1) \
+ F(Add_Strong, 2, 1) \
+ F(Subtract, 2, 1) \
+ F(Subtract_Strong, 2, 1) \
+ F(ShiftLeft, 2, 1) \
+ F(ShiftLeft_Strong, 2, 1) \
+ F(ShiftRight, 2, 1) \
+ F(ShiftRight_Strong, 2, 1) \
+ F(ShiftRightLogical, 2, 1) \
+ F(ShiftRightLogical_Strong, 2, 1) \
+ F(BitwiseAnd, 2, 1) \
+ F(BitwiseAnd_Strong, 2, 1) \
+ F(BitwiseOr, 2, 1) \
+ F(BitwiseOr_Strong, 2, 1) \
+ F(BitwiseXor, 2, 1) \
+ F(BitwiseXor_Strong, 2, 1)
+
#define FOR_EACH_INTRINSIC_PROXY(F) \
F(CreateJSProxy, 2, 1) \
F(CreateJSFunctionProxy, 4, 1) \
@@ -535,206 +559,338 @@ namespace internal {
F(IsRegExp, 1, 1)
-#define FOR_EACH_INTRINSIC_SCOPES(F) \
- F(ThrowConstAssignError, 0, 1) \
- F(DeclareGlobals, 2, 1) \
- F(InitializeVarGlobal, 3, 1) \
- F(InitializeConstGlobal, 2, 1) \
- F(DeclareLookupSlot, 2, 1) \
- F(DeclareReadOnlyLookupSlot, 2, 1) \
- F(InitializeLegacyConstLookupSlot, 3, 1) \
- F(NewArguments, 1, 1) /* TODO(turbofan): Only temporary */ \
- F(NewSloppyArguments, 3, 1) \
- F(NewStrictArguments, 3, 1) \
- F(NewRestParam, 4, 1) \
- F(NewRestParamSlow, 2, 1) \
- F(NewClosureFromStubFailure, 1, 1) \
- F(NewClosure, 3, 1) \
- F(NewScriptContext, 2, 1) \
- F(NewFunctionContext, 1, 1) \
- F(PushWithContext, 2, 1) \
- F(PushCatchContext, 3, 1) \
- F(PushBlockContext, 2, 1) \
- F(IsJSModule, 1, 1) \
- F(PushModuleContext, 2, 1) \
- F(DeclareModules, 1, 1) \
- F(DeleteLookupSlot, 2, 1) \
- F(StoreLookupSlot, 4, 1) \
- F(ArgumentsLength, 0, 1) \
+#define FOR_EACH_INTRINSIC_SCOPES(F) \
+ F(ThrowConstAssignError, 0, 1) \
+ F(DeclareGlobals, 2, 1) \
+ F(InitializeVarGlobal, 3, 1) \
+ F(InitializeConstGlobal, 2, 1) \
+ F(DeclareLookupSlot, 2, 1) \
+ F(DeclareReadOnlyLookupSlot, 2, 1) \
+ F(InitializeLegacyConstLookupSlot, 3, 1) \
+ F(NewSloppyArguments_Generic, 1, 1) \
+ F(NewStrictArguments_Generic, 1, 1) \
+ F(NewSloppyArguments, 3, 1) \
+ F(NewStrictArguments, 3, 1) \
+ F(NewClosure, 1, 1) \
+ F(NewClosure_Tenured, 1, 1) \
+ F(NewScriptContext, 2, 1) \
+ F(NewFunctionContext, 1, 1) \
+ F(PushWithContext, 2, 1) \
+ F(PushCatchContext, 3, 1) \
+ F(PushBlockContext, 2, 1) \
+ F(IsJSModule, 1, 1) \
+ F(PushModuleContext, 2, 1) \
+ F(DeclareModules, 1, 1) \
+ F(DeleteLookupSlot, 2, 1) \
+ F(StoreLookupSlot, 4, 1) \
+ F(ArgumentsLength, 0, 1) \
F(Arguments, 1, 1)
-#define FOR_EACH_INTRINSIC_SIMD(F) \
- F(IsSimdValue, 1, 1) \
- F(SimdToObject, 1, 1) \
- F(SimdEquals, 2, 1) \
- F(SimdSameValue, 2, 1) \
- F(SimdSameValueZero, 2, 1) \
- F(CreateFloat32x4, 4, 1) \
- F(CreateInt32x4, 4, 1) \
- F(CreateBool32x4, 4, 1) \
- F(CreateInt16x8, 8, 1) \
- F(CreateBool16x8, 8, 1) \
- F(CreateInt8x16, 16, 1) \
- F(CreateBool8x16, 16, 1) \
- F(Float32x4Check, 1, 1) \
- F(Float32x4ExtractLane, 2, 1) \
- F(Float32x4ReplaceLane, 3, 1) \
- F(Float32x4Abs, 1, 1) \
- F(Float32x4Neg, 1, 1) \
- F(Float32x4Sqrt, 1, 1) \
- F(Float32x4RecipApprox, 1, 1) \
- F(Float32x4RecipSqrtApprox, 1, 1) \
- F(Float32x4Add, 2, 1) \
- F(Float32x4Sub, 2, 1) \
- F(Float32x4Mul, 2, 1) \
- F(Float32x4Div, 2, 1) \
- F(Float32x4Min, 2, 1) \
- F(Float32x4Max, 2, 1) \
- F(Float32x4MinNum, 2, 1) \
- F(Float32x4MaxNum, 2, 1) \
- F(Float32x4LessThan, 2, 1) \
- F(Float32x4LessThanOrEqual, 2, 1) \
- F(Float32x4GreaterThan, 2, 1) \
- F(Float32x4GreaterThanOrEqual, 2, 1) \
- F(Float32x4Equal, 2, 1) \
- F(Float32x4NotEqual, 2, 1) \
- F(Float32x4Select, 3, 1) \
- F(Float32x4Swizzle, 5, 1) \
- F(Float32x4Shuffle, 6, 1) \
- F(Float32x4FromInt32x4, 1, 1) \
- F(Float32x4FromInt32x4Bits, 1, 1) \
- F(Float32x4FromInt16x8Bits, 1, 1) \
- F(Float32x4FromInt8x16Bits, 1, 1) \
- F(Int32x4Check, 1, 1) \
- F(Int32x4ExtractLane, 2, 1) \
- F(Int32x4ReplaceLane, 3, 1) \
- F(Int32x4Neg, 1, 1) \
- F(Int32x4Add, 2, 1) \
- F(Int32x4Sub, 2, 1) \
- F(Int32x4Mul, 2, 1) \
- F(Int32x4Min, 2, 1) \
- F(Int32x4Max, 2, 1) \
- F(Int32x4And, 2, 1) \
- F(Int32x4Or, 2, 1) \
- F(Int32x4Xor, 2, 1) \
- F(Int32x4Not, 1, 1) \
- F(Int32x4ShiftLeftByScalar, 2, 1) \
- F(Int32x4ShiftRightLogicalByScalar, 2, 1) \
- F(Int32x4ShiftRightArithmeticByScalar, 2, 1) \
- F(Int32x4LessThan, 2, 1) \
- F(Int32x4LessThanOrEqual, 2, 1) \
- F(Int32x4GreaterThan, 2, 1) \
- F(Int32x4GreaterThanOrEqual, 2, 1) \
- F(Int32x4Equal, 2, 1) \
- F(Int32x4NotEqual, 2, 1) \
- F(Int32x4Select, 3, 1) \
- F(Int32x4Swizzle, 5, 1) \
- F(Int32x4Shuffle, 6, 1) \
- F(Int32x4FromFloat32x4, 1, 1) \
- F(Int32x4FromFloat32x4Bits, 1, 1) \
- F(Int32x4FromInt16x8Bits, 1, 1) \
- F(Int32x4FromInt8x16Bits, 1, 1) \
- F(Bool32x4Check, 1, 1) \
- F(Bool32x4ExtractLane, 2, 1) \
- F(Bool32x4ReplaceLane, 3, 1) \
- F(Bool32x4And, 2, 1) \
- F(Bool32x4Or, 2, 1) \
- F(Bool32x4Xor, 2, 1) \
- F(Bool32x4Not, 1, 1) \
- F(Bool32x4AnyTrue, 1, 1) \
- F(Bool32x4AllTrue, 1, 1) \
- F(Bool32x4Equal, 2, 1) \
- F(Bool32x4NotEqual, 2, 1) \
- F(Bool32x4Swizzle, 5, 1) \
- F(Bool32x4Shuffle, 6, 1) \
- F(Int16x8Check, 1, 1) \
- F(Int16x8ExtractLane, 2, 1) \
- F(Int16x8UnsignedExtractLane, 2, 1) \
- F(Int16x8ReplaceLane, 3, 1) \
- F(Int16x8Neg, 1, 1) \
- F(Int16x8Add, 2, 1) \
- F(Int16x8AddSaturate, 2, 1) \
- F(Int16x8Sub, 2, 1) \
- F(Int16x8SubSaturate, 2, 1) \
- F(Int16x8Mul, 2, 1) \
- F(Int16x8Min, 2, 1) \
- F(Int16x8Max, 2, 1) \
- F(Int16x8And, 2, 1) \
- F(Int16x8Or, 2, 1) \
- F(Int16x8Xor, 2, 1) \
- F(Int16x8Not, 1, 1) \
- F(Int16x8ShiftLeftByScalar, 2, 1) \
- F(Int16x8ShiftRightLogicalByScalar, 2, 1) \
- F(Int16x8ShiftRightArithmeticByScalar, 2, 1) \
- F(Int16x8LessThan, 2, 1) \
- F(Int16x8LessThanOrEqual, 2, 1) \
- F(Int16x8GreaterThan, 2, 1) \
- F(Int16x8GreaterThanOrEqual, 2, 1) \
- F(Int16x8Equal, 2, 1) \
- F(Int16x8NotEqual, 2, 1) \
- F(Int16x8Select, 3, 1) \
- F(Int16x8Swizzle, 9, 1) \
- F(Int16x8Shuffle, 10, 1) \
- F(Int16x8FromFloat32x4Bits, 1, 1) \
- F(Int16x8FromInt32x4Bits, 1, 1) \
- F(Int16x8FromInt8x16Bits, 1, 1) \
- F(Bool16x8Check, 1, 1) \
- F(Bool16x8ExtractLane, 2, 1) \
- F(Bool16x8ReplaceLane, 3, 1) \
- F(Bool16x8And, 2, 1) \
- F(Bool16x8Or, 2, 1) \
- F(Bool16x8Xor, 2, 1) \
- F(Bool16x8Not, 1, 1) \
- F(Bool16x8AnyTrue, 1, 1) \
- F(Bool16x8AllTrue, 1, 1) \
- F(Bool16x8Equal, 2, 1) \
- F(Bool16x8NotEqual, 2, 1) \
- F(Bool16x8Swizzle, 9, 1) \
- F(Bool16x8Shuffle, 10, 1) \
- F(Int8x16Check, 1, 1) \
- F(Int8x16ExtractLane, 2, 1) \
- F(Int8x16UnsignedExtractLane, 2, 1) \
- F(Int8x16ReplaceLane, 3, 1) \
- F(Int8x16Neg, 1, 1) \
- F(Int8x16Add, 2, 1) \
- F(Int8x16AddSaturate, 2, 1) \
- F(Int8x16Sub, 2, 1) \
- F(Int8x16SubSaturate, 2, 1) \
- F(Int8x16Mul, 2, 1) \
- F(Int8x16Min, 2, 1) \
- F(Int8x16Max, 2, 1) \
- F(Int8x16And, 2, 1) \
- F(Int8x16Or, 2, 1) \
- F(Int8x16Xor, 2, 1) \
- F(Int8x16Not, 1, 1) \
- F(Int8x16ShiftLeftByScalar, 2, 1) \
- F(Int8x16ShiftRightLogicalByScalar, 2, 1) \
- F(Int8x16ShiftRightArithmeticByScalar, 2, 1) \
- F(Int8x16LessThan, 2, 1) \
- F(Int8x16LessThanOrEqual, 2, 1) \
- F(Int8x16GreaterThan, 2, 1) \
- F(Int8x16GreaterThanOrEqual, 2, 1) \
- F(Int8x16Equal, 2, 1) \
- F(Int8x16NotEqual, 2, 1) \
- F(Int8x16Select, 3, 1) \
- F(Int8x16Swizzle, 17, 1) \
- F(Int8x16Shuffle, 18, 1) \
- F(Int8x16FromFloat32x4Bits, 1, 1) \
- F(Int8x16FromInt32x4Bits, 1, 1) \
- F(Int8x16FromInt16x8Bits, 1, 1) \
- F(Bool8x16Check, 1, 1) \
- F(Bool8x16ExtractLane, 2, 1) \
- F(Bool8x16ReplaceLane, 3, 1) \
- F(Bool8x16And, 2, 1) \
- F(Bool8x16Or, 2, 1) \
- F(Bool8x16Xor, 2, 1) \
- F(Bool8x16Not, 1, 1) \
- F(Bool8x16AnyTrue, 1, 1) \
- F(Bool8x16AllTrue, 1, 1) \
- F(Bool8x16Equal, 2, 1) \
- F(Bool8x16NotEqual, 2, 1) \
- F(Bool8x16Swizzle, 17, 1) \
+#define FOR_EACH_INTRINSIC_SIMD(F) \
+ F(IsSimdValue, 1, 1) \
+ F(SimdSameValue, 2, 1) \
+ F(SimdSameValueZero, 2, 1) \
+ F(CreateFloat32x4, 4, 1) \
+ F(CreateInt32x4, 4, 1) \
+ F(CreateUint32x4, 4, 1) \
+ F(CreateBool32x4, 4, 1) \
+ F(CreateInt16x8, 8, 1) \
+ F(CreateUint16x8, 8, 1) \
+ F(CreateBool16x8, 8, 1) \
+ F(CreateInt8x16, 16, 1) \
+ F(CreateUint8x16, 16, 1) \
+ F(CreateBool8x16, 16, 1) \
+ F(Float32x4Check, 1, 1) \
+ F(Float32x4ExtractLane, 2, 1) \
+ F(Float32x4ReplaceLane, 3, 1) \
+ F(Float32x4Abs, 1, 1) \
+ F(Float32x4Neg, 1, 1) \
+ F(Float32x4Sqrt, 1, 1) \
+ F(Float32x4RecipApprox, 1, 1) \
+ F(Float32x4RecipSqrtApprox, 1, 1) \
+ F(Float32x4Add, 2, 1) \
+ F(Float32x4Sub, 2, 1) \
+ F(Float32x4Mul, 2, 1) \
+ F(Float32x4Div, 2, 1) \
+ F(Float32x4Min, 2, 1) \
+ F(Float32x4Max, 2, 1) \
+ F(Float32x4MinNum, 2, 1) \
+ F(Float32x4MaxNum, 2, 1) \
+ F(Float32x4Equal, 2, 1) \
+ F(Float32x4NotEqual, 2, 1) \
+ F(Float32x4LessThan, 2, 1) \
+ F(Float32x4LessThanOrEqual, 2, 1) \
+ F(Float32x4GreaterThan, 2, 1) \
+ F(Float32x4GreaterThanOrEqual, 2, 1) \
+ F(Float32x4Select, 3, 1) \
+ F(Float32x4Swizzle, 5, 1) \
+ F(Float32x4Shuffle, 6, 1) \
+ F(Float32x4FromInt32x4, 1, 1) \
+ F(Float32x4FromUint32x4, 1, 1) \
+ F(Float32x4FromInt32x4Bits, 1, 1) \
+ F(Float32x4FromUint32x4Bits, 1, 1) \
+ F(Float32x4FromInt16x8Bits, 1, 1) \
+ F(Float32x4FromUint16x8Bits, 1, 1) \
+ F(Float32x4FromInt8x16Bits, 1, 1) \
+ F(Float32x4FromUint8x16Bits, 1, 1) \
+ F(Float32x4Load, 2, 1) \
+ F(Float32x4Load1, 2, 1) \
+ F(Float32x4Load2, 2, 1) \
+ F(Float32x4Load3, 2, 1) \
+ F(Float32x4Store, 3, 1) \
+ F(Float32x4Store1, 3, 1) \
+ F(Float32x4Store2, 3, 1) \
+ F(Float32x4Store3, 3, 1) \
+ F(Int32x4Check, 1, 1) \
+ F(Int32x4ExtractLane, 2, 1) \
+ F(Int32x4ReplaceLane, 3, 1) \
+ F(Int32x4Neg, 1, 1) \
+ F(Int32x4Add, 2, 1) \
+ F(Int32x4Sub, 2, 1) \
+ F(Int32x4Mul, 2, 1) \
+ F(Int32x4Min, 2, 1) \
+ F(Int32x4Max, 2, 1) \
+ F(Int32x4And, 2, 1) \
+ F(Int32x4Or, 2, 1) \
+ F(Int32x4Xor, 2, 1) \
+ F(Int32x4Not, 1, 1) \
+ F(Int32x4ShiftLeftByScalar, 2, 1) \
+ F(Int32x4ShiftRightByScalar, 2, 1) \
+ F(Int32x4Equal, 2, 1) \
+ F(Int32x4NotEqual, 2, 1) \
+ F(Int32x4LessThan, 2, 1) \
+ F(Int32x4LessThanOrEqual, 2, 1) \
+ F(Int32x4GreaterThan, 2, 1) \
+ F(Int32x4GreaterThanOrEqual, 2, 1) \
+ F(Int32x4Select, 3, 1) \
+ F(Int32x4Swizzle, 5, 1) \
+ F(Int32x4Shuffle, 6, 1) \
+ F(Int32x4FromFloat32x4, 1, 1) \
+ F(Int32x4FromUint32x4, 1, 1) \
+ F(Int32x4FromFloat32x4Bits, 1, 1) \
+ F(Int32x4FromUint32x4Bits, 1, 1) \
+ F(Int32x4FromInt16x8Bits, 1, 1) \
+ F(Int32x4FromUint16x8Bits, 1, 1) \
+ F(Int32x4FromInt8x16Bits, 1, 1) \
+ F(Int32x4FromUint8x16Bits, 1, 1) \
+ F(Int32x4Load, 2, 1) \
+ F(Int32x4Load1, 2, 1) \
+ F(Int32x4Load2, 2, 1) \
+ F(Int32x4Load3, 2, 1) \
+ F(Int32x4Store, 3, 1) \
+ F(Int32x4Store1, 3, 1) \
+ F(Int32x4Store2, 3, 1) \
+ F(Int32x4Store3, 3, 1) \
+ F(Uint32x4Check, 1, 1) \
+ F(Uint32x4ExtractLane, 2, 1) \
+ F(Uint32x4ReplaceLane, 3, 1) \
+ F(Uint32x4Add, 2, 1) \
+ F(Uint32x4Sub, 2, 1) \
+ F(Uint32x4Mul, 2, 1) \
+ F(Uint32x4Min, 2, 1) \
+ F(Uint32x4Max, 2, 1) \
+ F(Uint32x4And, 2, 1) \
+ F(Uint32x4Or, 2, 1) \
+ F(Uint32x4Xor, 2, 1) \
+ F(Uint32x4Not, 1, 1) \
+ F(Uint32x4ShiftLeftByScalar, 2, 1) \
+ F(Uint32x4ShiftRightByScalar, 2, 1) \
+ F(Uint32x4Equal, 2, 1) \
+ F(Uint32x4NotEqual, 2, 1) \
+ F(Uint32x4LessThan, 2, 1) \
+ F(Uint32x4LessThanOrEqual, 2, 1) \
+ F(Uint32x4GreaterThan, 2, 1) \
+ F(Uint32x4GreaterThanOrEqual, 2, 1) \
+ F(Uint32x4Select, 3, 1) \
+ F(Uint32x4Swizzle, 5, 1) \
+ F(Uint32x4Shuffle, 6, 1) \
+ F(Uint32x4FromFloat32x4, 1, 1) \
+ F(Uint32x4FromInt32x4, 1, 1) \
+ F(Uint32x4FromFloat32x4Bits, 1, 1) \
+ F(Uint32x4FromInt32x4Bits, 1, 1) \
+ F(Uint32x4FromInt16x8Bits, 1, 1) \
+ F(Uint32x4FromUint16x8Bits, 1, 1) \
+ F(Uint32x4FromInt8x16Bits, 1, 1) \
+ F(Uint32x4FromUint8x16Bits, 1, 1) \
+ F(Uint32x4Load, 2, 1) \
+ F(Uint32x4Load1, 2, 1) \
+ F(Uint32x4Load2, 2, 1) \
+ F(Uint32x4Load3, 2, 1) \
+ F(Uint32x4Store, 3, 1) \
+ F(Uint32x4Store1, 3, 1) \
+ F(Uint32x4Store2, 3, 1) \
+ F(Uint32x4Store3, 3, 1) \
+ F(Bool32x4Check, 1, 1) \
+ F(Bool32x4ExtractLane, 2, 1) \
+ F(Bool32x4ReplaceLane, 3, 1) \
+ F(Bool32x4And, 2, 1) \
+ F(Bool32x4Or, 2, 1) \
+ F(Bool32x4Xor, 2, 1) \
+ F(Bool32x4Not, 1, 1) \
+ F(Bool32x4AnyTrue, 1, 1) \
+ F(Bool32x4AllTrue, 1, 1) \
+ F(Bool32x4Swizzle, 5, 1) \
+ F(Bool32x4Shuffle, 6, 1) \
+ F(Int16x8Check, 1, 1) \
+ F(Int16x8ExtractLane, 2, 1) \
+ F(Int16x8ReplaceLane, 3, 1) \
+ F(Int16x8Neg, 1, 1) \
+ F(Int16x8Add, 2, 1) \
+ F(Int16x8AddSaturate, 2, 1) \
+ F(Int16x8Sub, 2, 1) \
+ F(Int16x8SubSaturate, 2, 1) \
+ F(Int16x8Mul, 2, 1) \
+ F(Int16x8Min, 2, 1) \
+ F(Int16x8Max, 2, 1) \
+ F(Int16x8And, 2, 1) \
+ F(Int16x8Or, 2, 1) \
+ F(Int16x8Xor, 2, 1) \
+ F(Int16x8Not, 1, 1) \
+ F(Int16x8ShiftLeftByScalar, 2, 1) \
+ F(Int16x8ShiftRightByScalar, 2, 1) \
+ F(Int16x8Equal, 2, 1) \
+ F(Int16x8NotEqual, 2, 1) \
+ F(Int16x8LessThan, 2, 1) \
+ F(Int16x8LessThanOrEqual, 2, 1) \
+ F(Int16x8GreaterThan, 2, 1) \
+ F(Int16x8GreaterThanOrEqual, 2, 1) \
+ F(Int16x8Select, 3, 1) \
+ F(Int16x8Swizzle, 9, 1) \
+ F(Int16x8Shuffle, 10, 1) \
+ F(Int16x8FromUint16x8, 1, 1) \
+ F(Int16x8FromFloat32x4Bits, 1, 1) \
+ F(Int16x8FromInt32x4Bits, 1, 1) \
+ F(Int16x8FromUint32x4Bits, 1, 1) \
+ F(Int16x8FromUint16x8Bits, 1, 1) \
+ F(Int16x8FromInt8x16Bits, 1, 1) \
+ F(Int16x8FromUint8x16Bits, 1, 1) \
+ F(Int16x8Load, 2, 1) \
+ F(Int16x8Store, 3, 1) \
+ F(Uint16x8Check, 1, 1) \
+ F(Uint16x8ExtractLane, 2, 1) \
+ F(Uint16x8ReplaceLane, 3, 1) \
+ F(Uint16x8Add, 2, 1) \
+ F(Uint16x8AddSaturate, 2, 1) \
+ F(Uint16x8Sub, 2, 1) \
+ F(Uint16x8SubSaturate, 2, 1) \
+ F(Uint16x8Mul, 2, 1) \
+ F(Uint16x8Min, 2, 1) \
+ F(Uint16x8Max, 2, 1) \
+ F(Uint16x8And, 2, 1) \
+ F(Uint16x8Or, 2, 1) \
+ F(Uint16x8Xor, 2, 1) \
+ F(Uint16x8Not, 1, 1) \
+ F(Uint16x8ShiftLeftByScalar, 2, 1) \
+ F(Uint16x8ShiftRightByScalar, 2, 1) \
+ F(Uint16x8Equal, 2, 1) \
+ F(Uint16x8NotEqual, 2, 1) \
+ F(Uint16x8LessThan, 2, 1) \
+ F(Uint16x8LessThanOrEqual, 2, 1) \
+ F(Uint16x8GreaterThan, 2, 1) \
+ F(Uint16x8GreaterThanOrEqual, 2, 1) \
+ F(Uint16x8Select, 3, 1) \
+ F(Uint16x8Swizzle, 9, 1) \
+ F(Uint16x8Shuffle, 10, 1) \
+ F(Uint16x8FromInt16x8, 1, 1) \
+ F(Uint16x8FromFloat32x4Bits, 1, 1) \
+ F(Uint16x8FromInt32x4Bits, 1, 1) \
+ F(Uint16x8FromUint32x4Bits, 1, 1) \
+ F(Uint16x8FromInt16x8Bits, 1, 1) \
+ F(Uint16x8FromInt8x16Bits, 1, 1) \
+ F(Uint16x8FromUint8x16Bits, 1, 1) \
+ F(Uint16x8Load, 2, 1) \
+ F(Uint16x8Store, 3, 1) \
+ F(Bool16x8Check, 1, 1) \
+ F(Bool16x8ExtractLane, 2, 1) \
+ F(Bool16x8ReplaceLane, 3, 1) \
+ F(Bool16x8And, 2, 1) \
+ F(Bool16x8Or, 2, 1) \
+ F(Bool16x8Xor, 2, 1) \
+ F(Bool16x8Not, 1, 1) \
+ F(Bool16x8AnyTrue, 1, 1) \
+ F(Bool16x8AllTrue, 1, 1) \
+ F(Bool16x8Swizzle, 9, 1) \
+ F(Bool16x8Shuffle, 10, 1) \
+ F(Int8x16Check, 1, 1) \
+ F(Int8x16ExtractLane, 2, 1) \
+ F(Int8x16ReplaceLane, 3, 1) \
+ F(Int8x16Neg, 1, 1) \
+ F(Int8x16Add, 2, 1) \
+ F(Int8x16AddSaturate, 2, 1) \
+ F(Int8x16Sub, 2, 1) \
+ F(Int8x16SubSaturate, 2, 1) \
+ F(Int8x16Mul, 2, 1) \
+ F(Int8x16Min, 2, 1) \
+ F(Int8x16Max, 2, 1) \
+ F(Int8x16And, 2, 1) \
+ F(Int8x16Or, 2, 1) \
+ F(Int8x16Xor, 2, 1) \
+ F(Int8x16Not, 1, 1) \
+ F(Int8x16ShiftLeftByScalar, 2, 1) \
+ F(Int8x16ShiftRightByScalar, 2, 1) \
+ F(Int8x16Equal, 2, 1) \
+ F(Int8x16NotEqual, 2, 1) \
+ F(Int8x16LessThan, 2, 1) \
+ F(Int8x16LessThanOrEqual, 2, 1) \
+ F(Int8x16GreaterThan, 2, 1) \
+ F(Int8x16GreaterThanOrEqual, 2, 1) \
+ F(Int8x16Select, 3, 1) \
+ F(Int8x16Swizzle, 17, 1) \
+ F(Int8x16Shuffle, 18, 1) \
+ F(Int8x16FromUint8x16, 1, 1) \
+ F(Int8x16FromFloat32x4Bits, 1, 1) \
+ F(Int8x16FromInt32x4Bits, 1, 1) \
+ F(Int8x16FromUint32x4Bits, 1, 1) \
+ F(Int8x16FromInt16x8Bits, 1, 1) \
+ F(Int8x16FromUint16x8Bits, 1, 1) \
+ F(Int8x16FromUint8x16Bits, 1, 1) \
+ F(Int8x16Load, 2, 1) \
+ F(Int8x16Store, 3, 1) \
+ F(Uint8x16Check, 1, 1) \
+ F(Uint8x16ExtractLane, 2, 1) \
+ F(Uint8x16ReplaceLane, 3, 1) \
+ F(Uint8x16Add, 2, 1) \
+ F(Uint8x16AddSaturate, 2, 1) \
+ F(Uint8x16Sub, 2, 1) \
+ F(Uint8x16SubSaturate, 2, 1) \
+ F(Uint8x16Mul, 2, 1) \
+ F(Uint8x16Min, 2, 1) \
+ F(Uint8x16Max, 2, 1) \
+ F(Uint8x16And, 2, 1) \
+ F(Uint8x16Or, 2, 1) \
+ F(Uint8x16Xor, 2, 1) \
+ F(Uint8x16Not, 1, 1) \
+ F(Uint8x16ShiftLeftByScalar, 2, 1) \
+ F(Uint8x16ShiftRightByScalar, 2, 1) \
+ F(Uint8x16Equal, 2, 1) \
+ F(Uint8x16NotEqual, 2, 1) \
+ F(Uint8x16LessThan, 2, 1) \
+ F(Uint8x16LessThanOrEqual, 2, 1) \
+ F(Uint8x16GreaterThan, 2, 1) \
+ F(Uint8x16GreaterThanOrEqual, 2, 1) \
+ F(Uint8x16Select, 3, 1) \
+ F(Uint8x16Swizzle, 17, 1) \
+ F(Uint8x16Shuffle, 18, 1) \
+ F(Uint8x16FromInt8x16, 1, 1) \
+ F(Uint8x16FromFloat32x4Bits, 1, 1) \
+ F(Uint8x16FromInt32x4Bits, 1, 1) \
+ F(Uint8x16FromUint32x4Bits, 1, 1) \
+ F(Uint8x16FromInt16x8Bits, 1, 1) \
+ F(Uint8x16FromUint16x8Bits, 1, 1) \
+ F(Uint8x16FromInt8x16Bits, 1, 1) \
+ F(Uint8x16Load, 2, 1) \
+ F(Uint8x16Store, 3, 1) \
+ F(Bool8x16Check, 1, 1) \
+ F(Bool8x16ExtractLane, 2, 1) \
+ F(Bool8x16ReplaceLane, 3, 1) \
+ F(Bool8x16And, 2, 1) \
+ F(Bool8x16Or, 2, 1) \
+ F(Bool8x16Xor, 2, 1) \
+ F(Bool8x16Not, 1, 1) \
+ F(Bool8x16AnyTrue, 1, 1) \
+ F(Bool8x16AllTrue, 1, 1) \
+ F(Bool8x16Swizzle, 17, 1) \
F(Bool8x16Shuffle, 18, 1)
@@ -759,7 +915,6 @@ namespace internal {
F(StringTrim, 3, 1) \
F(TruncateString, 2, 1) \
F(NewString, 2, 1) \
- F(NewConsString, 4, 1) \
F(StringEquals, 2, 1) \
F(FlattenString, 1, 1) \
F(StringCharFromCode, 1, 1) \
@@ -769,15 +924,14 @@ namespace internal {
F(TwoByteSeqStringGetChar, 2, 1) \
F(TwoByteSeqStringSetChar, 3, 1) \
F(StringCharCodeAt, 2, 1) \
- F(IsStringWrapperSafeForDefaultValueOf, 1, 1) \
F(StringGetLength, 1, 1)
#define FOR_EACH_INTRINSIC_SYMBOL(F) \
F(CreateSymbol, 1, 1) \
F(CreatePrivateSymbol, 1, 1) \
- F(CreateGlobalPrivateSymbol, 1, 1) \
F(SymbolDescription, 1, 1) \
+ F(SymbolDescriptiveString, 1, 1) \
F(SymbolRegistry, 0, 1) \
F(SymbolIsPrivate, 1, 1)
@@ -810,6 +964,7 @@ namespace internal {
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
F(HaveSameMap, 2, 1) \
+ F(InNewSpace, 1, 1) \
F(HasFastSmiElements, 1, 1) \
F(HasFastObjectElements, 1, 1) \
F(HasFastSmiOrObjectElements, 1, 1) \
@@ -883,7 +1038,6 @@ namespace internal {
F(LoadIC_Miss, 3, 1) \
F(KeyedLoadIC_Miss, 3, 1) \
F(CallIC_Miss, 3, 1) \
- F(CallIC_Customization_Miss, 3, 1) \
F(StoreIC_Miss, 3, 1) \
F(StoreIC_Slow, 3, 1) \
F(KeyedStoreIC_Miss, 3, 1) \
@@ -916,6 +1070,7 @@ namespace internal {
FOR_EACH_INTRINSIC_DATE(F) \
FOR_EACH_INTRINSIC_DEBUG(F) \
FOR_EACH_INTRINSIC_FORIN(F) \
+ FOR_EACH_INTRINSIC_INTERPRETER(F) \
FOR_EACH_INTRINSIC_FUNCTION(F) \
FOR_EACH_INTRINSIC_FUTEX(F) \
FOR_EACH_INTRINSIC_GENERATOR(F) \
@@ -928,6 +1083,7 @@ namespace internal {
FOR_EACH_INTRINSIC_NUMBERS(F) \
FOR_EACH_INTRINSIC_OBJECT(F) \
FOR_EACH_INTRINSIC_OBSERVE(F) \
+ FOR_EACH_INTRINSIC_OPERATORS(F) \
FOR_EACH_INTRINSIC_PROXY(F) \
FOR_EACH_INTRINSIC_REGEXP(F) \
FOR_EACH_INTRINSIC_SCOPES(F) \
@@ -975,18 +1131,16 @@ class RuntimeState {
};
-class JavaScriptFrameIterator; // Forward declaration.
-
-
class Runtime : public AllStatic {
public:
enum FunctionId {
#define F(name, nargs, ressize) k##name,
#define I(name, nargs, ressize) kInline##name,
- FOR_EACH_INTRINSIC(F) FOR_EACH_INTRINSIC(I)
+ FOR_EACH_INTRINSIC(F)
+ FOR_EACH_INTRINSIC(I)
#undef I
#undef F
- kNumFunctions,
+ kNumFunctions,
};
enum IntrinsicType { RUNTIME, INLINE };
@@ -998,14 +1152,15 @@ class Runtime : public AllStatic {
// The JS name of the function.
const char* name;
- // The C++ (native) entry point. NULL if the function is inlined.
- byte* entry;
+ // For RUNTIME functions, this is the C++ entry point.
+ // For INLINE functions, this is the C++ entry point of the fallback.
+ Address entry;
// The number of arguments expected. nargs is -1 if the function takes
// a variable number of arguments.
- int nargs;
+ int8_t nargs;
// Size of result. Most functions return a single pointer, size 1.
- int result_size;
+ int8_t result_size;
};
static const int kNotFound = -1;
@@ -1036,31 +1191,8 @@ class Runtime : public AllStatic {
Isolate* isolate, Handle<Object> object, Handle<Object> key,
LanguageMode language_mode = SLOPPY);
- MUST_USE_RESULT static MaybeHandle<Object> KeyedGetObjectProperty(
- Isolate* isolate, Handle<Object> receiver_obj, Handle<Object> key_obj,
- LanguageMode language_mode);
-
- MUST_USE_RESULT static MaybeHandle<Object> GetPrototype(
- Isolate* isolate, Handle<Object> object);
-
- MUST_USE_RESULT static MaybeHandle<Name> ToName(Isolate* isolate,
- Handle<Object> key);
-
- static void SetupArrayBuffer(Isolate* isolate,
- Handle<JSArrayBuffer> array_buffer,
- bool is_external, void* data,
- size_t allocated_length,
- SharedFlag shared = SharedFlag::kNotShared);
-
- static bool SetupArrayBufferAllocatingData(
- Isolate* isolate, Handle<JSArrayBuffer> array_buffer,
- size_t allocated_length, bool initialize = true,
- SharedFlag shared = SharedFlag::kNotShared);
-
- static void NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer);
-
enum TypedArrayId {
- // arrayIds below should be synchromized with typedarray.js natives.
+ // arrayIds below should be synchronized with typedarray.js natives.
ARRAY_ID_UINT8 = 1,
ARRAY_ID_INT8 = 2,
ARRAY_ID_UINT16 = 3,
@@ -1080,29 +1212,20 @@ class Runtime : public AllStatic {
// Used in runtime.cc and hydrogen's VisitArrayLiteral.
MUST_USE_RESULT static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
- Isolate* isolate, Handle<FixedArray> literals,
+ Isolate* isolate, Handle<LiteralsArray> literals,
Handle<FixedArray> elements, bool is_strong);
-
- static void JSMapInitialize(Isolate* isolate, Handle<JSMap> map);
- static void JSMapClear(Isolate* isolate, Handle<JSMap> map);
- static void JSSetInitialize(Isolate* isolate, Handle<JSSet> set);
- static void JSSetClear(Isolate* isolate, Handle<JSSet> set);
-
- static void WeakCollectionInitialize(
- Isolate* isolate, Handle<JSWeakCollection> weak_collection);
- static void WeakCollectionSet(Handle<JSWeakCollection> weak_collection,
- Handle<Object> key, Handle<Object> value,
- int32_t hash);
- static bool WeakCollectionDelete(Handle<JSWeakCollection> weak_collection,
- Handle<Object> key);
- static bool WeakCollectionDelete(Handle<JSWeakCollection> weak_collection,
- Handle<Object> key, int32_t hash);
-
static MaybeHandle<JSArray> GetInternalProperties(Isolate* isolate,
Handle<Object>);
- static bool AtomicIsLockFree(uint32_t size);
+ // Find the arguments of the JavaScript function invocation that called
+ // into C++ code. Collect these in a newly allocated array of handles
+ // (possibly prefixed by a number of empty handles).
+ // TODO(mstarzinger): Temporary workaround until this is only used by the
+ // %_Arguments and %_ArgumentsLength intrinsics. Make this function local to
+ // runtime-scopes.cc then.
+ static base::SmartArrayPointer<Handle<Object>> GetCallerArguments(
+ Isolate* isolate, int prefix_argc, int* total_argc);
};
@@ -1119,29 +1242,6 @@ class DeclareGlobalsNativeFlag : public BitField<bool, 1, 1> {};
STATIC_ASSERT(LANGUAGE_END == 3);
class DeclareGlobalsLanguageMode : public BitField<LanguageMode, 2, 2> {};
-//---------------------------------------------------------------------------
-// Inline functions
-
-// Assume that 32-bit architectures don't have 64-bit atomic ops.
-// TODO(binji): can we do better here?
-#if V8_TARGET_ARCH_64_BIT && V8_HOST_ARCH_64_BIT
-
-#define ATOMICS_REQUIRE_LOCK_64_BIT 0
-
-inline bool Runtime::AtomicIsLockFree(uint32_t size) {
- return size == 1 || size == 2 || size == 4 || size == 8;
-}
-
-#else
-
-#define ATOMICS_REQUIRE_LOCK_64_BIT 1
-
-inline bool Runtime::AtomicIsLockFree(uint32_t size) {
- return size == 1 || size == 2 || size == 4;
-}
-
-#endif
-
} // namespace internal
} // namespace v8
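The FunctionId hunk above relies on the X-macro pattern used throughout runtime.h: each FOR_EACH_INTRINSIC_* list is expanded twice, once with F to produce a runtime id (kName) and once with I to produce an inline id (kInlineName). A minimal, self-contained sketch of the same technique; the intrinsic names here are invented for the example, not V8's:

    #include <cstdio>

    // One list, many expansions: each entry is Macro(name, nargs, result_size).
    #define FOR_EACH_DEMO_INTRINSIC(F) \
      F(StringAdd, 2, 1)               \
      F(MathPow, 2, 1)

    enum FunctionId {
    #define F(name, nargs, ressize) k##name,
    #define I(name, nargs, ressize) kInline##name,
      FOR_EACH_DEMO_INTRINSIC(F)  // kStringAdd, kMathPow,
      FOR_EACH_DEMO_INTRINSIC(I)  // kInlineStringAdd, kInlineMathPow,
    #undef I
    #undef F
      kNumFunctions,
    };

    int main() {
      // Two ids per listed intrinsic, so kNumFunctions == 4 here.
      std::printf("%d intrinsic ids\n", static_cast<int>(kNumFunctions));
    }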
diff --git a/deps/v8/src/scanner-character-streams.cc b/deps/v8/src/scanner-character-streams.cc
index eaaa9bc1f7..a58f392c0c 100644
--- a/deps/v8/src/scanner-character-streams.cc
+++ b/deps/v8/src/scanner-character-streams.cc
@@ -2,12 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/scanner-character-streams.h"
#include "include/v8.h"
+#include "src/globals.h"
#include "src/handles.h"
+#include "src/list-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
+#include "src/objects.h"
#include "src/unicode-inl.h"
namespace v8 {
diff --git a/deps/v8/src/scanner-character-streams.h b/deps/v8/src/scanner-character-streams.h
index f3ee20463a..a26f50a892 100644
--- a/deps/v8/src/scanner-character-streams.h
+++ b/deps/v8/src/scanner-character-streams.h
@@ -5,11 +5,16 @@
#ifndef V8_SCANNER_CHARACTER_STREAMS_H_
#define V8_SCANNER_CHARACTER_STREAMS_H_
+#include "src/handles.h"
#include "src/scanner.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
+// Forward declarations.
+class ExternalTwoByteString;
+
// A buffered character stream based on a random access character
// source (ReadBlock can be called with pos_ pointing to any position,
// even positions before the current).
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index c6c0a8d6a2..de799033b9 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -4,18 +4,17 @@
// Features shared by parsing and pre-parsing scanners.
+#include "src/scanner.h"
+
#include <stdint.h>
#include <cmath>
-#include "src/v8.h"
-
#include "src/ast-value-factory.h"
#include "src/char-predicates-inl.h"
#include "src/conversions-inl.h"
#include "src/list-inl.h"
#include "src/parser.h"
-#include "src/scanner.h"
namespace v8 {
namespace internal {
@@ -238,6 +237,11 @@ Token::Value Scanner::Next() {
next_.location.end_pos = current_.location.end_pos;
}
current_ = next_;
+ if (V8_UNLIKELY(next_next_.token != Token::UNINITIALIZED)) {
+ next_ = next_next_;
+ next_next_.token = Token::UNINITIALIZED;
+ return current_.token;
+ }
has_line_terminator_before_next_ = false;
has_multiline_comment_before_next_ = false;
if (static_cast<unsigned>(c0_) <= 0x7f) {
@@ -256,6 +260,20 @@ Token::Value Scanner::Next() {
}
+Token::Value Scanner::PeekAhead() {
+ if (next_next_.token != Token::UNINITIALIZED) {
+ return next_next_.token;
+ }
+ TokenDesc prev = current_;
+ Next();
+ Token::Value ret = next_.token;
+ next_next_ = next_;
+ next_ = current_;
+ current_ = prev;
+ return ret;
+}
+
+
// TODO(yangguo): check whether this is actually necessary.
static inline bool IsLittleEndianByteOrderMark(uc32 c) {
// The Unicode value U+FFFE is guaranteed never to be assigned as a
@@ -1433,7 +1451,7 @@ int Scanner::FindSymbol(DuplicateFinder* finder, int value) {
bool Scanner::SetBookmark() {
if (c0_ != kNoBookmark && bookmark_c0_ == kNoBookmark &&
- source_->SetBookmark()) {
+ next_next_.token == Token::UNINITIALIZED && source_->SetBookmark()) {
bookmark_c0_ = c0_;
CopyTokenDesc(&bookmark_current_, &current_);
CopyTokenDesc(&bookmark_next_, &next_);
@@ -1561,7 +1579,7 @@ uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
// Primitive hash function, almost identical to the one used
// for strings (except that it's seeded by the length and representation).
int length = key.length();
- uint32_t hash = (length << 1) | (is_one_byte ? 1 : 0) ;
+ uint32_t hash = (length << 1) | (is_one_byte ? 1 : 0);
for (int i = 0; i < length; i++) {
uint32_t c = key[i];
hash = (hash + c) * 1025;
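The Next()/PeekAhead() hunks above give the scanner a second token of lookahead: PeekAhead() scans one token further and stashes it in next_next_, and Next() drains the stash before touching fresh input. This is also why SetBookmark() now refuses to set a bookmark while a stashed token exists. A simplified, stash-based sketch of the same idea over single characters; ToyScanner and all names below are invented for illustration:

    #include <cctype>
    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <utility>

    // A toy scanner with the same trick: PeekAhead() scans one extra token
    // into a stash, and Next() drains the stash before scanning fresh input.
    class ToyScanner {
     public:
      explicit ToyScanner(std::string src) : src_(std::move(src)) {
        next_ = Scan();
      }

      char Next() {
        current_ = next_;
        if (next_next_ != kNone) {  // Drain the stashed token first.
          next_ = next_next_;
          next_next_ = kNone;
        } else {
          next_ = Scan();
        }
        return current_;
      }

      char Peek() const { return next_; }

      char PeekAhead() {
        if (next_next_ == kNone) next_next_ = Scan();  // Scan once, cache.
        return next_next_;
      }

     private:
      static constexpr char kNone = '\0';

      char Scan() {
        while (pos_ < src_.size() &&
               std::isspace(static_cast<unsigned char>(src_[pos_]))) {
          ++pos_;
        }
        return pos_ < src_.size() ? src_[pos_++] : kNone;
      }

      std::string src_;
      std::size_t pos_ = 0;
      char current_ = kNone;
      char next_ = kNone;
      char next_next_ = kNone;
    };

    int main() {
      ToyScanner s("a b c");
      std::printf("peek=%c peekahead=%c\n", s.Peek(), s.PeekAhead());  // a b
      for (int i = 0; i < 3; ++i) std::printf("next=%c\n", s.Next());  // a b c
    }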
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 92588905ad..a86ed07ab9 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -25,6 +25,7 @@ namespace internal {
class AstRawString;
class AstValueFactory;
class ParserRecorder;
+class UnicodeCache;
// Returns the value (0 .. 15) of a hexadecimal character c.
@@ -109,45 +110,6 @@ class Utf16CharacterStream {
// ---------------------------------------------------------------------
-// Caching predicates used by scanners.
-
-class UnicodeCache {
- public:
- UnicodeCache() {}
- typedef unibrow::Utf8Decoder<512> Utf8Decoder;
-
- StaticResource<Utf8Decoder>* utf8_decoder() {
- return &utf8_decoder_;
- }
-
- bool IsIdentifierStart(unibrow::uchar c) { return kIsIdentifierStart.get(c); }
- bool IsIdentifierPart(unibrow::uchar c) { return kIsIdentifierPart.get(c); }
- bool IsLineTerminator(unibrow::uchar c) { return kIsLineTerminator.get(c); }
- bool IsLineTerminatorSequence(unibrow::uchar c, unibrow::uchar next) {
- if (!IsLineTerminator(c)) return false;
- if (c == 0x000d && next == 0x000a) return false; // CR with following LF.
- return true;
- }
-
- bool IsWhiteSpace(unibrow::uchar c) { return kIsWhiteSpace.get(c); }
- bool IsWhiteSpaceOrLineTerminator(unibrow::uchar c) {
- return kIsWhiteSpaceOrLineTerminator.get(c);
- }
-
- private:
- unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
- unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
- unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
- unibrow::Predicate<WhiteSpace, 128> kIsWhiteSpace;
- unibrow::Predicate<WhiteSpaceOrLineTerminator, 128>
- kIsWhiteSpaceOrLineTerminator;
- StaticResource<Utf8Decoder> utf8_decoder_;
-
- DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
-};
-
-
-// ---------------------------------------------------------------------
// DuplicateFinder discovers duplicate symbols.
class DuplicateFinder {
@@ -399,6 +361,8 @@ class Scanner {
// Returns the next token and advances input.
Token::Value Next();
+ // Returns the token following peek().
+ Token::Value PeekAhead();
// Returns the current token again.
Token::Value current_token() { return current_.token; }
// Returns the location information for the current token
@@ -527,6 +491,7 @@ class Scanner {
// Initialize current_ to not refer to a literal.
current_.literal_chars = NULL;
current_.raw_literal_chars = NULL;
+ next_next_.token = Token::UNINITIALIZED;
}
// Support BookmarkScope functionality.
@@ -539,16 +504,22 @@ class Scanner {
// Literal buffer support
inline void StartLiteral() {
- LiteralBuffer* free_buffer = (current_.literal_chars == &literal_buffer1_) ?
- &literal_buffer2_ : &literal_buffer1_;
+ LiteralBuffer* free_buffer =
+ (current_.literal_chars == &literal_buffer0_)
+ ? &literal_buffer1_
+ : (current_.literal_chars == &literal_buffer1_) ? &literal_buffer2_
+ : &literal_buffer0_;
free_buffer->Reset();
next_.literal_chars = free_buffer;
}
inline void StartRawLiteral() {
LiteralBuffer* free_buffer =
- (current_.raw_literal_chars == &raw_literal_buffer1_) ?
- &raw_literal_buffer2_ : &raw_literal_buffer1_;
+ (current_.raw_literal_chars == &raw_literal_buffer0_)
+ ? &raw_literal_buffer1_
+ : (current_.raw_literal_chars == &raw_literal_buffer1_)
+ ? &raw_literal_buffer2_
+ : &raw_literal_buffer0_;
free_buffer->Reset();
next_.raw_literal_chars = free_buffer;
}
@@ -725,6 +696,7 @@ class Scanner {
UnicodeCache* unicode_cache_;
// Buffers collecting literal strings, numbers, etc.
+ LiteralBuffer literal_buffer0_;
LiteralBuffer literal_buffer1_;
LiteralBuffer literal_buffer2_;
@@ -733,11 +705,13 @@ class Scanner {
LiteralBuffer source_mapping_url_;
// Buffer to store raw string values
+ LiteralBuffer raw_literal_buffer0_;
LiteralBuffer raw_literal_buffer1_;
LiteralBuffer raw_literal_buffer2_;
- TokenDesc current_; // desc for current token (as returned by Next())
- TokenDesc next_; // desc for next token (one token look-ahead)
+ TokenDesc current_; // desc for current token (as returned by Next())
+ TokenDesc next_; // desc for next token (one token look-ahead)
+ TokenDesc next_next_; // desc for the token after next (after PeekAhead())
// Variables for Scanner::BookmarkScope and the *Bookmark implementation.
// These variables contain the scanner state when a bookmark is set.
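With current_, next_ and next_next_ all able to hold a literal at once, two literal buffers no longer suffice; the StartLiteral()/StartRawLiteral() hunks above therefore rotate through three, always handing out a buffer the current token is not using. A minimal sketch of just that selection step, with invented names:

    #include <cassert>

    // Hand out whichever buffer the current token is not backed by,
    // cycling 0 -> 1 -> 2 -> 0.
    struct LiteralBuffer { /* contents elided */ };

    LiteralBuffer* NextFreeBuffer(LiteralBuffer* current, LiteralBuffer* buf) {
      if (current == &buf[0]) return &buf[1];
      if (current == &buf[1]) return &buf[2];
      return &buf[0];  // current is &buf[2], or no buffer at all yet.
    }

    int main() {
      LiteralBuffer bufs[3];
      assert(NextFreeBuffer(&bufs[0], bufs) == &bufs[1]);
      assert(NextFreeBuffer(&bufs[1], bufs) == &bufs[2]);
      assert(NextFreeBuffer(&bufs[2], bufs) == &bufs[0]);
      return 0;
    }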
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index e53f36d274..732908a9e6 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <stdlib.h>
+#include "src/scopeinfo.h"
-#include "src/v8.h"
+#include <stdlib.h>
#include "src/bootstrapper.h"
-#include "src/scopeinfo.h"
#include "src/scopes.h"
namespace v8 {
@@ -89,6 +88,7 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
int flags = ScopeTypeField::encode(scope->scope_type()) |
CallsEvalField::encode(scope->calls_eval()) |
LanguageModeField::encode(scope->language_mode()) |
+ DeclarationScopeField::encode(scope->is_declaration_scope()) |
ReceiverVariableField::encode(receiver_info) |
FunctionVariableField::encode(function_name_info) |
FunctionVariableMode::encode(function_variable_mode) |
@@ -244,6 +244,7 @@ Handle<ScopeInfo> ScopeInfo::CreateGlobalThisBinding(Isolate* isolate) {
int flags = ScopeTypeField::encode(SCRIPT_SCOPE) |
CallsEvalField::encode(false) |
LanguageModeField::encode(SLOPPY) |
+ DeclarationScopeField::encode(true) |
ReceiverVariableField::encode(receiver_info) |
FunctionVariableField::encode(function_name_info) |
FunctionVariableMode::encode(function_variable_mode) |
@@ -311,6 +312,11 @@ LanguageMode ScopeInfo::language_mode() {
}
+bool ScopeInfo::is_declaration_scope() {
+ return DeclarationScopeField::decode(Flags());
+}
+
+
int ScopeInfo::LocalCount() {
return StackLocalCount() + ContextLocalCount();
}
@@ -335,6 +341,8 @@ int ScopeInfo::ContextLength() {
bool has_context = context_locals > 0 || context_globals > 0 ||
function_name_context_slot ||
scope_type() == WITH_SCOPE ||
+ (scope_type() == BLOCK_SCOPE && CallsSloppyEval() &&
+ is_declaration_scope()) ||
(scope_type() == ARROW_SCOPE && CallsSloppyEval()) ||
(scope_type() == FUNCTION_SCOPE && CallsSloppyEval()) ||
scope_type() == MODULE_SCOPE;
@@ -516,57 +524,73 @@ int ScopeInfo::StackSlotIndex(String* name) {
int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
Handle<String> name, VariableMode* mode,
- VariableLocation* location,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
DCHECK(name->IsInternalizedString());
DCHECK(mode != NULL);
- DCHECK(location != NULL);
DCHECK(init_flag != NULL);
if (scope_info->length() > 0) {
ContextSlotCache* context_slot_cache =
scope_info->GetIsolate()->context_slot_cache();
- int result = context_slot_cache->Lookup(*scope_info, *name, mode, location,
- init_flag, maybe_assigned_flag);
+ int result = context_slot_cache->Lookup(*scope_info, *name, mode, init_flag,
+ maybe_assigned_flag);
if (result != ContextSlotCache::kNotFound) {
DCHECK(result < scope_info->ContextLength());
return result;
}
+ int start = scope_info->ContextLocalNameEntriesIndex();
+ int end = scope_info->ContextLocalNameEntriesIndex() +
+ scope_info->ContextLocalCount();
+ for (int i = start; i < end; ++i) {
+ if (*name == scope_info->get(i)) {
+ int var = i - start;
+ *mode = scope_info->ContextLocalMode(var);
+ *init_flag = scope_info->ContextLocalInitFlag(var);
+ *maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
+ result = Context::MIN_CONTEXT_SLOTS + var;
+
+ context_slot_cache->Update(scope_info, name, *mode, *init_flag,
+ *maybe_assigned_flag, result);
+ DCHECK(result < scope_info->ContextLength());
+ return result;
+ }
+ }
+ // Cache as not found. Mode, init flag and maybe assigned flag don't matter.
+ context_slot_cache->Update(scope_info, name, TEMPORARY,
+ kNeedsInitialization, kNotAssigned, -1);
+ }
+ return -1;
+}
+
+
+int ScopeInfo::ContextGlobalSlotIndex(Handle<ScopeInfo> scope_info,
+ Handle<String> name, VariableMode* mode,
+ InitializationFlag* init_flag,
+ MaybeAssignedFlag* maybe_assigned_flag) {
+ DCHECK(name->IsInternalizedString());
+ DCHECK(mode != NULL);
+ DCHECK(init_flag != NULL);
+ if (scope_info->length() > 0) {
+ // This is to ensure that ContextLocalMode() and related queries work.
DCHECK_EQ(scope_info->ContextGlobalNameEntriesIndex(),
scope_info->ContextLocalNameEntriesIndex() +
scope_info->ContextLocalCount());
- int start = scope_info->ContextLocalNameEntriesIndex();
+ int base = scope_info->ContextLocalNameEntriesIndex();
+ int start = scope_info->ContextGlobalNameEntriesIndex();
int end = scope_info->ContextGlobalNameEntriesIndex() +
scope_info->ContextGlobalCount();
for (int i = start; i < end; ++i) {
if (*name == scope_info->get(i)) {
- int var = i - start;
+ int var = i - base;
*mode = scope_info->ContextLocalMode(var);
*init_flag = scope_info->ContextLocalInitFlag(var);
*maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
-
- if (var < scope_info->ContextLocalCount()) {
- *location = VariableLocation::CONTEXT;
- result = Context::MIN_CONTEXT_SLOTS + var;
- } else {
- var -= scope_info->ContextLocalCount();
- *location = VariableLocation::GLOBAL;
- result = Context::MIN_CONTEXT_SLOTS +
- scope_info->ContextLocalCount() + var;
- }
-
- context_slot_cache->Update(scope_info, name, *mode, *location,
- *init_flag, *maybe_assigned_flag, result);
+ int result = Context::MIN_CONTEXT_SLOTS + var;
DCHECK(result < scope_info->ContextLength());
return result;
}
}
- // Cache as not found. Mode, location, init flag and maybe assigned flag
- // don't matter.
- context_slot_cache->Update(scope_info, name, TEMPORARY,
- VariableLocation::CONTEXT, kNeedsInitialization,
- kNotAssigned, -1);
}
return -1;
}
@@ -693,7 +717,6 @@ int ContextSlotCache::Hash(Object* data, String* name) {
int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
- VariableLocation* location,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
int index = Hash(data, name);
@@ -701,7 +724,6 @@ int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
if ((key.data == data) && key.name->Equals(name)) {
Value result(values_[index]);
if (mode != NULL) *mode = result.mode();
- if (location != NULL) *location = result.location();
if (init_flag != NULL) *init_flag = result.initialization_flag();
if (maybe_assigned_flag != NULL)
*maybe_assigned_flag = result.maybe_assigned_flag();
@@ -712,8 +734,7 @@ int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
void ContextSlotCache::Update(Handle<Object> data, Handle<String> name,
- VariableMode mode, VariableLocation location,
- InitializationFlag init_flag,
+ VariableMode mode, InitializationFlag init_flag,
MaybeAssignedFlag maybe_assigned_flag,
int slot_index) {
DisallowHeapAllocation no_gc;
@@ -726,11 +747,10 @@ void ContextSlotCache::Update(Handle<Object> data, Handle<String> name,
key.data = *data;
key.name = *internalized_name;
// Please note value only takes a uint as index.
- values_[index] = Value(mode, location, init_flag, maybe_assigned_flag,
+ values_[index] = Value(mode, init_flag, maybe_assigned_flag,
slot_index - kNotFound).raw();
#ifdef DEBUG
- ValidateEntry(data, name, mode, location, init_flag, maybe_assigned_flag,
- slot_index);
+ ValidateEntry(data, name, mode, init_flag, maybe_assigned_flag, slot_index);
#endif
}
}
@@ -745,7 +765,6 @@ void ContextSlotCache::Clear() {
void ContextSlotCache::ValidateEntry(Handle<Object> data, Handle<String> name,
VariableMode mode,
- VariableLocation location,
InitializationFlag init_flag,
MaybeAssignedFlag maybe_assigned_flag,
int slot_index) {
@@ -759,7 +778,6 @@ void ContextSlotCache::ValidateEntry(Handle<Object> data, Handle<String> name,
DCHECK(key.name->Equals(*name));
Value result(values_[index]);
DCHECK(result.mode() == mode);
- DCHECK(result.location() == location);
DCHECK(result.initialization_flag() == init_flag);
DCHECK(result.maybe_assigned_flag() == maybe_assigned_flag);
DCHECK(result.index() + kNotFound == slot_index);
@@ -795,19 +813,15 @@ void ScopeInfo::Print() {
}
PrintF("{");
- PrintList("parameters", 0,
- ParameterEntriesIndex(),
- ParameterEntriesIndex() + ParameterCount(),
- this);
- PrintList("stack slots", 0,
- StackLocalEntriesIndex(),
- StackLocalEntriesIndex() + StackLocalCount(),
- this);
- PrintList("context slots",
- Context::MIN_CONTEXT_SLOTS,
- ContextLocalNameEntriesIndex(),
- ContextLocalNameEntriesIndex() + ContextLocalCount(),
- this);
+ if (length() > 0) {
+ PrintList("parameters", 0, ParameterEntriesIndex(),
+ ParameterEntriesIndex() + ParameterCount(), this);
+ PrintList("stack slots", 0, StackLocalEntriesIndex(),
+ StackLocalEntriesIndex() + StackLocalCount(), this);
+ PrintList("context slots", Context::MIN_CONTEXT_SLOTS,
+ ContextLocalNameEntriesIndex(),
+ ContextLocalNameEntriesIndex() + ContextLocalCount(), this);
+ }
PrintF("}\n");
}
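Note that the rewritten ContextSlotIndex() above caches misses as well as hits: after a failed linear scan it stores kNotFound under the name, so the next query for the same name skips the walk entirely. A sketch of that negative-caching shape using standard containers; all names here are invented:

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    constexpr int kNotFound = -1;

    class SlotCache {
     public:
      bool Lookup(const std::string& name, int* index) const {
        auto it = cache_.find(name);
        if (it == cache_.end()) return false;
        *index = it->second;  // May legitimately be kNotFound.
        return true;
      }
      void Update(const std::string& name, int index) { cache_[name] = index; }

     private:
      std::unordered_map<std::string, int> cache_;
    };

    int SlotIndex(SlotCache* cache, const std::string& name) {
      int index;
      if (cache->Lookup(name, &index)) return index;  // Hit, even for misses.
      index = kNotFound;  // The real code does a linear scan here; elided.
      cache->Update(name, index);  // Cache the miss, too.
      return index;
    }

    int main() {
      SlotCache cache;
      std::printf("%d\n", SlotIndex(&cache, "x"));  // Scans, caches kNotFound.
      std::printf("%d\n", SlotIndex(&cache, "x"));  // Served from the cache.
    }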
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index adefaef974..70a17cd7d4 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -21,12 +21,12 @@ class ContextSlotCache {
// Lookup context slot index for (data, name).
// If absent, kNotFound is returned.
int Lookup(Object* data, String* name, VariableMode* mode,
- VariableLocation* location, InitializationFlag* init_flag,
+ InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag);
// Update an element in the cache.
void Update(Handle<Object> data, Handle<String> name, VariableMode mode,
- VariableLocation location, InitializationFlag init_flag,
+ InitializationFlag init_flag,
MaybeAssignedFlag maybe_assigned_flag, int slot_index);
// Clear the cache.
@@ -47,8 +47,7 @@ class ContextSlotCache {
#ifdef DEBUG
void ValidateEntry(Handle<Object> data, Handle<String> name,
- VariableMode mode, VariableLocation location,
- InitializationFlag init_flag,
+ VariableMode mode, InitializationFlag init_flag,
MaybeAssignedFlag maybe_assigned_flag, int slot_index);
#endif
@@ -59,26 +58,16 @@ class ContextSlotCache {
};
struct Value {
- enum VariableLocationFlag { kContext, kGlobal };
-
- Value(VariableMode mode, VariableLocation location,
- InitializationFlag init_flag, MaybeAssignedFlag maybe_assigned_flag,
- int index) {
- DCHECK(location == VariableLocation::CONTEXT ||
- location == VariableLocation::GLOBAL);
- VariableLocationFlag location_flag =
- location == VariableLocation::CONTEXT ? kContext : kGlobal;
+ Value(VariableMode mode, InitializationFlag init_flag,
+ MaybeAssignedFlag maybe_assigned_flag, int index) {
DCHECK(ModeField::is_valid(mode));
- DCHECK(VariableLocationField::is_valid(location_flag));
DCHECK(InitField::is_valid(init_flag));
DCHECK(MaybeAssignedField::is_valid(maybe_assigned_flag));
DCHECK(IndexField::is_valid(index));
value_ = ModeField::encode(mode) | IndexField::encode(index) |
- VariableLocationField::encode(location_flag) |
InitField::encode(init_flag) |
MaybeAssignedField::encode(maybe_assigned_flag);
DCHECK(mode == this->mode());
- DCHECK(location == this->location());
DCHECK(init_flag == this->initialization_flag());
DCHECK(maybe_assigned_flag == this->maybe_assigned_flag());
DCHECK(index == this->index());
@@ -90,17 +79,6 @@ class ContextSlotCache {
VariableMode mode() { return ModeField::decode(value_); }
- VariableLocation location() {
- switch (VariableLocationField::decode(value_)) {
- case kContext:
- return VariableLocation::CONTEXT;
- case kGlobal:
- return VariableLocation::GLOBAL;
- }
- UNREACHABLE();
- return VariableLocation::CONTEXT;
- }
-
InitializationFlag initialization_flag() {
return InitField::decode(value_);
}
@@ -116,9 +94,7 @@ class ContextSlotCache {
class ModeField : public BitField<VariableMode, 0, 4> {};
class InitField : public BitField<InitializationFlag, 4, 1> {};
class MaybeAssignedField : public BitField<MaybeAssignedFlag, 5, 1> {};
- class VariableLocationField : public BitField<VariableLocationFlag, 6, 1> {
- };
- class IndexField : public BitField<int, 7, 32 - 7> {};
+ class IndexField : public BitField<int, 6, 32 - 6> {};
private:
uint32_t value_;
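Removing the one-bit VariableLocationField in the Value hunk above lets IndexField start at bit 6 instead of bit 7, widening the packed slot index from 25 to 26 bits. A cut-down sketch of the BitField packing involved; this template is a simplification for illustration, not V8's actual implementation:

    #include <cstdint>
    #include <cstdio>

    // Pack small enums and an index into one uint32_t.
    template <class T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> shift);
      }
    };

    enum Mode { kVar, kLet, kConst };
    using ModeField = BitField<Mode, 0, 4>;
    using InitField = BitField<bool, 4, 1>;
    using MaybeAssignedField = BitField<bool, 5, 1>;
    using IndexField = BitField<int, 6, 32 - 6>;  // 26 bits, up from 25.

    int main() {
      uint32_t packed = ModeField::encode(kLet) | InitField::encode(true) |
                        MaybeAssignedField::encode(false) |
                        IndexField::encode(42);
      std::printf("mode=%d index=%d\n",
                  static_cast<int>(ModeField::decode(packed)),
                  IndexField::decode(packed));
    }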
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 9c0e471989..a611d7364c 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -66,6 +66,27 @@ Variable* VariableMap::Lookup(const AstRawString* name) {
}
+SloppyBlockFunctionMap::SloppyBlockFunctionMap(Zone* zone)
+ : ZoneHashMap(ZoneHashMap::PointersMatch, 8, ZoneAllocationPolicy(zone)),
+ zone_(zone) {}
+SloppyBlockFunctionMap::~SloppyBlockFunctionMap() {}
+
+
+void SloppyBlockFunctionMap::Declare(const AstRawString* name,
+ SloppyBlockFunctionStatement* stmt) {
+ // AstRawStrings are unambiguous, i.e., the same string is always represented
+ // by the same AstRawString*.
+ Entry* p =
+ ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
+ ZoneAllocationPolicy(zone_));
+ if (p->value == nullptr) {
+ p->value = new (zone_->New(sizeof(Vector))) Vector(zone_);
+ }
+ Vector* delegates = static_cast<Vector*>(p->value);
+ delegates->push_back(stmt);
+}
+
+
// ----------------------------------------------------------------------------
// Implementation of Scope
@@ -79,6 +100,7 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
decls_(4, zone),
module_descriptor_(
scope_type == MODULE_SCOPE ? ModuleDescriptor::New(zone) : NULL),
+ sloppy_block_function_map_(zone),
already_resolved_(false),
ast_value_factory_(ast_value_factory),
zone_(zone),
@@ -100,6 +122,7 @@ Scope::Scope(Zone* zone, Scope* inner_scope, ScopeType scope_type,
unresolved_(16, zone),
decls_(4, zone),
module_descriptor_(NULL),
+ sloppy_block_function_map_(zone),
already_resolved_(true),
ast_value_factory_(value_factory),
zone_(zone),
@@ -125,6 +148,7 @@ Scope::Scope(Zone* zone, Scope* inner_scope,
unresolved_(0, zone),
decls_(0, zone),
module_descriptor_(NULL),
+ sloppy_block_function_map_(zone),
already_resolved_(true),
ast_value_factory_(value_factory),
zone_(zone),
@@ -171,6 +195,7 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
outer_scope_calls_sloppy_eval_ = false;
inner_scope_calls_eval_ = false;
inner_scope_uses_arguments_ = false;
+ scope_nonlinear_ = false;
force_eager_compilation_ = false;
force_context_allocation_ = (outer_scope != NULL && !is_function_scope())
? outer_scope->has_forced_context_allocation() : false;
@@ -180,6 +205,7 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
num_global_slots_ = 0;
num_modules_ = 0;
module_var_ = NULL;
+ arity_ = 0;
has_simple_parameters_ = true;
rest_parameter_ = NULL;
rest_index_ = -1;
@@ -189,6 +215,7 @@ void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
if (!scope_info.is_null()) {
scope_calls_eval_ = scope_info->CallsEval();
language_mode_ = scope_info->language_mode();
+ is_declaration_scope_ = scope_info->is_declaration_scope();
function_kind_ = scope_info->function_kind();
}
}
@@ -212,12 +239,12 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
s->scope_inside_with_ = true;
}
} else if (context->IsScriptContext()) {
- ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
+ ScopeInfo* scope_info = context->scope_info();
current_scope = new (zone) Scope(zone, current_scope, SCRIPT_SCOPE,
Handle<ScopeInfo>(scope_info),
script_scope->ast_value_factory_);
} else if (context->IsModuleContext()) {
- ScopeInfo* scope_info = ScopeInfo::cast(context->module()->scope_info());
+ ScopeInfo* scope_info = context->module()->scope_info();
current_scope = new (zone) Scope(zone, current_scope, MODULE_SCOPE,
Handle<ScopeInfo>(scope_info),
script_scope->ast_value_factory_);
@@ -229,13 +256,13 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
if (scope_info->IsAsmFunction()) current_scope->asm_function_ = true;
if (scope_info->IsAsmModule()) current_scope->asm_module_ = true;
} else if (context->IsBlockContext()) {
- ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
+ ScopeInfo* scope_info = context->scope_info();
current_scope = new (zone)
Scope(zone, current_scope, BLOCK_SCOPE, Handle<ScopeInfo>(scope_info),
script_scope->ast_value_factory_);
} else {
DCHECK(context->IsCatchContext());
- String* name = String::cast(context->extension());
+ String* name = context->catch_name();
current_scope = new (zone) Scope(
zone, current_scope,
script_scope->ast_value_factory_->GetString(Handle<String>(name)),
@@ -286,7 +313,7 @@ bool Scope::Analyze(ParseInfo* info) {
if (!info->shared_info().is_null()) {
Object* script = info->shared_info()->script();
native = script->IsScript() &&
- Script::cast(script)->type()->value() == Script::TYPE_NATIVE;
+ Script::cast(script)->type() == Script::TYPE_NATIVE;
}
if (native ? FLAG_print_builtin_scopes : FLAG_print_scopes) scope->Print();
@@ -330,7 +357,7 @@ void Scope::Initialize() {
Variable::NORMAL, kCreatedInitialized);
}
- if (IsConciseMethod(function_kind_) || IsConstructor(function_kind_) ||
+ if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
IsAccessorFunction(function_kind_)) {
variables_.Declare(this, ast_value_factory_->this_function_string(),
CONST, Variable::NORMAL, kCreatedInitialized);
@@ -344,7 +371,10 @@ Scope* Scope::FinalizeBlockScope() {
DCHECK(temps_.is_empty());
DCHECK(params_.is_empty());
- if (num_var_or_const() > 0) return this;
+ if (num_var_or_const() > 0 ||
+ (is_declaration_scope() && calls_sloppy_eval())) {
+ return this;
+ }
// Remove this scope from outer scope.
for (int i = 0; i < outer_scope_->inner_scopes_.length(); i++) {
@@ -388,12 +418,16 @@ Variable* Scope::LookupLocal(const AstRawString* name) {
// Check context slot lookup.
VariableMode mode;
- VariableLocation location;
+ VariableLocation location = VariableLocation::CONTEXT;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned_flag;
- int index =
- ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode, &location,
- &init_flag, &maybe_assigned_flag);
+ int index = ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode,
+ &init_flag, &maybe_assigned_flag);
+ if (index < 0) {
+ location = VariableLocation::GLOBAL;
+ index = ScopeInfo::ContextGlobalSlotIndex(scope_info_, name_handle, &mode,
+ &init_flag, &maybe_assigned_flag);
+ }
if (index < 0) {
// Check parameters.
index = scope_info_->ParameterIndex(*name_handle);
@@ -461,25 +495,28 @@ Variable* Scope::Lookup(const AstRawString* name) {
}
-Variable* Scope::DeclareParameter(const AstRawString* name, VariableMode mode,
- bool is_rest, bool* is_duplicate) {
+Variable* Scope::DeclareParameter(
+ const AstRawString* name, VariableMode mode,
+ bool is_optional, bool is_rest, bool* is_duplicate) {
DCHECK(!already_resolved());
DCHECK(is_function_scope());
+ DCHECK(!is_optional || !is_rest);
Variable* var;
if (mode == TEMPORARY) {
var = NewTemporary(name);
- has_simple_parameters_ = false;
} else {
var = variables_.Declare(this, name, mode, Variable::NORMAL,
kCreatedInitialized);
// TODO(wingo): Avoid O(n^2) check.
*is_duplicate = IsDeclaredParameter(name);
}
+ if (!is_optional && !is_rest && arity_ == params_.length()) {
+ ++arity_;
+ }
if (is_rest) {
DCHECK_NULL(rest_parameter_);
rest_parameter_ = var;
rest_index_ = num_parameters();
- has_simple_parameters_ = false;
}
params_.Add(var, zone());
return var;
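The arity_ bookkeeping in the hunk above computes what becomes Function.prototype.length on the fly: the counter grows only while every parameter declared so far is plain, so it freezes at the first optional or rest parameter. A tiny sketch with an invented Params type:

    #include <cstdio>

    // Mirrors the arity_ update in DeclareParameter().
    struct Params {
      int arity = 0;  // Leading run of plain parameters.
      int count = 0;  // All declared parameters.
      void Declare(bool is_optional, bool is_rest) {
        if (!is_optional && !is_rest && arity == count) ++arity;
        ++count;
      }
    };

    int main() {
      Params p;                 // Models: function f(a, b = 1, c) {}
      p.Declare(false, false);  // a: plain, extends the leading run.
      p.Declare(true, false);   // b: optional, freezes arity.
      p.Declare(false, false);  // c: plain, but after an optional parameter.
      std::printf("length=%d\n", p.arity);  // Prints 1, matching f.length.
    }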
@@ -811,14 +848,14 @@ void Scope::ReportMessage(int start_position, int end_position,
#ifdef DEBUG
-static const char* Header(ScopeType scope_type) {
+static const char* Header(ScopeType scope_type, bool is_declaration_scope) {
switch (scope_type) {
case EVAL_SCOPE: return "eval";
case FUNCTION_SCOPE: return "function";
case MODULE_SCOPE: return "module";
case SCRIPT_SCOPE: return "global";
case CATCH_SCOPE: return "catch";
- case BLOCK_SCOPE: return "block";
+ case BLOCK_SCOPE: return is_declaration_scope ? "varblock" : "block";
case WITH_SCOPE: return "with";
case ARROW_SCOPE: return "arrow";
}
@@ -902,7 +939,7 @@ void Scope::Print(int n) {
int n1 = n0 + 2; // indentation
// Print header.
- Indent(n0, Header(scope_type_));
+ Indent(n0, Header(scope_type_, is_declaration_scope()));
if (!scope_name_->IsEmpty()) {
PrintF(" ");
PrintName(scope_name_);
@@ -1248,7 +1285,7 @@ ClassVariable* Scope::ClassVariableForMethod() const {
// It needs to be investigated whether this causes any practical problems.
if (!is_function_scope()) return nullptr;
if (IsInObjectLiteral(function_kind_)) return nullptr;
- if (!IsConciseMethod(function_kind_) && !IsConstructor(function_kind_) &&
+ if (!IsConciseMethod(function_kind_) && !IsClassConstructor(function_kind_) &&
!IsAccessorFunction(function_kind_)) {
return nullptr;
}
@@ -1567,8 +1604,10 @@ void Scope::AllocateVariablesRecursively(Isolate* isolate) {
// scope and for a function scope that makes an 'eval' call we need a context,
// even if no local variables were statically allocated in the scope.
// Likewise for modules.
- bool must_have_context = is_with_scope() || is_module_scope() ||
- (is_function_scope() && calls_sloppy_eval());
+ bool must_have_context =
+ is_with_scope() || is_module_scope() ||
+ (is_function_scope() && calls_sloppy_eval()) ||
+ (is_block_scope() && is_declaration_scope() && calls_sloppy_eval());
// If we didn't allocate any locals in the local context, then we only
// need the minimal number of slots if we must have a context.
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 12a5a9b18a..61bf6338f7 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -57,6 +57,23 @@ class DynamicScopePart : public ZoneObject {
};
+// Sloppy block-scoped function declarations to var-bind.
+class SloppyBlockFunctionMap : public ZoneHashMap {
+ public:
+ explicit SloppyBlockFunctionMap(Zone* zone);
+
+ virtual ~SloppyBlockFunctionMap();
+
+ void Declare(const AstRawString* name,
+ SloppyBlockFunctionStatement* statement);
+
+ typedef ZoneVector<SloppyBlockFunctionStatement*> Vector;
+
+ private:
+ Zone* zone_;
+};
+
+
// Global invariants after AST construction: Each reference (i.e. identifier)
// to a JavaScript variable (including global properties) is represented by a
// VariableProxy node. Immediately after AST construction and before variable
@@ -128,8 +145,9 @@ class Scope: public ZoneObject {
// Declare a parameter in this scope. When there are duplicated
// parameters the rightmost one 'wins'. However, the implementation
// expects all parameters to be declared and from left to right.
- Variable* DeclareParameter(const AstRawString* name, VariableMode mode,
- bool is_rest, bool* is_duplicate);
+ Variable* DeclareParameter(
+ const AstRawString* name, VariableMode mode,
+ bool is_optional, bool is_rest, bool* is_duplicate);
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
@@ -224,6 +242,17 @@ class Scope: public ZoneObject {
// Set the ASM module flag.
void SetAsmModule() { asm_module_ = true; }
+ // Inform the scope that the scope may execute declarations nonlinearly.
+ // Currently, the only nonlinear scope is a switch statement. The name is
+ // more general in case something else comes up with similar control flow,
+ // for example the ability to break out of something which does not have
+ // its own lexical scope.
+ // The bit does not need to be stored on the ScopeInfo because none of
+ // the three compilers will perform hole check elimination on a variable
+ // located in VariableLocation::CONTEXT. So, direct eval and closures
+ // will not expose holes.
+ void SetNonlinear() { scope_nonlinear_ = true; }
+
// Position in the source where this scope begins and ends.
//
// * For the scope of a with statement
@@ -246,6 +275,10 @@ class Scope: public ZoneObject {
// for (let x ...) stmt
// start position: start position of '('
// end position: end position of last token of 'stmt'
+ // * For the scope of a switch statement
+ // switch (tag) { cases }
+ // start position: start position of '{'
+ // end position: end position of '}'
int start_position() const { return start_position_; }
void set_start_position(int statement_pos) {
start_position_ = statement_pos;
@@ -284,7 +317,7 @@ class Scope: public ZoneObject {
// Information about which scopes calls eval.
bool calls_eval() const { return scope_calls_eval_; }
- bool calls_sloppy_eval() {
+ bool calls_sloppy_eval() const {
return scope_calls_eval_ && is_sloppy(language_mode_);
}
bool outer_scope_calls_sloppy_eval() const {
@@ -304,12 +337,17 @@ class Scope: public ZoneObject {
bool inner_uses_arguments() const { return inner_scope_uses_arguments_; }
// Does this scope access "super" property (super.foo).
bool uses_super_property() const { return scope_uses_super_property_; }
+ // Does this scope have the potential to execute declarations non-linearly?
+ bool is_nonlinear() const { return scope_nonlinear_; }
+
+ // Whether this needs to be represented by a runtime context.
+ bool NeedsContext() const { return num_heap_slots() > 0; }
bool NeedsHomeObject() const {
return scope_uses_super_property_ ||
(scope_calls_eval_ && (IsConciseMethod(function_kind()) ||
IsAccessorFunction(function_kind()) ||
- IsConstructor(function_kind())));
+ IsClassConstructor(function_kind())));
}
const Scope* NearestOuterEvalScope() const {
@@ -362,16 +400,8 @@ class Scope: public ZoneObject {
return params_[index];
}
- // Returns the default function arity --- does not include rest parameters.
- int default_function_length() const {
- int count = params_.length();
- if (rest_index_ >= 0) {
- DCHECK(count > 0);
- DCHECK(is_function_scope());
- --count;
- }
- return count;
- }
+ // Returns the default function arity excluding default or rest parameters.
+ int default_function_length() const { return arity_; }
int num_parameters() const { return params_.length(); }
@@ -387,10 +417,25 @@ class Scope: public ZoneObject {
}
bool has_simple_parameters() const {
- DCHECK(is_function_scope());
return has_simple_parameters_;
}
+ // TODO(caitp): manage this state in a better way. PreParser must be able to
+ // communicate that the scope is non-simple, without allocating any parameters
+ // as the Parser does. This is necessary to ensure that TC39's proposed early
+ // error can be reported consistently regardless of whether the function is
+ // lazily parsed.
+ void SetHasNonSimpleParameters() {
+ DCHECK(is_function_scope());
+ has_simple_parameters_ = false;
+ }
+
+ // Retrieve `IsSimpleParameterList` of current or outer function.
+ bool HasSimpleParameters() {
+ Scope* scope = ClosureScope();
+ return !scope->is_function_scope() || scope->has_simple_parameters();
+ }
+
// The local variable 'arguments' if we need to allocate it; NULL otherwise.
Variable* arguments() const {
DCHECK(!is_arrow_scope() || arguments_ == nullptr);
@@ -400,7 +445,7 @@ class Scope: public ZoneObject {
Variable* this_function_var() const {
// This is only used in derived constructors atm.
DCHECK(this_function_ == nullptr ||
- (is_function_scope() && (IsConstructor(function_kind()) ||
+ (is_function_scope() && (IsClassConstructor(function_kind()) ||
IsConciseMethod(function_kind()) ||
IsAccessorFunction(function_kind()))));
return this_function_;
@@ -516,6 +561,10 @@ class Scope: public ZoneObject {
return params_.Contains(variables_.Lookup(name));
}
+ SloppyBlockFunctionMap* sloppy_block_function_map() {
+ return &sloppy_block_function_map_;
+ }
+
// Error handling.
void ReportMessage(int start_position, int end_position,
MessageTemplate::Template message,
@@ -574,6 +623,9 @@ class Scope: public ZoneObject {
// Module descriptor; module scopes only.
ModuleDescriptor* module_descriptor_;
+ // Map of function names to lists of functions defined in sloppy blocks
+ SloppyBlockFunctionMap sloppy_block_function_map_;
+
// Illegal redeclaration.
Expression* illegal_redecl_;
@@ -594,6 +646,8 @@ class Scope: public ZoneObject {
bool asm_module_;
// This scope's outer context is an asm module.
bool asm_function_;
+ // This scope's declarations might not be executed in order (e.g., switch).
+ bool scope_nonlinear_;
// The language mode of this scope.
LanguageMode language_mode_;
// Source positions.
@@ -629,6 +683,7 @@ class Scope: public ZoneObject {
Variable* module_var_;
// Info about the parameter list of a function.
+ int arity_;
bool has_simple_parameters_;
Variable* rest_parameter_;
int rest_index_;
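SloppyBlockFunctionMap is a zone hash map from a function name to the list of sloppy-mode block-level declarations of that name, with the list created lazily on first Declare(). The same pattern restated with standard containers purely for illustration; Statement stands in for V8's SloppyBlockFunctionStatement:

    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <vector>

    struct Statement {
      int position;
    };

    class SloppyBlockFunctionMap {
     public:
      void Declare(const std::string& name, Statement* stmt) {
        map_[name].push_back(stmt);  // operator[] creates the bucket lazily.
      }
      const std::vector<Statement*>* Lookup(const std::string& name) const {
        auto it = map_.find(name);
        return it == map_.end() ? nullptr : &it->second;
      }

     private:
      std::unordered_map<std::string, std::vector<Statement*>> map_;
    };

    int main() {
      SloppyBlockFunctionMap map;
      Statement a{10}, b{42};
      map.Declare("f", &a);
      map.Declare("f", &b);  // Two block-level declarations of one name.
      std::printf("f declared %zu times\n", map.Lookup("f")->size());
    }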
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index f44eca523a..09cbf93e1e 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -6,14 +6,13 @@
#include <signal.h>
#include <stdio.h>
-#include "src/v8.h"
-
#include "include/libplatform/libplatform.h"
#include "src/assembler.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/flags.h"
#include "src/list.h"
+#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker!
#include "src/snapshot/natives.h"
#include "src/snapshot/serialize.h"
@@ -22,15 +21,17 @@ using namespace v8;
class SnapshotWriter {
public:
- explicit SnapshotWriter(const char* snapshot_file)
- : fp_(GetFileDescriptorOrDie(snapshot_file)),
- startup_blob_file_(NULL) {}
+ SnapshotWriter() : fp_(NULL), startup_blob_file_(NULL) {}
~SnapshotWriter() {
- fclose(fp_);
+ if (fp_) fclose(fp_);
if (startup_blob_file_) fclose(startup_blob_file_);
}
+ void SetSnapshotFile(const char* snapshot_file) {
+ if (snapshot_file != NULL) fp_ = GetFileDescriptorOrDie(snapshot_file);
+ }
+
void SetStartupBlobFile(const char* startup_blob_file) {
if (startup_blob_file != NULL)
startup_blob_file_ = GetFileDescriptorOrDie(startup_blob_file);
@@ -39,7 +40,7 @@ class SnapshotWriter {
void WriteSnapshot(v8::StartupData blob) const {
i::Vector<const i::byte> blob_vector(
reinterpret_cast<const i::byte*>(blob.data), blob.raw_size);
- WriteSnapshotFile(blob_vector);
+ MaybeWriteSnapshotFile(blob_vector);
MaybeWriteStartupBlob(blob_vector);
}
@@ -54,7 +55,9 @@ class SnapshotWriter {
}
}
- void WriteSnapshotFile(const i::Vector<const i::byte>& blob) const {
+ void MaybeWriteSnapshotFile(const i::Vector<const i::byte>& blob) const {
+ if (!fp_) return;
+
WriteFilePrefix();
WriteData(blob);
WriteFileSuffix();
@@ -143,8 +146,9 @@ int main(int argc, char** argv) {
// Print the usage if an error occurs when parsing the command line
// flags or if the help flag is set.
int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
- if (result > 0 || (argc != 2 && argc != 3) || i::FLAG_help) {
- ::printf("Usage: %s [flag] ... outfile\n", argv[0]);
+ if (result > 0 || (argc != 1 && argc != 2) || i::FLAG_help) {
+ ::printf("Usage: %s --startup_src=... --startup_blob=... [extras]\n",
+ argv[0]);
i::FlagList::PrintHelp();
return !i::FLAG_help;
}
@@ -156,9 +160,10 @@ int main(int argc, char** argv) {
v8::V8::Initialize();
{
- SnapshotWriter writer(argv[1]);
+ SnapshotWriter writer;
+ if (i::FLAG_startup_src) writer.SetSnapshotFile(i::FLAG_startup_src);
if (i::FLAG_startup_blob) writer.SetStartupBlobFile(i::FLAG_startup_blob);
- char* extra_code = GetExtraCode(argc == 3 ? argv[2] : NULL);
+ char* extra_code = GetExtraCode(argc == 2 ? argv[1] : NULL);
StartupData blob = v8::V8::CreateSnapshotDataBlob(extra_code);
CHECK(blob.data);
writer.WriteSnapshot(blob);
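The SnapshotWriter refactor above makes both outputs optional: files arrive via setters instead of the constructor, and each write is guarded, so mksnapshot can emit the source file, the startup blob, both, or neither. The same shape in miniature, with invented names:

    #include <cstddef>
    #include <cstdio>

    // Both outputs optional, both writes guarded.
    class Writer {
     public:
      ~Writer() {
        if (src_) std::fclose(src_);
        if (blob_) std::fclose(blob_);
      }
      void SetSourceFile(const char* path) {
        if (path) src_ = std::fopen(path, "wb");
      }
      void SetBlobFile(const char* path) {
        if (path) blob_ = std::fopen(path, "wb");
      }
      void Write(const char* data, std::size_t n) {
        if (src_) std::fwrite(data, 1, n, src_);  // Skipped if not configured.
        if (blob_) std::fwrite(data, 1, n, blob_);
      }

     private:
      std::FILE* src_ = nullptr;
      std::FILE* blob_ = nullptr;
    };

    int main() {
      Writer w;  // No files configured: Write() quietly does nothing.
      w.Write("blob", 4);
    }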
diff --git a/deps/v8/src/snapshot/natives-common.cc b/deps/v8/src/snapshot/natives-common.cc
index 080cd49104..d027ec9dc7 100644
--- a/deps/v8/src/snapshot/natives-common.cc
+++ b/deps/v8/src/snapshot/natives-common.cc
@@ -30,6 +30,12 @@ FixedArray* NativesCollection<EXTRAS>::GetSourceCache(Heap* heap) {
template <>
+FixedArray* NativesCollection<EXPERIMENTAL_EXTRAS>::GetSourceCache(Heap* heap) {
+ return heap->experimental_extra_natives_source_cache();
+}
+
+
+template <>
FixedArray* NativesCollection<CODE_STUB>::GetSourceCache(Heap* heap) {
return heap->code_stub_natives_source_cache();
}
@@ -51,6 +57,8 @@ template void NativesCollection<CORE>::UpdateSourceCache(Heap* heap);
template void NativesCollection<CODE_STUB>::UpdateSourceCache(Heap* heap);
template void NativesCollection<EXPERIMENTAL>::UpdateSourceCache(Heap* heap);
template void NativesCollection<EXTRAS>::UpdateSourceCache(Heap* heap);
+template void NativesCollection<EXPERIMENTAL_EXTRAS>::UpdateSourceCache(
+ Heap* heap);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/natives-external.cc b/deps/v8/src/snapshot/natives-external.cc
index e0df27f3ad..7e5e6c7ba0 100644
--- a/deps/v8/src/snapshot/natives-external.cc
+++ b/deps/v8/src/snapshot/natives-external.cc
@@ -161,6 +161,8 @@ void ReadNatives() {
NativesHolder<EXPERIMENTAL>::set(
NativesStore::MakeFromScriptsSource(&bytes));
NativesHolder<EXTRAS>::set(NativesStore::MakeFromScriptsSource(&bytes));
+ NativesHolder<EXPERIMENTAL_EXTRAS>::set(
+ NativesStore::MakeFromScriptsSource(&bytes));
DCHECK(!bytes.HasMore());
}
}
@@ -189,6 +191,7 @@ void DisposeNatives() {
NativesHolder<CODE_STUB>::Dispose();
NativesHolder<EXPERIMENTAL>::Dispose();
NativesHolder<EXTRAS>::Dispose();
+ NativesHolder<EXPERIMENTAL_EXTRAS>::Dispose();
}
@@ -241,6 +244,7 @@ INSTANTIATE_TEMPLATES(CORE)
INSTANTIATE_TEMPLATES(CODE_STUB)
INSTANTIATE_TEMPLATES(EXPERIMENTAL)
INSTANTIATE_TEMPLATES(EXTRAS)
+INSTANTIATE_TEMPLATES(EXPERIMENTAL_EXTRAS)
#undef INSTANTIATE_TEMPLATES
} // namespace internal
diff --git a/deps/v8/src/snapshot/natives.h b/deps/v8/src/snapshot/natives.h
index 1efaf7ece5..c923a0f353 100644
--- a/deps/v8/src/snapshot/natives.h
+++ b/deps/v8/src/snapshot/natives.h
@@ -13,7 +13,15 @@ namespace v8 { class StartupData; } // Forward declaration.
namespace v8 {
namespace internal {
-enum NativeType { CORE, CODE_STUB, EXPERIMENTAL, EXTRAS, D8, TEST };
+enum NativeType {
+ CORE,
+ CODE_STUB,
+ EXPERIMENTAL,
+ EXTRAS,
+ EXPERIMENTAL_EXTRAS,
+ D8,
+ TEST
+};
template <NativeType type>
class NativesCollection {
@@ -44,6 +52,7 @@ typedef NativesCollection<CORE> Natives;
typedef NativesCollection<CODE_STUB> CodeStubNatives;
typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
typedef NativesCollection<EXTRAS> ExtraNatives;
+typedef NativesCollection<EXPERIMENTAL_EXTRAS> ExperimentalExtraNatives;
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
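Wiring in EXPERIMENTAL_EXTRAS follows the same three-step pattern across natives.h and natives-common.cc: extend the NativeType enum, specialize the per-type accessor, and instantiate the template for the new value. A compact illustration with made-up cache strings:

    #include <cstdio>

    enum NativeType { CORE, EXTRAS, EXPERIMENTAL_EXTRAS };

    template <NativeType type>
    struct NativesCollection {
      static const char* SourceCache();
    };

    template <>
    const char* NativesCollection<CORE>::SourceCache() {
      return "core";
    }
    template <>
    const char* NativesCollection<EXTRAS>::SourceCache() {
      return "extras";
    }
    template <>
    const char* NativesCollection<EXPERIMENTAL_EXTRAS>::SourceCache() {
      return "experimental-extras";  // The newly wired-in value.
    }

    int main() {
      std::printf("%s\n",
                  NativesCollection<EXPERIMENTAL_EXTRAS>::SourceCache());
    }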
diff --git a/deps/v8/src/snapshot/serialize.cc b/deps/v8/src/snapshot/serialize.cc
index 9f2b4e9314..fde170d0bf 100644
--- a/deps/v8/src/snapshot/serialize.cc
+++ b/deps/v8/src/snapshot/serialize.cc
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/snapshot/serialize.h"
#include "src/accessors.h"
#include "src/api.h"
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/global-handles.h"
@@ -17,11 +16,12 @@
#include "src/ic/stub-cache.h"
#include "src/objects.h"
#include "src/parser.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/natives.h"
-#include "src/snapshot/serialize.h"
#include "src/snapshot/snapshot.h"
#include "src/snapshot/snapshot-source-sink.h"
+#include "src/v8.h"
#include "src/v8threads.h"
#include "src/version.h"
@@ -121,8 +121,6 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
"InvokeFunctionCallback");
Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
"InvokeAccessorGetterCallback");
- Add(ExternalReference::flush_icache_function(isolate).address(),
- "CpuFeatures::FlushICache");
Add(ExternalReference::log_enter_external_function(isolate).address(),
"Logger::EnterExternal");
Add(ExternalReference::log_leave_external_function(isolate).address(),
@@ -131,6 +129,8 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
"double_constants.minus_one_half");
Add(ExternalReference::stress_deopt_count(isolate).address(),
"Isolate::stress_deopt_count_address()");
+ Add(ExternalReference::vector_store_virtual_register(isolate).address(),
+ "Isolate::vector_store_virtual_register()");
// Debug addresses
Add(ExternalReference::debug_after_break_target_address(isolate).address(),
@@ -354,10 +354,9 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
map_ = isolate->root_index_map();
if (map_ != NULL) return;
map_ = new HashMap(HashMap::PointersMatch);
- Object** root_array = isolate->heap()->roots_array_start();
for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
- Object* root = root_array[root_index];
+ Object* root = isolate->heap()->root(root_index);
// Omit root entries that can be written after initialization. They must
// not be referenced through the root list in the snapshot.
if (root->IsHeapObject() &&
@@ -506,7 +505,8 @@ void Deserializer::FlushICacheForNewIsolate() {
PageIterator it(isolate_->heap()->code_space());
while (it.has_next()) {
Page* p = it.next();
- CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start());
+ Assembler::FlushICache(isolate_, p->area_start(),
+ p->area_end() - p->area_start());
}
}
@@ -514,8 +514,8 @@ void Deserializer::FlushICacheForNewIsolate() {
void Deserializer::FlushICacheForNewCodeObjects() {
DCHECK(deserializing_user_code_);
for (Code* code : new_code_objects_) {
- CpuFeatures::FlushICache(code->instruction_start(),
- code->instruction_size());
+ Assembler::FlushICache(isolate_, code->instruction_start(),
+ code->instruction_size());
}
}
@@ -636,7 +636,7 @@ MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
FlushICacheForNewCodeObjects();
result = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root));
}
- CommitNewInternalizedStrings(isolate);
+ CommitPostProcessedObjects(isolate);
return scope.CloseAndEscape(result);
}
}
@@ -660,18 +660,27 @@ void Deserializer::VisitPointers(Object** start, Object** end) {
void Deserializer::DeserializeDeferredObjects() {
for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
- int space = code & kSpaceMask;
- DCHECK(space <= kNumberOfSpaces);
- DCHECK(code - space == kNewObject);
- HeapObject* object = GetBackReferencedObject(space);
- int size = source_.GetInt() << kPointerSizeLog2;
- Address obj_address = object->address();
- Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
- Object** end = reinterpret_cast<Object**>(obj_address + size);
- bool filled = ReadData(start, end, space, obj_address);
- CHECK(filled);
- DCHECK(CanBeDeferred(object));
- PostProcessNewObject(object, space);
+ switch (code) {
+ case kAlignmentPrefix:
+ case kAlignmentPrefix + 1:
+ case kAlignmentPrefix + 2:
+ SetAlignment(code);
+ break;
+ default: {
+ int space = code & kSpaceMask;
+ DCHECK(space <= kNumberOfSpaces);
+ DCHECK(code - space == kNewObject);
+ HeapObject* object = GetBackReferencedObject(space);
+ int size = source_.GetInt() << kPointerSizeLog2;
+ Address obj_address = object->address();
+ Object** start = reinterpret_cast<Object**>(obj_address + kPointerSize);
+ Object** end = reinterpret_cast<Object**>(obj_address + size);
+ bool filled = ReadData(start, end, space, obj_address);
+ CHECK(filled);
+ DCHECK(CanBeDeferred(object));
+ PostProcessNewObject(object, space);
+ }
+ }
}
}
@@ -729,8 +738,7 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
}
}
} else if (obj->IsScript()) {
- // Assign a new script id to avoid collision.
- Script::cast(obj)->set_id(isolate_->heap()->NextScriptId());
+ new_scripts_.Add(handle(Script::cast(obj)));
} else {
DCHECK(CanBeDeferred(obj));
}
@@ -763,7 +771,7 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
}
-void Deserializer::CommitNewInternalizedStrings(Isolate* isolate) {
+void Deserializer::CommitPostProcessedObjects(Isolate* isolate) {
StringTable::EnsureCapacityForDeserialization(
isolate, new_internalized_strings_.length());
for (Handle<String> string : new_internalized_strings_) {
@@ -771,6 +779,16 @@ void Deserializer::CommitNewInternalizedStrings(Isolate* isolate) {
DCHECK_NULL(StringTable::LookupKeyIfExists(isolate, &key));
StringTable::LookupKey(isolate, &key);
}
+
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ for (Handle<Script> script : new_scripts_) {
+ // Assign a new script id to avoid collision.
+ script->set_id(isolate_->heap()->NextScriptId());
+ // Add script to list.
+ Handle<Object> list = WeakFixedArray::Add(factory->script_list(), script);
+ heap->SetRootScriptList(*list);
+ }
}
@@ -946,8 +964,9 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
emit_write_barrier = (space_number == NEW_SPACE); \
new_object = GetBackReferencedObject(data & kSpaceMask); \
} else if (where == kRootArray) { \
- int root_id = source_.GetInt(); \
- new_object = isolate->heap()->roots_array_start()[root_id]; \
+ int id = source_.GetInt(); \
+ Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id); \
+ new_object = isolate->heap()->root(root_index); \
emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
} else if (where == kPartialSnapshotCache) { \
int cache_index = source_.GetInt(); \
@@ -1202,12 +1221,9 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
case kAlignmentPrefix:
case kAlignmentPrefix + 1:
- case kAlignmentPrefix + 2: {
- DCHECK_EQ(kWordAligned, next_alignment_);
- next_alignment_ =
- static_cast<AllocationAlignment>(data - (kAlignmentPrefix - 1));
+ case kAlignmentPrefix + 2:
+ SetAlignment(data);
break;
- }
STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
@@ -1221,8 +1237,9 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
SIXTEEN_CASES(kRootArrayConstants)
SIXTEEN_CASES(kRootArrayConstants + 16) {
- int root_id = data & kRootArrayConstantsMask;
- Object* object = isolate->heap()->roots_array_start()[root_id];
+ int id = data & kRootArrayConstantsMask;
+ Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(id);
+ Object* object = isolate->heap()->root(root_index);
DCHECK(!isolate->heap()->InNewSpace(object));
UnalignedCopy(current++, &object);
break;
@@ -1367,6 +1384,66 @@ void Serializer::OutputStatistics(const char* name) {
}
+class Serializer::ObjectSerializer : public ObjectVisitor {
+ public:
+ ObjectSerializer(Serializer* serializer, Object* o, SnapshotByteSink* sink,
+ HowToCode how_to_code, WhereToPoint where_to_point)
+ : serializer_(serializer),
+ object_(HeapObject::cast(o)),
+ sink_(sink),
+ reference_representation_(how_to_code + where_to_point),
+ bytes_processed_so_far_(0),
+ is_code_object_(o->IsCode()),
+ code_has_been_output_(false) {}
+ void Serialize();
+ void SerializeDeferred();
+ void VisitPointers(Object** start, Object** end);
+ void VisitEmbeddedPointer(RelocInfo* target);
+ void VisitExternalReference(Address* p);
+ void VisitExternalReference(RelocInfo* rinfo);
+ void VisitInternalReference(RelocInfo* rinfo);
+ void VisitCodeTarget(RelocInfo* target);
+ void VisitCodeEntry(Address entry_address);
+ void VisitCell(RelocInfo* rinfo);
+ void VisitRuntimeEntry(RelocInfo* reloc);
+  // Used for serializing the external strings that hold the natives source.
+ void VisitExternalOneByteString(
+ v8::String::ExternalOneByteStringResource** resource);
+ // We can't serialize a heap with external two byte strings.
+ void VisitExternalTwoByteString(
+ v8::String::ExternalStringResource** resource) {
+ UNREACHABLE();
+ }
+
+ private:
+ void SerializePrologue(AllocationSpace space, int size, Map* map);
+
+ bool SerializeExternalNativeSourceString(
+ int builtin_count,
+ v8::String::ExternalOneByteStringResource** resource_pointer,
+ FixedArray* source_cache, int resource_index);
+
+ enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
+  // This function outputs or skips the raw data between the last pointer and
+  // the current position. It can optionally return the number of bytes to
+  // skip instead of emitting a skip instruction, in case the skip can be
+  // merged into the next instruction.
+ int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
+ // External strings are serialized in a way to resemble sequential strings.
+ void SerializeExternalString();
+
+ Address PrepareCode();
+
+ Serializer* serializer_;
+ HeapObject* object_;
+ SnapshotByteSink* sink_;
+ int reference_representation_;
+ int bytes_processed_so_far_;
+ bool is_code_object_;
+ bool code_has_been_output_;
+};
+
+
void Serializer::SerializeDeferredObjects() {
while (deferred_objects_.length() > 0) {
HeapObject* obj = deferred_objects_.RemoveLast();
@@ -1528,6 +1605,11 @@ void SerializerDeserializer::Iterate(Isolate* isolate,
}
+bool SerializerDeserializer::CanBeDeferred(HeapObject* o) {
+ return !o->IsString() && !o->IsScript();
+}
+
+
int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
Isolate* isolate = this->isolate();
List<Object*>* cache = isolate->partial_snapshot_cache();
@@ -1548,6 +1630,19 @@ int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
}
+bool PartialSerializer::ShouldBeInThePartialSnapshotCache(HeapObject* o) {
+  // Scripts should be referred to only through shared function infos. We
+  // can't allow them to be part of the partial snapshot because they contain
+  // a unique ID, and deserializing several partial snapshots containing
+  // scripts would cause duplicates.
+ DCHECK(!o->IsScript());
+ return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
+ o->IsCode() || o->IsScopeInfo() || o->IsExecutableAccessorInfo() ||
+ o->map() ==
+ startup_serializer_->isolate()->heap()->fixed_cow_array_map();
+}
+
+
#ifdef DEBUG
bool Serializer::BackReferenceIsAlreadyAllocated(BackReference reference) {
DCHECK(reference.is_valid());
@@ -1631,6 +1726,17 @@ bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
}
+StartupSerializer::StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
+ : Serializer(isolate, sink), root_index_wave_front_(0) {
+ // Clear the cache of objects used by the partial snapshot. After the
+ // strong roots have been serialized we can create a partial snapshot
+ // which will repopulate the cache with objects needed by that partial
+ // snapshot.
+ isolate->partial_snapshot_cache()->Clear();
+ InitializeCodeAddressMap();
+}
+
+
void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) {
// Make sure that all functions are derived from the code-stub context
@@ -1982,6 +2088,7 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
CHECK_EQ(0, bytes_processed_so_far_);
bytes_processed_so_far_ = kPointerSize;
+ serializer_->PutAlignmentPrefix(object_);
sink_->Put(kNewObject + reference.space(), "deferred object");
serializer_->PutBackReference(object_, reference);
sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");
@@ -2381,6 +2488,8 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
SerializeGeneric(code_object, how_to_code, where_to_point);
}
return;
+ case Code::WASM_FUNCTION:
+ UNREACHABLE();
}
UNREACHABLE();
}
@@ -2438,8 +2547,7 @@ void CodeSerializer::SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
if (FLAG_trace_serializer) {
PrintF(" Encoding code stub %s as %d\n",
- CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key), false),
- index);
+ CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key)), index);
}
sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
@@ -2724,6 +2832,11 @@ SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
}
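+// A minimal stand-in hash: the length of the script source is used to
+// sanity-check deserialized code against the source it was compiled from.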
+uint32_t SerializedCodeData::SourceHash(String* source) const {
+ return source->length();
+}
+
+
// Return ScriptData object and relinquish ownership over it to the caller.
ScriptData* SerializedCodeData::GetScriptData() {
DCHECK(owns_data_);
diff --git a/deps/v8/src/snapshot/serialize.h b/deps/v8/src/snapshot/serialize.h
index e790062913..d5374a28e0 100644
--- a/deps/v8/src/snapshot/serialize.h
+++ b/deps/v8/src/snapshot/serialize.h
@@ -6,12 +6,14 @@
#define V8_SNAPSHOT_SERIALIZE_H_
#include "src/hashmap.h"
-#include "src/isolate.h"
+#include "src/heap/heap.h"
+#include "src/objects.h"
#include "src/snapshot/snapshot-source-sink.h"
namespace v8 {
namespace internal {
+class Isolate;
class ScriptData;
static const int kDeoptTableSerializeEntryCount = 64;
@@ -308,9 +310,7 @@ class SerializerDeserializer: public ObjectVisitor {
static const int kNumberOfSpaces = LAST_SPACE + 1;
protected:
- static bool CanBeDeferred(HeapObject* o) {
- return !o->IsString() && !o->IsScript();
- }
+ static bool CanBeDeferred(HeapObject* o);
// ---------- byte code range 0x00..0x7f ----------
// Byte codes in this range represent Where, HowToCode and WhereToPoint.
@@ -572,12 +572,20 @@ class Deserializer: public SerializerDeserializer {
memcpy(dest, src, sizeof(*src));
}
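+  // Decodes an alignment prefix byte code into the allocation alignment
+  // that the next object read from the stream will be allocated with.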
+ void SetAlignment(byte data) {
+ DCHECK_EQ(kWordAligned, next_alignment_);
+ int alignment = data - (kAlignmentPrefix - 1);
+ DCHECK_LE(kWordAligned, alignment);
+ DCHECK_LE(alignment, kSimd128Unaligned);
+ next_alignment_ = static_cast<AllocationAlignment>(alignment);
+ }
+
void DeserializeDeferredObjects();
void FlushICacheForNewIsolate();
void FlushICacheForNewCodeObjects();
- void CommitNewInternalizedStrings(Isolate* isolate);
+ void CommitPostProcessedObjects(Isolate* isolate);
// Fills in some heap data in an area from start to end (non-inclusive). The
// space id is used for the write barrier. The object_address is the address
@@ -621,6 +629,7 @@ class Deserializer: public SerializerDeserializer {
List<HeapObject*> deserialized_large_objects_;
List<Code*> new_code_objects_;
List<Handle<String> > new_internalized_strings_;
+ List<Handle<Script> > new_scripts_;
bool deserializing_user_code_;
@@ -653,65 +662,7 @@ class Serializer : public SerializerDeserializer {
#endif // OBJECT_PRINT
protected:
- class ObjectSerializer : public ObjectVisitor {
- public:
- ObjectSerializer(Serializer* serializer, Object* o, SnapshotByteSink* sink,
- HowToCode how_to_code, WhereToPoint where_to_point)
- : serializer_(serializer),
- object_(HeapObject::cast(o)),
- sink_(sink),
- reference_representation_(how_to_code + where_to_point),
- bytes_processed_so_far_(0),
- is_code_object_(o->IsCode()),
- code_has_been_output_(false) {}
- void Serialize();
- void SerializeDeferred();
- void VisitPointers(Object** start, Object** end);
- void VisitEmbeddedPointer(RelocInfo* target);
- void VisitExternalReference(Address* p);
- void VisitExternalReference(RelocInfo* rinfo);
- void VisitInternalReference(RelocInfo* rinfo);
- void VisitCodeTarget(RelocInfo* target);
- void VisitCodeEntry(Address entry_address);
- void VisitCell(RelocInfo* rinfo);
- void VisitRuntimeEntry(RelocInfo* reloc);
- // Used for seralizing the external strings that hold the natives source.
- void VisitExternalOneByteString(
- v8::String::ExternalOneByteStringResource** resource);
- // We can't serialize a heap with external two byte strings.
- void VisitExternalTwoByteString(
- v8::String::ExternalStringResource** resource) {
- UNREACHABLE();
- }
-
- private:
- void SerializePrologue(AllocationSpace space, int size, Map* map);
-
- bool SerializeExternalNativeSourceString(
- int builtin_count,
- v8::String::ExternalOneByteStringResource** resource_pointer,
- FixedArray* source_cache, int resource_index);
-
- enum ReturnSkip { kCanReturnSkipInsteadOfSkipping, kIgnoringReturn };
- // This function outputs or skips the raw data between the last pointer and
- // up to the current position. It optionally can just return the number of
- // bytes to skip instead of performing a skip instruction, in case the skip
- // can be merged into the next instruction.
- int OutputRawData(Address up_to, ReturnSkip return_skip = kIgnoringReturn);
- // External strings are serialized in a way to resemble sequential strings.
- void SerializeExternalString();
-
- Address PrepareCode();
-
- Serializer* serializer_;
- HeapObject* object_;
- SnapshotByteSink* sink_;
- int reference_representation_;
- int bytes_processed_so_far_;
- bool is_code_object_;
- bool code_has_been_output_;
- };
-
+ class ObjectSerializer;
class RecursionScope {
public:
explicit RecursionScope(Serializer* serializer) : serializer_(serializer) {
@@ -850,17 +801,7 @@ class PartialSerializer : public Serializer {
private:
int PartialSnapshotCacheIndex(HeapObject* o);
- bool ShouldBeInThePartialSnapshotCache(HeapObject* o) {
- // Scripts should be referred only through shared function infos. We can't
- // allow them to be part of the partial snapshot because they contain a
- // unique ID, and deserializing several partial snapshots containing script
- // would cause dupes.
- DCHECK(!o->IsScript());
- return o->IsName() || o->IsSharedFunctionInfo() || o->IsHeapNumber() ||
- o->IsCode() || o->IsScopeInfo() || o->IsExecutableAccessorInfo() ||
- o->map() ==
- startup_serializer_->isolate()->heap()->fixed_cow_array_map();
- }
+ bool ShouldBeInThePartialSnapshotCache(HeapObject* o);
void SerializeOutdatedContextsAsFixedArray();
@@ -874,16 +815,7 @@ class PartialSerializer : public Serializer {
class StartupSerializer : public Serializer {
public:
- StartupSerializer(Isolate* isolate, SnapshotByteSink* sink)
- : Serializer(isolate, sink), root_index_wave_front_(0) {
- // Clear the cache of objects used by the partial snapshot. After the
- // strong roots have been serialized we can create a partial snapshot
- // which will repopulate the cache with objects needed by that partial
- // snapshot.
- isolate->partial_snapshot_cache()->Clear();
- InitializeCodeAddressMap();
- }
-
+ StartupSerializer(Isolate* isolate, SnapshotByteSink* sink);
~StartupSerializer() { OutputStatistics("StartupSerializer"); }
// The StartupSerializer has to serialize the root array, which is slightly
@@ -1030,7 +962,7 @@ class SerializedCodeData : public SerializedData {
SanityCheckResult SanityCheck(Isolate* isolate, String* source) const;
- uint32_t SourceHash(String* source) const { return source->length(); }
+ uint32_t SourceHash(String* source) const;
// The data header consists of uint32_t-sized entries:
// [0] magic number and external reference count
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 743178b51b..0b7e11d1ec 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -4,12 +4,11 @@
// The common functionality when building with or without snapshots.
-#include "src/v8.h"
+#include "src/snapshot/snapshot.h"
#include "src/api.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
-#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
@@ -22,6 +21,13 @@ bool Snapshot::SnapshotIsValid(v8::StartupData* snapshot_blob) {
#endif // DEBUG
+bool Snapshot::HaveASnapshotToStartFrom(Isolate* isolate) {
+ // Do not use snapshots if the isolate is used to create snapshots.
+ return isolate->snapshot_blob() != NULL &&
+ isolate->snapshot_blob()->data != NULL;
+}
+
+
bool Snapshot::EmbedsScript(Isolate* isolate) {
if (!isolate->snapshot_available()) return false;
return ExtractMetadata(isolate->snapshot_blob()).embeds_script();
diff --git a/deps/v8/src/snapshot/snapshot-empty.cc b/deps/v8/src/snapshot/snapshot-empty.cc
index 464d3a800a..35cb6c38f5 100644
--- a/deps/v8/src/snapshot/snapshot-empty.cc
+++ b/deps/v8/src/snapshot/snapshot-empty.cc
@@ -4,8 +4,6 @@
// Used for building without snapshots.
-#include "src/v8.h"
-
#include "src/snapshot/snapshot.h"
namespace v8 {
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 7f89213f16..1379644fd8 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -2,15 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/isolate.h"
-#include "src/snapshot/serialize.h"
-
#ifndef V8_SNAPSHOT_SNAPSHOT_H_
#define V8_SNAPSHOT_SNAPSHOT_H_
+#include "src/snapshot/serialize.h"
+
namespace v8 {
namespace internal {
+// Forward declarations.
+class Isolate;
+class PartialSerializer;
+class StartupSerializer;
+
class Snapshot : public AllStatic {
public:
class Metadata {
@@ -36,11 +40,7 @@ class Snapshot : public AllStatic {
Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
Handle<FixedArray>* outdated_contexts_out);
- static bool HaveASnapshotToStartFrom(Isolate* isolate) {
- // Do not use snapshots if the isolate is used to create snapshots.
- return isolate->snapshot_blob() != NULL &&
- isolate->snapshot_blob()->data != NULL;
- }
+ static bool HaveASnapshotToStartFrom(Isolate* isolate);
static bool EmbedsScript(Isolate* isolate);
diff --git a/deps/v8/src/string-builder.cc b/deps/v8/src/string-builder.cc
index 30c64b3c6d..e24def6b68 100644
--- a/deps/v8/src/string-builder.cc
+++ b/deps/v8/src/string-builder.cc
@@ -4,6 +4,7 @@
#include "src/string-builder.h"
+#include "src/isolate-inl.h"
#include "src/objects-inl.h"
namespace v8 {
diff --git a/deps/v8/src/string-iterator.js b/deps/v8/src/string-iterator.js
index bb392ef10e..660dc7c98b 100644
--- a/deps/v8/src/string-iterator.js
+++ b/deps/v8/src/string-iterator.js
@@ -12,26 +12,21 @@
// Imports
var GlobalString = global.String;
-
-var ArrayIteratorCreateResultObject;
-
-utils.Import(function(from) {
- ArrayIteratorCreateResultObject = from.ArrayIteratorCreateResultObject;
-});
-
-// -------------------------------------------------------------------
-
+var iteratorSymbol = utils.ImportNow("iterator_symbol");
var stringIteratorIteratedStringSymbol =
- GLOBAL_PRIVATE("StringIterator#iteratedString");
-var stringIteratorNextIndexSymbol = GLOBAL_PRIVATE("StringIterator#next");
+ utils.ImportNow("string_iterator_iterated_string_symbol");
+var stringIteratorNextIndexSymbol =
+ utils.ImportNow("string_iterator_next_index_symbol");
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+// -------------------------------------------------------------------
function StringIterator() {}
// 21.1.5.1 CreateStringIterator Abstract Operation
function CreateStringIterator(string) {
- var s = TO_STRING_INLINE(string);
+ var s = TO_STRING(string);
var iterator = new StringIterator;
SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol, s);
SET_PRIVATE(iterator, stringIteratorNextIndexSymbol, 0);
@@ -39,44 +34,42 @@ function CreateStringIterator(string) {
}
-// 21.1.5.2.1 %StringIteratorPrototype%.next( )
+// ES6 section 21.1.5.2.1 %StringIteratorPrototype%.next ( )
function StringIteratorNext() {
- var iterator = TO_OBJECT(this);
+ var iterator = this;
+ var value = UNDEFINED;
+ var done = true;
- if (!HAS_DEFINED_PRIVATE(iterator, stringIteratorNextIndexSymbol)) {
+ if (!IS_SPEC_OBJECT(iterator) ||
+ !HAS_DEFINED_PRIVATE(iterator, stringIteratorNextIndexSymbol)) {
throw MakeTypeError(kIncompatibleMethodReceiver,
'String Iterator.prototype.next');
}
var s = GET_PRIVATE(iterator, stringIteratorIteratedStringSymbol);
- if (IS_UNDEFINED(s)) {
- return ArrayIteratorCreateResultObject(UNDEFINED, true);
- }
-
- var position = GET_PRIVATE(iterator, stringIteratorNextIndexSymbol);
- var length = TO_UINT32(s.length);
-
- if (position >= length) {
- SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol,
- UNDEFINED);
- return ArrayIteratorCreateResultObject(UNDEFINED, true);
- }
+ if (!IS_UNDEFINED(s)) {
+ var position = GET_PRIVATE(iterator, stringIteratorNextIndexSymbol);
+ var length = TO_UINT32(s.length);
+ if (position >= length) {
+ SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol, UNDEFINED);
+ } else {
+ var first = %_StringCharCodeAt(s, position);
+ value = %_StringCharFromCode(first);
+ done = false;
+ position++;
- var first = %_StringCharCodeAt(s, position);
- var resultString = %_StringCharFromCode(first);
- position++;
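+      // If the unit is a lead surrogate and a trail surrogate follows,
+      // return the whole surrogate pair as a single result value.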
+ if (first >= 0xD800 && first <= 0xDBFF && position < length) {
+ var second = %_StringCharCodeAt(s, position);
+ if (second >= 0xDC00 && second <= 0xDFFF) {
+ value += %_StringCharFromCode(second);
+ position++;
+ }
+ }
- if (first >= 0xD800 && first <= 0xDBFF && position < length) {
- var second = %_StringCharCodeAt(s, position);
- if (second >= 0xDC00 && second <= 0xDFFF) {
- resultString += %_StringCharFromCode(second);
- position++;
+ SET_PRIVATE(iterator, stringIteratorNextIndexSymbol, position);
}
}
-
- SET_PRIVATE(iterator, stringIteratorNextIndexSymbol, position);
-
- return ArrayIteratorCreateResultObject(resultString, false);
+ return %_CreateIterResultObject(value, done);
}
@@ -93,11 +86,11 @@ function StringPrototypeIterator() {
utils.InstallFunctions(StringIterator.prototype, DONT_ENUM, [
'next', StringIteratorNext
]);
-%AddNamedProperty(StringIterator.prototype, symbolToStringTag,
+%AddNamedProperty(StringIterator.prototype, toStringTagSymbol,
"String Iterator", READ_ONLY | DONT_ENUM);
-utils.SetFunctionName(StringPrototypeIterator, symbolIterator);
-%AddNamedProperty(GlobalString.prototype, symbolIterator,
+utils.SetFunctionName(StringPrototypeIterator, iteratorSymbol);
+%AddNamedProperty(GlobalString.prototype, iteratorSymbol,
StringPrototypeIterator, DONT_ENUM);
})
diff --git a/deps/v8/src/string-search.h b/deps/v8/src/string-search.h
index bf5ffe6b2d..c0cc2cad4b 100644
--- a/deps/v8/src/string-search.h
+++ b/deps/v8/src/string-search.h
@@ -5,6 +5,9 @@
#ifndef V8_STRING_SEARCH_H_
#define V8_STRING_SEARCH_H_
+#include "src/isolate.h"
+#include "src/vector.h"
+
namespace v8 {
namespace internal {
@@ -187,6 +190,46 @@ class StringSearch : private StringSearchBase {
};
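+// Rounds a pointer down to the given power-of-two alignment by masking off
+// its low bits; used below to put raw memchr hits back onto character
+// boundaries when the subject string has two-byte characters.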
+template <typename T, typename U>
+inline T AlignDown(T value, U alignment) {
+ return reinterpret_cast<T>(
+ (reinterpret_cast<uintptr_t>(value) & ~(alignment - 1)));
+}
+
+
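+// Picks the larger of a two-byte character's bytes as the byte to scan for
+// with memchr; the higher value is heuristically the rarer one in the
+// subject, so fewer false hits need full-character verification.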
+inline uint8_t GetHighestValueByte(uc16 character) {
+ return Max(static_cast<uint8_t>(character & 0xFF),
+ static_cast<uint8_t>(character >> 8));
+}
+
+
+inline uint8_t GetHighestValueByte(uint8_t character) { return character; }
+
+
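+// Finds the next occurrence of pattern[0] in the subject at or after index,
+// scanning with memchr on a single byte of the character and verifying each
+// candidate against the full character. Returns -1 if there is none.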
+template <typename PatternChar, typename SubjectChar>
+inline int FindFirstCharacter(Vector<const PatternChar> pattern,
+ Vector<const SubjectChar> subject, int index) {
+ const PatternChar pattern_first_char = pattern[0];
+ const int max_n = (subject.length() - pattern.length() + 1);
+
+ const uint8_t search_byte = GetHighestValueByte(pattern_first_char);
+ const SubjectChar search_char = static_cast<SubjectChar>(pattern_first_char);
+ int pos = index;
+ do {
+ DCHECK_GE(max_n - pos, 0);
+ const SubjectChar* char_pos = reinterpret_cast<const SubjectChar*>(
+ memchr(subject.start() + pos, search_byte,
+ (max_n - pos) * sizeof(SubjectChar)));
+ if (char_pos == NULL) return -1;
+ char_pos = AlignDown(char_pos, sizeof(SubjectChar));
+ pos = static_cast<int>(char_pos - subject.start());
+ if (subject[pos] == search_char) return pos;
+ } while (++pos < max_n);
+
+ return -1;
+}
+
+
//---------------------------------------------------------------------
// Single Character Pattern Search Strategy
//---------------------------------------------------------------------
@@ -198,27 +241,12 @@ int StringSearch<PatternChar, SubjectChar>::SingleCharSearch(
int index) {
DCHECK_EQ(1, search->pattern_.length());
PatternChar pattern_first_char = search->pattern_[0];
- int i = index;
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- subject.length() - i));
- if (pos == NULL) return -1;
- return static_cast<int>(pos - subject.start());
- } else {
- if (sizeof(PatternChar) > sizeof(SubjectChar)) {
- if (exceedsOneByte(pattern_first_char)) {
- return -1;
- }
+ if (sizeof(PatternChar) > sizeof(SubjectChar)) {
+ if (exceedsOneByte(pattern_first_char)) {
+ return -1;
}
- SubjectChar search_char = static_cast<SubjectChar>(pattern_first_char);
- int n = subject.length();
- while (i < n) {
- if (subject[i++] == search_char) return i - 1;
- }
- return -1;
}
+ return FindFirstCharacter(search->pattern_, subject, index);
}
//---------------------------------------------------------------------
@@ -251,20 +279,13 @@ int StringSearch<PatternChar, SubjectChar>::LinearSearch(
Vector<const PatternChar> pattern = search->pattern_;
DCHECK(pattern.length() > 1);
int pattern_length = pattern.length();
- PatternChar pattern_first_char = pattern[0];
int i = index;
int n = subject.length() - pattern_length;
while (i <= n) {
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- n - i + 1));
- if (pos == NULL) return -1;
- i = static_cast<int>(pos - subject.start()) + 1;
- } else {
- if (subject[i++] != pattern_first_char) continue;
- }
+ i = FindFirstCharacter(pattern, subject, i);
+ if (i == -1) return -1;
+ DCHECK_LE(i, n);
+ i++;
// Loop extracted to separate function to allow using return to do
// a deeper break.
if (CharCompare(pattern.start() + 1,
@@ -502,22 +523,12 @@ int StringSearch<PatternChar, SubjectChar>::InitialSearch(
// We know our pattern is at least 2 characters, we cache the first so
// the common case of the first character not matching is faster.
- PatternChar pattern_first_char = pattern[0];
for (int i = index, n = subject.length() - pattern_length; i <= n; i++) {
badness++;
if (badness <= 0) {
- if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
- const SubjectChar* pos = reinterpret_cast<const SubjectChar*>(
- memchr(subject.start() + i,
- pattern_first_char,
- n - i + 1));
- if (pos == NULL) {
- return -1;
- }
- i = static_cast<int>(pos - subject.start());
- } else {
- if (subject[i] != pattern_first_char) continue;
- }
+ i = FindFirstCharacter(pattern, subject, i);
+ if (i == -1) return -1;
+ DCHECK_LE(i, n);
int j = 1;
do {
if (pattern[j] != subject[i + j]) {
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 92fff27683..2801d23cda 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -531,7 +531,7 @@ void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) {
PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
if (iter.GetCurrent()->IsJSObject()) {
- Object* key = JSObject::cast(iter.GetCurrent())->SlowReverseLookup(fun);
+ Object* key = iter.GetCurrent<JSObject>()->SlowReverseLookup(fun);
if (key != isolate->heap()->undefined_value()) {
if (!name->IsString() ||
!key->IsString() ||
diff --git a/deps/v8/src/string-stream.h b/deps/v8/src/string-stream.h
index d03f1b04c9..2f11b182d0 100644
--- a/deps/v8/src/string-stream.h
+++ b/deps/v8/src/string-stream.h
@@ -100,7 +100,7 @@ class StringStream final {
public:
enum ObjectPrintMode { kPrintObjectConcise, kPrintObjectVerbose };
StringStream(StringAllocator* allocator,
- ObjectPrintMode object_print_mode = kPrintObjectConcise)
+ ObjectPrintMode object_print_mode = kPrintObjectVerbose)
: allocator_(allocator),
object_print_mode_(object_print_mode),
capacity_(kInitialCapacity),
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 8e7fc6c01c..bd20226757 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -15,39 +15,22 @@ var GlobalRegExp = global.RegExp;
var GlobalString = global.String;
var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
-var MathMax;
-var MathMin;
var RegExpExec;
var RegExpExecNoTests;
var RegExpLastMatchInfo;
var ToNumber;
-var ToString;
utils.Import(function(from) {
ArrayIndexOf = from.ArrayIndexOf;
ArrayJoin = from.ArrayJoin;
- MathMax = from.MathMax;
- MathMin = from.MathMin;
RegExpExec = from.RegExpExec;
RegExpExecNoTests = from.RegExpExecNoTests;
RegExpLastMatchInfo = from.RegExpLastMatchInfo;
ToNumber = from.ToNumber;
- ToString = from.ToString;
});
//-------------------------------------------------------------------
-function StringConstructor(x) {
- if (%_ArgumentsLength() == 0) x = '';
- if (%_IsConstructCall()) {
- %_SetValueOf(this, TO_STRING_INLINE(x));
- } else {
- return IS_SYMBOL(x) ?
- %_CallFunction(x, $symbolToString) : TO_STRING_INLINE(x);
- }
-}
-
-
// ECMA-262 section 15.5.4.2
function StringToString() {
if (!IS_STRING(this) && !IS_STRING_WRAPPER(this)) {
@@ -72,7 +55,7 @@ function StringCharAtJS(pos) {
var result = %_StringCharAt(this, pos);
if (%_IsSmi(result)) {
- result = %_StringCharAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
+ result = %_StringCharAt(TO_STRING(this), TO_INTEGER(pos));
}
return result;
}
@@ -84,7 +67,7 @@ function StringCharCodeAtJS(pos) {
var result = %_StringCharCodeAt(this, pos);
if (!%_IsSmi(result)) {
- result = %_StringCharCodeAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
+ result = %_StringCharCodeAt(TO_STRING(this), TO_INTEGER(pos));
}
return result;
}
@@ -94,15 +77,15 @@ function StringCharCodeAtJS(pos) {
function StringConcat(other /* and more */) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.concat");
var len = %_ArgumentsLength();
- var this_as_string = TO_STRING_INLINE(this);
+ var this_as_string = TO_STRING(this);
if (len === 1) {
- return this_as_string + TO_STRING_INLINE(other);
+ return this_as_string + TO_STRING(other);
}
var parts = new InternalArray(len + 1);
parts[0] = this_as_string;
for (var i = 0; i < len; i++) {
var part = %_Arguments(i);
- parts[i + 1] = TO_STRING_INLINE(part);
+ parts[i + 1] = TO_STRING(part);
}
return %StringBuilderConcat(parts, len + 1, "");
}
@@ -112,8 +95,8 @@ function StringConcat(other /* and more */) { // length == 1
function StringIndexOfJS(pattern /* position */) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.indexOf");
- var subject = TO_STRING_INLINE(this);
- pattern = TO_STRING_INLINE(pattern);
+ var subject = TO_STRING(this);
+ pattern = TO_STRING(pattern);
var index = 0;
if (%_ArgumentsLength() > 1) {
index = %_Arguments(1); // position
@@ -129,9 +112,9 @@ function StringIndexOfJS(pattern /* position */) { // length == 1
function StringLastIndexOfJS(pat /* position */) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.lastIndexOf");
- var sub = TO_STRING_INLINE(this);
+ var sub = TO_STRING(this);
var subLength = sub.length;
- var pat = TO_STRING_INLINE(pat);
+ var pat = TO_STRING(pat);
var patLength = pat.length;
var index = subLength - patLength;
if (%_ArgumentsLength() > 1) {
@@ -160,8 +143,7 @@ function StringLastIndexOfJS(pat /* position */) { // length == 1
function StringLocaleCompareJS(other) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.localeCompare");
- return %StringLocaleCompare(TO_STRING_INLINE(this),
- TO_STRING_INLINE(other));
+ return %StringLocaleCompare(TO_STRING(this), TO_STRING(other));
}
@@ -169,12 +151,11 @@ function StringLocaleCompareJS(other) {
function StringMatchJS(regexp) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.match");
- var subject = TO_STRING_INLINE(this);
+ var subject = TO_STRING(this);
if (IS_REGEXP(regexp)) {
// Emulate RegExp.prototype.exec's side effect in step 5, even though
// value is discarded.
- var lastIndex = regexp.lastIndex;
- TO_INTEGER_FOR_SIDE_EFFECT(lastIndex);
+ var lastIndex = TO_INTEGER(regexp.lastIndex);
if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
var result = %StringMatch(subject, regexp, RegExpLastMatchInfo);
if (result !== null) $regexpLastMatchInfoOverride = null;
@@ -194,10 +175,10 @@ function StringMatchJS(regexp) {
// proper functionality.
function StringNormalizeJS() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
- var s = TO_STRING_INLINE(this);
+ var s = TO_STRING(this);
var formArg = %_Arguments(0);
- var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING_INLINE(formArg);
+ var form = IS_UNDEFINED(formArg) ? 'NFC' : TO_STRING(formArg);
var NORMALIZATION_FORMS = ['NFC', 'NFD', 'NFKC', 'NFKD'];
var normalizationForm =
@@ -223,7 +204,7 @@ var reusableMatchInfo = [2, "", "", -1, -1];
function StringReplace(search, replace) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.replace");
- var subject = TO_STRING_INLINE(this);
+ var subject = TO_STRING(this);
// Decision tree for dispatch
// .. regexp search
@@ -245,11 +226,10 @@ function StringReplace(search, replace) {
if (IS_REGEXP(search)) {
// Emulate RegExp.prototype.exec's side effect in step 5, even if
// value is discarded.
- var lastIndex = search.lastIndex;
- TO_INTEGER_FOR_SIDE_EFFECT(lastIndex);
+ var lastIndex = TO_INTEGER(search.lastIndex);
- if (!IS_SPEC_FUNCTION(replace)) {
- replace = TO_STRING_INLINE(replace);
+ if (!IS_CALLABLE(replace)) {
+ replace = TO_STRING(replace);
if (!search.global) {
// Non-global regexp search, string replace.
@@ -297,7 +277,7 @@ function StringReplace(search, replace) {
return StringReplaceNonGlobalRegExpWithFunction(subject, search, replace);
}
- search = TO_STRING_INLINE(search);
+ search = TO_STRING(search);
if (search.length == 1 &&
subject.length > 0xFF &&
@@ -315,13 +295,12 @@ function StringReplace(search, replace) {
var result = %_SubString(subject, 0, start);
// Compute the string to replace with.
- if (IS_SPEC_FUNCTION(replace)) {
- var receiver = UNDEFINED;
- result += %_CallFunction(receiver, search, start, subject, replace);
+ if (IS_CALLABLE(replace)) {
+ result += replace(search, start, subject);
} else {
reusableMatchInfo[CAPTURE0] = start;
reusableMatchInfo[CAPTURE1] = end;
- result = ExpandReplacement(TO_STRING_INLINE(replace),
+ result = ExpandReplacement(TO_STRING(replace),
subject,
reusableMatchInfo,
result);
@@ -480,11 +459,10 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
override[0] = elem;
override[1] = match_start;
$regexpLastMatchInfoOverride = override;
- var func_result =
- %_CallFunction(UNDEFINED, elem, match_start, subject, replace);
+ var func_result = replace(elem, match_start, subject);
// Overwrite the i'th element in the results with the string we got
// back from the callback function.
- res[i] = TO_STRING_INLINE(func_result);
+ res[i] = TO_STRING(func_result);
match_start += elem.length;
}
}
@@ -498,7 +476,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
var func_result = %Apply(replace, UNDEFINED, elem, 0, elem.length);
// Overwrite the i'th element in the results with the string we got
// back from the callback function.
- res[i] = TO_STRING_INLINE(func_result);
+ res[i] = TO_STRING(func_result);
}
}
}
@@ -527,7 +505,7 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
// No captures, only the match, which is always valid.
var s = %_SubString(subject, index, endOfMatch);
// Don't call directly to avoid exposing the built-in global object.
- replacement = %_CallFunction(UNDEFINED, s, index, subject, replace);
+ replacement = replace(s, index, subject);
} else {
var parameters = new InternalArray(m + 2);
for (var j = 0; j < m; j++) {
@@ -556,7 +534,7 @@ function StringSearch(re) {
} else {
regexp = new GlobalRegExp(re);
}
- var match = RegExpExec(regexp, TO_STRING_INLINE(this), 0);
+ var match = RegExpExec(regexp, TO_STRING(this), 0);
if (match) {
return match[CAPTURE0];
}
@@ -568,7 +546,7 @@ function StringSearch(re) {
function StringSlice(start, end) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.slice");
- var s = TO_STRING_INLINE(this);
+ var s = TO_STRING(this);
var s_len = s.length;
var start_i = TO_INTEGER(start);
var end_i = s_len;
@@ -610,12 +588,12 @@ function StringSlice(start, end) {
function StringSplitJS(separator, limit) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.split");
- var subject = TO_STRING_INLINE(this);
+ var subject = TO_STRING(this);
limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
var length = subject.length;
if (!IS_REGEXP(separator)) {
- var separator_string = TO_STRING_INLINE(separator);
+ var separator_string = TO_STRING(separator);
if (limit === 0) return [];
@@ -702,7 +680,7 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
function StringSubstring(start, end) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.subString");
- var s = TO_STRING_INLINE(this);
+ var s = TO_STRING(this);
var s_len = s.length;
var start_i = TO_INTEGER(start);
@@ -735,7 +713,7 @@ function StringSubstring(start, end) {
function StringSubstr(start, n) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.substr");
- var s = TO_STRING_INLINE(this);
+ var s = TO_STRING(this);
var len;
// Correct n: If not given, set to string length; if explicitly
@@ -775,7 +753,7 @@ function StringSubstr(start, n) {
function StringToLowerCaseJS() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
- return %StringToLowerCase(TO_STRING_INLINE(this));
+ return %StringToLowerCase(TO_STRING(this));
}
@@ -783,7 +761,7 @@ function StringToLowerCaseJS() {
function StringToLocaleLowerCase() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
- return %StringToLowerCase(TO_STRING_INLINE(this));
+ return %StringToLowerCase(TO_STRING(this));
}
@@ -791,7 +769,7 @@ function StringToLocaleLowerCase() {
function StringToUpperCaseJS() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.toUpperCase");
- return %StringToUpperCase(TO_STRING_INLINE(this));
+ return %StringToUpperCase(TO_STRING(this));
}
@@ -799,26 +777,26 @@ function StringToUpperCaseJS() {
function StringToLocaleUpperCase() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
- return %StringToUpperCase(TO_STRING_INLINE(this));
+ return %StringToUpperCase(TO_STRING(this));
}
// ES5, 15.5.4.20
function StringTrimJS() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.trim");
- return %StringTrim(TO_STRING_INLINE(this), true, true);
+ return %StringTrim(TO_STRING(this), true, true);
}
function StringTrimLeft() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.trimLeft");
- return %StringTrim(TO_STRING_INLINE(this), true, false);
+ return %StringTrim(TO_STRING(this), true, false);
}
function StringTrimRight() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.trimRight");
- return %StringTrim(TO_STRING_INLINE(this), false, true);
+ return %StringTrim(TO_STRING(this), false, true);
}
@@ -854,14 +832,14 @@ function StringFromCharCode(code) {
// ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
function HtmlEscape(str) {
- return %_CallFunction(TO_STRING_INLINE(str), /"/g, "&quot;", StringReplace);
+ return %_CallFunction(TO_STRING(str), /"/g, "&quot;", StringReplace);
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.2
function StringAnchor(name) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.anchor");
- return "<a name=\"" + HtmlEscape(name) + "\">" + TO_STRING_INLINE(this) +
+ return "<a name=\"" + HtmlEscape(name) + "\">" + TO_STRING(this) +
"</a>";
}
@@ -869,35 +847,35 @@ function StringAnchor(name) {
// ES6 draft, revision 26 (2014-07-18), section B.2.3.3
function StringBig() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.big");
- return "<big>" + TO_STRING_INLINE(this) + "</big>";
+ return "<big>" + TO_STRING(this) + "</big>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.4
function StringBlink() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.blink");
- return "<blink>" + TO_STRING_INLINE(this) + "</blink>";
+ return "<blink>" + TO_STRING(this) + "</blink>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.5
function StringBold() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.bold");
- return "<b>" + TO_STRING_INLINE(this) + "</b>";
+ return "<b>" + TO_STRING(this) + "</b>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.6
function StringFixed() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.fixed");
- return "<tt>" + TO_STRING_INLINE(this) + "</tt>";
+ return "<tt>" + TO_STRING(this) + "</tt>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.7
function StringFontcolor(color) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontcolor");
- return "<font color=\"" + HtmlEscape(color) + "\">" + TO_STRING_INLINE(this) +
+ return "<font color=\"" + HtmlEscape(color) + "\">" + TO_STRING(this) +
"</font>";
}
@@ -905,7 +883,7 @@ function StringFontcolor(color) {
// ES6 draft, revision 26 (2014-07-18), section B.2.3.8
function StringFontsize(size) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontsize");
- return "<font size=\"" + HtmlEscape(size) + "\">" + TO_STRING_INLINE(this) +
+ return "<font size=\"" + HtmlEscape(size) + "\">" + TO_STRING(this) +
"</font>";
}
@@ -913,50 +891,50 @@ function StringFontsize(size) {
// ES6 draft, revision 26 (2014-07-18), section B.2.3.9
function StringItalics() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.italics");
- return "<i>" + TO_STRING_INLINE(this) + "</i>";
+ return "<i>" + TO_STRING(this) + "</i>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.10
function StringLink(s) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.link");
- return "<a href=\"" + HtmlEscape(s) + "\">" + TO_STRING_INLINE(this) + "</a>";
+ return "<a href=\"" + HtmlEscape(s) + "\">" + TO_STRING(this) + "</a>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.11
function StringSmall() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.small");
- return "<small>" + TO_STRING_INLINE(this) + "</small>";
+ return "<small>" + TO_STRING(this) + "</small>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.12
function StringStrike() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.strike");
- return "<strike>" + TO_STRING_INLINE(this) + "</strike>";
+ return "<strike>" + TO_STRING(this) + "</strike>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.13
function StringSub() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.sub");
- return "<sub>" + TO_STRING_INLINE(this) + "</sub>";
+ return "<sub>" + TO_STRING(this) + "</sub>";
}
// ES6 draft, revision 26 (2014-07-18), section B.2.3.14
function StringSup() {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.sup");
- return "<sup>" + TO_STRING_INLINE(this) + "</sup>";
+ return "<sup>" + TO_STRING(this) + "</sup>";
}
// ES6 draft 01-20-14, section 21.1.3.13
function StringRepeat(count) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.repeat");
- var s = TO_STRING_INLINE(this);
- var n = $toInteger(count);
+ var s = TO_STRING(this);
+ var n = TO_INTEGER(count);
// The maximum string length is stored in a smi, so a longer repeat
// must result in a range error.
if (n < 0 || n > %_MaxSmi()) throw MakeRangeError(kInvalidCountValue);
@@ -975,27 +953,37 @@ function StringRepeat(count) {
function StringStartsWith(searchString /* position */) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.startsWith");
- var s = TO_STRING_INLINE(this);
+ var s = TO_STRING(this);
if (IS_REGEXP(searchString)) {
throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.startsWith");
}
- var ss = TO_STRING_INLINE(searchString);
+ var ss = TO_STRING(searchString);
var pos = 0;
if (%_ArgumentsLength() > 1) {
- pos = %_Arguments(1); // position
- pos = $toInteger(pos);
+ var arg = %_Arguments(1); // position
+ if (!IS_UNDEFINED(arg)) {
+ pos = TO_INTEGER(arg);
+ }
}
var s_len = s.length;
- var start = MathMin(MathMax(pos, 0), s_len);
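+  // Clamp the position to [0, s.length] inline; MathMin and MathMax are no
+  // longer imported into this script.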
+ if (pos < 0) pos = 0;
+ if (pos > s_len) pos = s_len;
var ss_len = ss.length;
- if (ss_len + start > s_len) {
+
+ if (ss_len + pos > s_len) {
return false;
}
- return %StringIndexOf(s, ss, start) === start;
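+  // Compare the search string against the slice starting at pos one code
+  // unit at a time instead of delegating to a general substring search.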
+ for (var i = 0; i < ss_len; i++) {
+ if (%_StringCharCodeAt(s, pos + i) !== %_StringCharCodeAt(ss, i)) {
+ return false;
+ }
+ }
+
+ return true;
}
@@ -1003,30 +991,38 @@ function StringStartsWith(searchString /* position */) { // length == 1
function StringEndsWith(searchString /* position */) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.endsWith");
- var s = TO_STRING_INLINE(this);
+ var s = TO_STRING(this);
if (IS_REGEXP(searchString)) {
throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.endsWith");
}
- var ss = TO_STRING_INLINE(searchString);
+ var ss = TO_STRING(searchString);
var s_len = s.length;
var pos = s_len;
if (%_ArgumentsLength() > 1) {
var arg = %_Arguments(1); // position
if (!IS_UNDEFINED(arg)) {
- pos = $toInteger(arg);
+ pos = TO_INTEGER(arg);
}
}
- var end = MathMin(MathMax(pos, 0), s_len);
+ if (pos < 0) pos = 0;
+ if (pos > s_len) pos = s_len;
var ss_len = ss.length;
- var start = end - ss_len;
- if (start < 0) {
+ pos = pos - ss_len;
+
+ if (pos < 0) {
return false;
}
- return %StringLastIndexOf(s, ss, start) === start;
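+  // Likewise, compare the search string against the tail slice directly,
+  // one code unit at a time.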
+ for (var i = 0; i < ss_len; i++) {
+ if (%_StringCharCodeAt(s, pos + i) !== %_StringCharCodeAt(ss, i)) {
+ return false;
+ }
+ }
+
+ return true;
}
@@ -1034,13 +1030,13 @@ function StringEndsWith(searchString /* position */) { // length == 1
function StringIncludes(searchString /* position */) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.includes");
- var string = TO_STRING_INLINE(this);
+ var string = TO_STRING(this);
if (IS_REGEXP(searchString)) {
throw MakeTypeError(kFirstArgumentNotRegExp, "String.prototype.includes");
}
- searchString = TO_STRING_INLINE(searchString);
+ searchString = TO_STRING(searchString);
var pos = 0;
if (%_ArgumentsLength() > 1) {
pos = %_Arguments(1); // position
@@ -1064,7 +1060,7 @@ function StringIncludes(searchString /* position */) { // length == 1
function StringCodePointAt(pos) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.codePointAt");
- var string = TO_STRING_INLINE(this);
+ var string = TO_STRING(this);
var size = string.length;
pos = TO_INTEGER(pos);
if (pos < 0 || pos >= size) {
@@ -1117,16 +1113,16 @@ function StringRaw(callSite) {
var numberOfSubstitutions = %_ArgumentsLength();
var cooked = TO_OBJECT(callSite);
var raw = TO_OBJECT(cooked.raw);
- var literalSegments = $toLength(raw.length);
+ var literalSegments = TO_LENGTH(raw.length);
if (literalSegments <= 0) return "";
- var result = ToString(raw[0]);
+ var result = TO_STRING(raw[0]);
for (var i = 1; i < literalSegments; ++i) {
if (i < numberOfSubstitutions) {
- result += ToString(%_Arguments(i));
+ result += TO_STRING(%_Arguments(i));
}
- result += ToString(raw[i]);
+ result += TO_STRING(raw[i]);
}
return result;
@@ -1135,7 +1131,6 @@ function StringRaw(callSite) {
// -------------------------------------------------------------------
// Set the String function and constructor.
-%SetCode(GlobalString, StringConstructor);
%FunctionSetPrototype(GlobalString, new GlobalString());
// Set up the constructor property on the String prototype object.
diff --git a/deps/v8/src/strings-storage.cc b/deps/v8/src/strings-storage.cc
index 8ddf291fcc..52cc00f852 100644
--- a/deps/v8/src/strings-storage.cc
+++ b/deps/v8/src/strings-storage.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/strings-storage.h"
+#include "src/base/smart-pointers.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/symbol.js b/deps/v8/src/symbol.js
index 8cac2c56a2..1596169685 100644
--- a/deps/v8/src/symbol.js
+++ b/deps/v8/src/symbol.js
@@ -2,16 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Expects following symbols to be set in the bootstrapper during genesis:
-// - symbolHasInstance
-// - symbolIsConcatSpreadable
-// - symbolIsRegExp
-// - symbolIterator
-// - symbolToStringTag
-// - symbolUnscopables
-
-var $symbolToString;
-
(function(global, utils) {
"use strict";
@@ -23,20 +13,29 @@ var $symbolToString;
var GlobalObject = global.Object;
var GlobalSymbol = global.Symbol;
+var hasInstanceSymbol = utils.ImportNow("has_instance_symbol");
+var isConcatSpreadableSymbol =
+ utils.ImportNow("is_concat_spreadable_symbol");
+var isRegExpSymbol = utils.ImportNow("is_regexp_symbol");
+var iteratorSymbol = utils.ImportNow("iterator_symbol");
var ObjectGetOwnPropertyKeys;
-var ToString;
+var toPrimitiveSymbol = utils.ImportNow("to_primitive_symbol");
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
+var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
utils.Import(function(from) {
ObjectGetOwnPropertyKeys = from.ObjectGetOwnPropertyKeys;
- ToString = from.ToString;
});
// -------------------------------------------------------------------
-function SymbolConstructor(x) {
- if (%_IsConstructCall()) throw MakeTypeError(kNotConstructor, "Symbol");
- // NOTE: Passing in a Symbol value will throw on ToString().
- return %CreateSymbol(IS_UNDEFINED(x) ? x : ToString(x));
+// 19.4.3.4 Symbol.prototype [ @@toPrimitive ] ( hint )
+function SymbolToPrimitive(hint) {
+ if (!(IS_SYMBOL(this) || IS_SYMBOL_WRAPPER(this))) {
+ throw MakeTypeError(kIncompatibleMethodReceiver,
+ "Symbol.prototype [ @@toPrimitive ]", this);
+ }
+ return %_ValueOf(this);
}
@@ -45,8 +44,7 @@ function SymbolToString() {
throw MakeTypeError(kIncompatibleMethodReceiver,
"Symbol.prototype.toString", this);
}
- var description = %SymbolDescription(%_ValueOf(this));
- return "Symbol(" + (IS_UNDEFINED(description) ? "" : description) + ")";
+ return %SymbolDescriptiveString(%_ValueOf(this));
}
@@ -60,7 +58,7 @@ function SymbolValueOf() {
function SymbolFor(key) {
- key = TO_STRING_INLINE(key);
+ key = TO_STRING(key);
var registry = %SymbolRegistry();
if (IS_UNDEFINED(registry.for[key])) {
var symbol = %CreateSymbol(key);
@@ -86,21 +84,21 @@ function ObjectGetOwnPropertySymbols(obj) {
return ObjectGetOwnPropertyKeys(obj, PROPERTY_ATTRIBUTES_STRING);
}
-//-------------------------------------------------------------------
+// -------------------------------------------------------------------
-%SetCode(GlobalSymbol, SymbolConstructor);
%FunctionSetPrototype(GlobalSymbol, new GlobalObject());
utils.InstallConstants(GlobalSymbol, [
// TODO(rossberg): expose when implemented.
- // "hasInstance", symbolHasInstance,
- // "isConcatSpreadable", symbolIsConcatSpreadable,
- // "isRegExp", symbolIsRegExp,
- "iterator", symbolIterator,
+ // "hasInstance", hasInstanceSymbol,
+ // "isConcatSpreadable", isConcatSpreadableSymbol,
+ // "isRegExp", isRegExpSymbol,
+ "iterator", iteratorSymbol,
+ "toPrimitive", toPrimitiveSymbol,
// TODO(dslomov, caitp): Currently defined in harmony-tostring.js ---
// Move here when shipping
- // "toStringTag", symbolToStringTag,
- "unscopables", symbolUnscopables
+ // "toStringTag", toStringTagSymbol,
+ "unscopables", unscopablesSymbol,
]);
utils.InstallFunctions(GlobalSymbol, DONT_ENUM, [
@@ -111,7 +109,11 @@ utils.InstallFunctions(GlobalSymbol, DONT_ENUM, [
%AddNamedProperty(
GlobalSymbol.prototype, "constructor", GlobalSymbol, DONT_ENUM);
%AddNamedProperty(
- GlobalSymbol.prototype, symbolToStringTag, "Symbol", DONT_ENUM | READ_ONLY);
+ GlobalSymbol.prototype, toStringTagSymbol, "Symbol", DONT_ENUM | READ_ONLY);
+
+utils.InstallFunctions(GlobalSymbol.prototype, DONT_ENUM | READ_ONLY, [
+ toPrimitiveSymbol, SymbolToPrimitive
+]);
utils.InstallFunctions(GlobalSymbol.prototype, DONT_ENUM, [
"toString", SymbolToString,
@@ -122,6 +124,11 @@ utils.InstallFunctions(GlobalObject, DONT_ENUM, [
"getOwnPropertySymbols", ObjectGetOwnPropertySymbols
]);
-$symbolToString = SymbolToString;
+// -------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+ to.SymbolToString = SymbolToString;
+})
})
diff --git a/deps/v8/src/templates.js b/deps/v8/src/templates.js
index b7e1527fc6..b273bc39e8 100644
--- a/deps/v8/src/templates.js
+++ b/deps/v8/src/templates.js
@@ -4,8 +4,6 @@
// Called from a desugaring in the parser.
-var $getTemplateCallSite;
-
(function(global, utils) {
"use strict";
@@ -67,7 +65,7 @@ function SetCachedCallSite(siteObj, hash) {
}
-$getTemplateCallSite = function(siteObj, rawStrings, hash) {
+function GetTemplateCallSite(siteObj, rawStrings, hash) {
var cached = GetCachedCallSite(rawStrings, hash);
if (!IS_UNDEFINED(cached)) return cached;
@@ -78,4 +76,9 @@ $getTemplateCallSite = function(siteObj, rawStrings, hash) {
return SetCachedCallSite(%ObjectFreeze(siteObj), hash);
}
+// ----------------------------------------------------------------------------
+// Exports
+
+%InstallToContext(["get_template_call_site", GetTemplateCallSite]);
+
})
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index cc7e9f8cbd..db9092d21b 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -166,6 +166,7 @@ namespace internal {
\
/* Scanner-internal use only. */ \
T(WHITESPACE, NULL, 0) \
+ T(UNINITIALIZED, NULL, 0) \
\
/* ES6 Template Literals */ \
T(TEMPLATE_SPAN, NULL, 0) \
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 9870e17d83..64b8133528 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/transitions.h"
-#include "src/objects.h"
+#include "src/objects-inl.h"
#include "src/transitions-inl.h"
#include "src/utils.h"
diff --git a/deps/v8/src/type-feedback-vector-inl.h b/deps/v8/src/type-feedback-vector-inl.h
index 7fa51d802a..4d1c345e68 100644
--- a/deps/v8/src/type-feedback-vector-inl.h
+++ b/deps/v8/src/type-feedback-vector-inl.h
@@ -10,11 +10,118 @@
namespace v8 {
namespace internal {
+// static
+TypeFeedbackVector* TypeFeedbackVector::cast(Object* obj) {
+ DCHECK(obj->IsTypeFeedbackVector());
+ return reinterpret_cast<TypeFeedbackVector*>(obj);
+}
+
+
+int TypeFeedbackVector::first_ic_slot_index() const {
+ DCHECK(length() >= kReservedIndexCount);
+ return Smi::cast(get(kFirstICSlotIndex))->value();
+}
+
+
+int TypeFeedbackVector::ic_with_type_info_count() {
+ return length() > 0 ? Smi::cast(get(kWithTypesIndex))->value() : 0;
+}
+
+
+void TypeFeedbackVector::change_ic_with_type_info_count(int delta) {
+ if (delta == 0) return;
+ int value = ic_with_type_info_count() + delta;
+ // Could go negative because of the debugger.
+ if (value >= 0) {
+ set(kWithTypesIndex, Smi::FromInt(value));
+ }
+}
+
+
+int TypeFeedbackVector::ic_generic_count() {
+ return length() > 0 ? Smi::cast(get(kGenericCountIndex))->value() : 0;
+}
+
+
+void TypeFeedbackVector::change_ic_generic_count(int delta) {
+ if (delta == 0) return;
+ int value = ic_generic_count() + delta;
+ if (value >= 0) {
+ set(kGenericCountIndex, Smi::FromInt(value));
+ }
+}
+
+
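+// The vector layout is: the reserved header fields, the IC-kind metadata
+// words, the plain feedback slots, and finally the IC slots, each of which
+// spans elements_per_ic_slot() array elements.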
+int TypeFeedbackVector::Slots() const {
+ if (length() == 0) return 0;
+ return Max(
+ 0, first_ic_slot_index() - ic_metadata_length() - kReservedIndexCount);
+}
+
+
+int TypeFeedbackVector::ICSlots() const {
+ if (length() == 0) return 0;
+ return (length() - first_ic_slot_index()) / elements_per_ic_slot();
+}
+
+
int TypeFeedbackVector::ic_metadata_length() const {
return VectorICComputer::word_count(ICSlots());
}
+// Conversion from a slot or ic slot to an integer index to the underlying
+// array.
+int TypeFeedbackVector::GetIndex(FeedbackVectorSlot slot) const {
+ DCHECK(slot.ToInt() < first_ic_slot_index());
+ return kReservedIndexCount + ic_metadata_length() + slot.ToInt();
+}
+
+
+int TypeFeedbackVector::GetIndex(FeedbackVectorICSlot slot) const {
+ int first_ic_slot = first_ic_slot_index();
+ DCHECK(slot.ToInt() < ICSlots());
+ return first_ic_slot + slot.ToInt() * elements_per_ic_slot();
+}
+
+
+// Conversion from an integer index to either a slot or an ic slot. The caller
+// should know what kind she expects.
+FeedbackVectorSlot TypeFeedbackVector::ToSlot(int index) const {
+ DCHECK(index >= kReservedIndexCount && index < first_ic_slot_index());
+ return FeedbackVectorSlot(index - ic_metadata_length() - kReservedIndexCount);
+}
+
+
+FeedbackVectorICSlot TypeFeedbackVector::ToICSlot(int index) const {
+ DCHECK(index >= first_ic_slot_index() && index < length());
+ int ic_slot = (index - first_ic_slot_index()) / elements_per_ic_slot();
+ return FeedbackVectorICSlot(ic_slot);
+}
+
+
+Object* TypeFeedbackVector::Get(FeedbackVectorSlot slot) const {
+ return get(GetIndex(slot));
+}
+
+
+void TypeFeedbackVector::Set(FeedbackVectorSlot slot, Object* value,
+ WriteBarrierMode mode) {
+ set(GetIndex(slot), value, mode);
+}
+
+
+Object* TypeFeedbackVector::Get(FeedbackVectorICSlot slot) const {
+ return get(GetIndex(slot));
+}
+
+
+void TypeFeedbackVector::Set(FeedbackVectorICSlot slot, Object* value,
+ WriteBarrierMode mode) {
+ set(GetIndex(slot), value, mode);
+}
+
+
Handle<Object> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
return isolate->factory()->uninitialized_symbol();
}
@@ -33,6 +140,32 @@ Handle<Object> TypeFeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
Object* TypeFeedbackVector::RawUninitializedSentinel(Heap* heap) {
return heap->uninitialized_symbol();
}
+
+
+Object* FeedbackNexus::GetFeedback() const { return vector()->Get(slot()); }
+
+
+Object* FeedbackNexus::GetFeedbackExtra() const {
+ DCHECK(TypeFeedbackVector::elements_per_ic_slot() > 1);
+ int extra_index = vector()->GetIndex(slot()) + 1;
+ return vector()->get(extra_index);
+}
+
+
+void FeedbackNexus::SetFeedback(Object* feedback, WriteBarrierMode mode) {
+ vector()->Set(slot(), feedback, mode);
+}
+
+
+void FeedbackNexus::SetFeedbackExtra(Object* feedback_extra,
+ WriteBarrierMode mode) {
+ DCHECK(TypeFeedbackVector::elements_per_ic_slot() > 1);
+ int index = vector()->GetIndex(slot()) + 1;
+ vector()->set(index, feedback_extra, mode);
+}
+
+
+Isolate* FeedbackNexus::GetIsolate() const { return vector()->GetIsolate(); }
}
} // namespace v8::internal
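
The accessors moved into this inline header fix the feedback vector's array layout: three reserved entries, then the packed slot-kind metadata, then the plain slots, then the IC slots at elements_per_ic_slot() == 2 array elements each (the feedback plus the extra element behind GetFeedbackExtra). A minimal standalone sketch of that index arithmetic, using the constants from the hunk above and assuming a single metadata word, which is what VectorICComputer::word_count() yields for small slot counts:

    #include <cassert>
    #include <cstdio>

    // Constants as in the diff; one metadata word is an assumption that
    // holds for a handful of IC slots.
    const int kReservedIndexCount = 3;
    const int kElementsPerICSlot = 2;
    const int kMetadataWords = 1;

    // Index of a plain slot: skip the reserved entries and metadata words.
    int GetIndex(int slot) {
      return kReservedIndexCount + kMetadataWords + slot;
    }

    // Index of an IC slot: plain slots come first, then two elements per
    // IC slot (the feedback and the "extra" read by GetFeedbackExtra()).
    int GetICIndex(int slots, int ic_slot) {
      int first_ic_slot_index = kReservedIndexCount + kMetadataWords + slots;
      return first_ic_slot_index + ic_slot * kElementsPerICSlot;
    }

    int main() {
      const int slots = 2, ic_slots = 3;
      assert(GetIndex(0) == 4);            // first plain slot
      assert(GetICIndex(slots, 0) == 6);   // first IC slot's feedback
      assert(GetICIndex(slots, 2) == 10);  // third IC slot; its extra is at 11
      std::printf("total length = %d\n", GetICIndex(slots, ic_slots));  // 12
      return 0;
    }
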
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
index c93e620d7d..9da3c655fb 100644
--- a/deps/v8/src/type-feedback-vector.cc
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/type-feedback-vector.h"
#include "src/code-stubs.h"
#include "src/ic/ic.h"
@@ -13,73 +13,32 @@
namespace v8 {
namespace internal {
-// static
-TypeFeedbackVector::VectorICKind TypeFeedbackVector::FromCodeKind(
- Code::Kind kind) {
- switch (kind) {
- case Code::CALL_IC:
- return KindCallIC;
- case Code::LOAD_IC:
- return KindLoadIC;
- case Code::KEYED_LOAD_IC:
- return KindKeyedLoadIC;
- case Code::STORE_IC:
- return KindStoreIC;
- case Code::KEYED_STORE_IC:
- return KindKeyedStoreIC;
- default:
- // Shouldn't get here.
- UNREACHABLE();
- }
-
- return KindUnused;
-}
-
-
-// static
-Code::Kind TypeFeedbackVector::FromVectorICKind(VectorICKind kind) {
- switch (kind) {
- case KindCallIC:
- return Code::CALL_IC;
- case KindLoadIC:
- return Code::LOAD_IC;
- case KindKeyedLoadIC:
- return Code::KEYED_LOAD_IC;
- case KindStoreIC:
- DCHECK(FLAG_vector_stores);
- return Code::STORE_IC;
- case KindKeyedStoreIC:
- DCHECK(FLAG_vector_stores);
- return Code::KEYED_STORE_IC;
- case KindUnused:
- break;
- }
- // Sentinel for no information.
- return Code::NUMBER_OF_KINDS;
+std::ostream& operator<<(std::ostream& os, FeedbackVectorSlotKind kind) {
+ return os << TypeFeedbackVector::Kind2String(kind);
}
-Code::Kind TypeFeedbackVector::GetKind(FeedbackVectorICSlot slot) const {
+FeedbackVectorSlotKind TypeFeedbackVector::GetKind(
+ FeedbackVectorICSlot slot) const {
int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
int data = Smi::cast(get(index))->value();
- VectorICKind b = VectorICComputer::decode(data, slot.ToInt());
- return FromVectorICKind(b);
+ return VectorICComputer::decode(data, slot.ToInt());
}
-void TypeFeedbackVector::SetKind(FeedbackVectorICSlot slot, Code::Kind kind) {
- VectorICKind b = FromCodeKind(kind);
+void TypeFeedbackVector::SetKind(FeedbackVectorICSlot slot,
+ FeedbackVectorSlotKind kind) {
int index = VectorICComputer::index(kReservedIndexCount, slot.ToInt());
int data = Smi::cast(get(index))->value();
- int new_data = VectorICComputer::encode(data, slot.ToInt(), b);
+ int new_data = VectorICComputer::encode(data, slot.ToInt(), kind);
set(index, Smi::FromInt(new_data));
}
template Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(
- Isolate* isolate, const FeedbackVectorSpec* spec);
+ Isolate* isolate, const StaticFeedbackVectorSpec* spec);
template Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(
- Isolate* isolate, const ZoneFeedbackVectorSpec* spec);
+ Isolate* isolate, const FeedbackVectorSpec* spec);
// static
@@ -125,6 +84,34 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(Isolate* isolate,
}
+template int TypeFeedbackVector::GetIndexFromSpec(const FeedbackVectorSpec*,
+ FeedbackVectorICSlot);
+template int TypeFeedbackVector::GetIndexFromSpec(const FeedbackVectorSpec*,
+ FeedbackVectorSlot);
+
+
+// static
+template <typename Spec>
+int TypeFeedbackVector::GetIndexFromSpec(const Spec* spec,
+ FeedbackVectorSlot slot) {
+ const int ic_slot_count = spec->ic_slots();
+ const int index_count = VectorICComputer::word_count(ic_slot_count);
+ return kReservedIndexCount + index_count + slot.ToInt();
+}
+
+
+// static
+template <typename Spec>
+int TypeFeedbackVector::GetIndexFromSpec(const Spec* spec,
+ FeedbackVectorICSlot slot) {
+ const int slot_count = spec->slots();
+ const int ic_slot_count = spec->ic_slots();
+ const int index_count = VectorICComputer::word_count(ic_slot_count);
+ return kReservedIndexCount + index_count + slot_count +
+ slot.ToInt() * elements_per_ic_slot();
+}
+
+
// static
int TypeFeedbackVector::PushAppliedArgumentsIndex() {
const int index_count = VectorICComputer::word_count(1);
@@ -135,8 +122,8 @@ int TypeFeedbackVector::PushAppliedArgumentsIndex() {
// static
Handle<TypeFeedbackVector> TypeFeedbackVector::CreatePushAppliedArgumentsVector(
Isolate* isolate) {
- Code::Kind kinds[] = {Code::KEYED_LOAD_IC};
- FeedbackVectorSpec spec(0, 1, kinds);
+ FeedbackVectorSlotKind kinds[] = {FeedbackVectorSlotKind::KEYED_LOAD_IC};
+ StaticFeedbackVectorSpec spec(0, 1, kinds);
Handle<TypeFeedbackVector> feedback_vector =
isolate->factory()->NewTypeFeedbackVector(&spec);
DCHECK(PushAppliedArgumentsIndex() ==
@@ -156,7 +143,7 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Copy(
bool TypeFeedbackVector::SpecDiffersFrom(
- const ZoneFeedbackVectorSpec* other_spec) const {
+ const FeedbackVectorSpec* other_spec) const {
if (other_spec->slots() != Slots() || other_spec->ic_slots() != ICSlots()) {
return true;
}
@@ -219,21 +206,70 @@ void TypeFeedbackVector::ClearICSlotsImpl(SharedFunctionInfo* shared,
FeedbackVectorICSlot slot(i);
Object* obj = Get(slot);
if (obj != uninitialized_sentinel) {
- Code::Kind kind = GetKind(slot);
- if (kind == Code::CALL_IC) {
- CallICNexus nexus(this, slot);
- nexus.Clear(host);
- } else if (kind == Code::LOAD_IC) {
- LoadICNexus nexus(this, slot);
- nexus.Clear(host);
- } else if (kind == Code::KEYED_LOAD_IC) {
- KeyedLoadICNexus nexus(this, slot);
- nexus.Clear(host);
- } else if (kind == Code::STORE_IC) {
- DCHECK(FLAG_vector_stores);
- StoreICNexus nexus(this, slot);
- nexus.Clear(host);
- } else if (kind == Code::KEYED_STORE_IC) {
+ FeedbackVectorSlotKind kind = GetKind(slot);
+ switch (kind) {
+ case FeedbackVectorSlotKind::CALL_IC: {
+ CallICNexus nexus(this, slot);
+ nexus.Clear(host);
+ break;
+ }
+ case FeedbackVectorSlotKind::LOAD_IC: {
+ LoadICNexus nexus(this, slot);
+ nexus.Clear(host);
+ break;
+ }
+ case FeedbackVectorSlotKind::KEYED_LOAD_IC: {
+ KeyedLoadICNexus nexus(this, slot);
+ nexus.Clear(host);
+ break;
+ }
+ case FeedbackVectorSlotKind::STORE_IC: {
+ DCHECK(FLAG_vector_stores);
+ StoreICNexus nexus(this, slot);
+ nexus.Clear(host);
+ break;
+ }
+ case FeedbackVectorSlotKind::KEYED_STORE_IC: {
+ DCHECK(FLAG_vector_stores);
+ KeyedStoreICNexus nexus(this, slot);
+ nexus.Clear(host);
+ break;
+ }
+ case FeedbackVectorSlotKind::UNUSED:
+ case FeedbackVectorSlotKind::KINDS_NUMBER:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+}
+
+
+// static
+void TypeFeedbackVector::ClearAllKeyedStoreICs(Isolate* isolate) {
+ DCHECK(FLAG_vector_stores);
+ SharedFunctionInfo::Iterator iterator(isolate);
+ SharedFunctionInfo* shared;
+ while ((shared = iterator.Next())) {
+ TypeFeedbackVector* vector = shared->feedback_vector();
+ vector->ClearKeyedStoreICs(shared);
+ }
+}
+
+
+void TypeFeedbackVector::ClearKeyedStoreICs(SharedFunctionInfo* shared) {
+ Heap* heap = GetIsolate()->heap();
+
+ int slots = ICSlots();
+ Code* host = shared->code();
+ Object* uninitialized_sentinel =
+ TypeFeedbackVector::RawUninitializedSentinel(heap);
+ for (int i = 0; i < slots; i++) {
+ FeedbackVectorICSlot slot(i);
+ Object* obj = Get(slot);
+ if (obj != uninitialized_sentinel) {
+ FeedbackVectorSlotKind kind = GetKind(slot);
+ if (kind == FeedbackVectorSlotKind::KEYED_STORE_IC) {
DCHECK(FLAG_vector_stores);
KeyedStoreICNexus nexus(this, slot);
nexus.Clear(host);
@@ -249,6 +285,28 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::DummyVector(Isolate* isolate) {
}
+const char* TypeFeedbackVector::Kind2String(FeedbackVectorSlotKind kind) {
+ switch (kind) {
+ case FeedbackVectorSlotKind::UNUSED:
+ return "UNUSED";
+ case FeedbackVectorSlotKind::CALL_IC:
+ return "CALL_IC";
+ case FeedbackVectorSlotKind::LOAD_IC:
+ return "LOAD_IC";
+ case FeedbackVectorSlotKind::KEYED_LOAD_IC:
+ return "KEYED_LOAD_IC";
+ case FeedbackVectorSlotKind::STORE_IC:
+ return "STORE_IC";
+ case FeedbackVectorSlotKind::KEYED_STORE_IC:
+ return "KEYED_STORE_IC";
+ case FeedbackVectorSlotKind::KINDS_NUMBER:
+ break;
+ }
+ UNREACHABLE();
+ return "?";
+}
+
+
Handle<FixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
Isolate* isolate = GetIsolate();
Handle<Object> feedback = handle(GetFeedback(), isolate);
@@ -570,6 +628,32 @@ void KeyedStoreICNexus::ConfigurePolymorphic(Handle<Name> name,
}
+void KeyedStoreICNexus::ConfigurePolymorphic(MapHandleList* maps,
+ MapHandleList* transitioned_maps,
+ CodeHandleList* handlers) {
+ int receiver_count = maps->length();
+ DCHECK(receiver_count > 1);
+ Handle<FixedArray> array = EnsureArrayOfSize(receiver_count * 3);
+ SetFeedbackExtra(*TypeFeedbackVector::UninitializedSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
+
+ Handle<Oddball> undefined_value = GetIsolate()->factory()->undefined_value();
+ for (int i = 0; i < receiver_count; ++i) {
+ Handle<Map> map = maps->at(i);
+ Handle<WeakCell> cell = Map::WeakCellForMap(map);
+ array->set(i * 3, *cell);
+ if (!transitioned_maps->at(i).is_null()) {
+ Handle<Map> transitioned_map = transitioned_maps->at(i);
+ cell = Map::WeakCellForMap(transitioned_map);
+ array->set((i * 3) + 1, *cell);
+ } else {
+ array->set((i * 3) + 1, *undefined_value);
+ }
+ array->set((i * 3) + 2, *handlers->at(i));
+ }
+}
+
+
int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
@@ -579,10 +663,13 @@ int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
feedback = GetFeedbackExtra();
}
FixedArray* array = FixedArray::cast(feedback);
- // The array should be of the form [<optional name>], then
- // [map, handler, map, handler, ... ]
+ // The array should be of the form
+ // [map, handler, map, handler, ...]
+ // or
+ // [map, map, handler, map, map, handler, ...]
DCHECK(array->length() >= 2);
- for (int i = 0; i < array->length(); i += 2) {
+ int increment = array->get(1)->IsCode() ? 2 : 3;
+ for (int i = 0; i < array->length(); i += increment) {
DCHECK(array->get(i)->IsWeakCell());
WeakCell* cell = WeakCell::cast(array->get(i));
if (!cell->cleared()) {
@@ -612,13 +699,15 @@ MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
feedback = GetFeedbackExtra();
}
FixedArray* array = FixedArray::cast(feedback);
- for (int i = 0; i < array->length(); i += 2) {
+ DCHECK(array->length() >= 2);
+ int increment = array->get(1)->IsCode() ? 2 : 3;
+ for (int i = 0; i < array->length(); i += increment) {
DCHECK(array->get(i)->IsWeakCell());
WeakCell* cell = WeakCell::cast(array->get(i));
if (!cell->cleared()) {
Map* array_map = Map::cast(cell->value());
if (array_map == *map) {
- Code* code = Code::cast(array->get(i + 1));
+ Code* code = Code::cast(array->get(i + increment - 1));
DCHECK(code->kind() == Code::HANDLER);
return handle(code);
}
@@ -648,14 +737,18 @@ bool FeedbackNexus::FindHandlers(CodeHandleList* code_list, int length) const {
feedback = GetFeedbackExtra();
}
FixedArray* array = FixedArray::cast(feedback);
- // The array should be of the form [map, handler, map, handler, ... ].
+ // The array should be of the form
+ // [map, handler, map, handler, ...]
+ // or
+ // [map, map, handler, map, map, handler, ...]
// Be sure to skip handlers whose maps have been cleared.
DCHECK(array->length() >= 2);
- for (int i = 0; i < array->length(); i += 2) {
+ int increment = array->get(1)->IsCode() ? 2 : 3;
+ for (int i = 0; i < array->length(); i += increment) {
DCHECK(array->get(i)->IsWeakCell());
WeakCell* cell = WeakCell::cast(array->get(i));
if (!cell->cleared()) {
- Code* code = Code::cast(array->get(i + 1));
+ Code* code = Code::cast(array->get(i + increment - 1));
DCHECK(code->kind() == Code::HANDLER);
code_list->Add(handle(code));
count++;
@@ -708,5 +801,40 @@ void StoreICNexus::Clear(Code* host) {
void KeyedStoreICNexus::Clear(Code* host) {
KeyedStoreIC::Clear(GetIsolate(), host, this);
}
+
+
+KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
+ KeyedAccessStoreMode mode = STANDARD_STORE;
+ MapHandleList maps;
+ CodeHandleList handlers;
+
+ if (GetKeyType() == PROPERTY) return mode;
+
+ ExtractMaps(&maps);
+ FindHandlers(&handlers, maps.length());
+ for (int i = 0; i < handlers.length(); i++) {
+ // The first handler that isn't the slow handler will have the bits we need.
+ Handle<Code> handler = handlers.at(i);
+ CodeStub::Major major_key = CodeStub::MajorKeyFromKey(handler->stub_key());
+ uint32_t minor_key = CodeStub::MinorKeyFromKey(handler->stub_key());
+ CHECK(major_key == CodeStub::KeyedStoreSloppyArguments ||
+ major_key == CodeStub::StoreFastElement ||
+ major_key == CodeStub::StoreElement ||
+ major_key == CodeStub::ElementsTransitionAndStore ||
+ major_key == CodeStub::NoCache);
+ if (major_key != CodeStub::NoCache) {
+ mode = CommonStoreModeBits::decode(minor_key);
+ break;
+ }
+ }
+
+ return mode;
+}
+
+
+IcCheckType KeyedStoreICNexus::GetKeyType() const {
+ // The structure of the vector slots tells us the type.
+ return GetFeedback()->IsName() ? PROPERTY : ELEMENT;
+}
} // namespace internal
} // namespace v8
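
The new three-argument ConfigurePolymorphic above stores (map, transitioned map, handler) triples, so ExtractMaps, FindHandlerForMap and FindHandlers now probe element 1 to pick a stride: code there means [map, handler, ...] pairs, anything else means triples, and the handler always sits at i + increment - 1. A stride-detection sketch with plain tags standing in for the WeakCell and Code heap objects (the tag names are illustrative, not V8 types):

    #include <cstdio>
    #include <vector>

    // Illustrative stand-ins for the heap objects in the feedback array.
    enum Tag { kMapCell, kTransitionCell, kHandlerCode };

    // Mirrors the loop in FindHandlers(): element 1 being code means pairs,
    // otherwise the array holds (map, transitioned map, handler) triples.
    int CountHandlers(const std::vector<Tag>& array) {
      int increment = (array.at(1) == kHandlerCode) ? 2 : 3;
      int count = 0;
      for (size_t i = 0; i + increment - 1 < array.size(); i += increment) {
        if (array[i + increment - 1] == kHandlerCode) count++;  // handler slot
      }
      return count;
    }

    int main() {
      std::vector<Tag> pairs = {kMapCell, kHandlerCode,
                                kMapCell, kHandlerCode};
      std::vector<Tag> triples = {kMapCell, kTransitionCell, kHandlerCode,
                                  kMapCell, kTransitionCell, kHandlerCode};
      std::printf("%d %d\n", CountHandlers(pairs), CountHandlers(triples));
      return 0;  // prints: 2 2
    }
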
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index 3c4c5e680a..5c28fca55f 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -17,19 +17,34 @@
namespace v8 {
namespace internal {
-class FeedbackVectorSpec {
+
+enum class FeedbackVectorSlotKind {
+ UNUSED,
+ CALL_IC,
+ LOAD_IC,
+ KEYED_LOAD_IC,
+ STORE_IC,
+ KEYED_STORE_IC,
+
+ KINDS_NUMBER // Last value indicating number of kinds.
+};
+
+
+std::ostream& operator<<(std::ostream& os, FeedbackVectorSlotKind kind);
+
+
+class StaticFeedbackVectorSpec {
public:
- FeedbackVectorSpec() : slots_(0), ic_slots_(0), ic_kinds_(NULL) {}
- explicit FeedbackVectorSpec(int slots)
- : slots_(slots), ic_slots_(0), ic_kinds_(NULL) {}
- FeedbackVectorSpec(int slots, int ic_slots, Code::Kind* ic_slot_kinds)
+ StaticFeedbackVectorSpec() : slots_(0), ic_slots_(0), ic_kinds_(NULL) {}
+ StaticFeedbackVectorSpec(int slots, int ic_slots,
+ FeedbackVectorSlotKind* ic_slot_kinds)
: slots_(slots), ic_slots_(ic_slots), ic_kinds_(ic_slot_kinds) {}
int slots() const { return slots_; }
int ic_slots() const { return ic_slots_; }
- Code::Kind GetKind(int ic_slot) const {
+ FeedbackVectorSlotKind GetKind(int ic_slot) const {
DCHECK(ic_slots_ > 0 && ic_slot < ic_slots_);
return ic_kinds_[ic_slot];
}
@@ -37,33 +52,78 @@ class FeedbackVectorSpec {
private:
int slots_;
int ic_slots_;
- Code::Kind* ic_kinds_;
+ FeedbackVectorSlotKind* ic_kinds_;
};
-class ZoneFeedbackVectorSpec {
+class FeedbackVectorSpec {
public:
- explicit ZoneFeedbackVectorSpec(Zone* zone)
+ explicit FeedbackVectorSpec(Zone* zone)
: slots_(0), ic_slots_(0), ic_slot_kinds_(zone) {}
- ZoneFeedbackVectorSpec(Zone* zone, int slots, int ic_slots)
- : slots_(slots), ic_slots_(ic_slots), ic_slot_kinds_(ic_slots, zone) {}
-
int slots() const { return slots_; }
- void increase_slots(int count) { slots_ += count; }
+ void increase_slots(int count) {
+ DCHECK_LT(0, count);
+ slots_ += count;
+ }
int ic_slots() const { return ic_slots_; }
void increase_ic_slots(int count) {
+ DCHECK_LT(0, count);
ic_slots_ += count;
ic_slot_kinds_.resize(ic_slots_);
}
- void SetKind(int ic_slot, Code::Kind kind) {
- ic_slot_kinds_[ic_slot] = kind;
+ FeedbackVectorICSlot AddSlot(FeedbackVectorSlotKind kind) {
+ int slot = ic_slots_;
+ increase_ic_slots(1);
+ ic_slot_kinds_[slot] = static_cast<unsigned char>(kind);
+ return FeedbackVectorICSlot(slot);
+ }
+
+ FeedbackVectorICSlot AddSlots(FeedbackVectorSlotKind kind, int count) {
+ int slot = ic_slots_;
+ increase_ic_slots(count);
+ for (int i = 0; i < count; i++) {
+ ic_slot_kinds_[slot + i] = static_cast<unsigned char>(kind);
+ }
+ return FeedbackVectorICSlot(slot);
+ }
+
+ FeedbackVectorICSlot AddCallICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::CALL_IC);
+ }
+
+ FeedbackVectorICSlot AddLoadICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::LOAD_IC);
+ }
+
+ FeedbackVectorICSlot AddLoadICSlots(int count) {
+ return AddSlots(FeedbackVectorSlotKind::LOAD_IC, count);
+ }
+
+ FeedbackVectorICSlot AddKeyedLoadICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::KEYED_LOAD_IC);
+ }
+
+ FeedbackVectorICSlot AddStoreICSlot() {
+ return AddSlot(FeedbackVectorSlotKind::STORE_IC);
+ }
+
+ FeedbackVectorSlot AddStubSlot() {
+ int slot = slots_;
+ increase_slots(1);
+ return FeedbackVectorSlot(slot);
}
- Code::Kind GetKind(int ic_slot) const {
- return static_cast<Code::Kind>(ic_slot_kinds_.at(ic_slot));
+ FeedbackVectorSlot AddStubSlots(int count) {
+ int slot = slots_;
+ increase_slots(count);
+ return FeedbackVectorSlot(slot);
+ }
+
+ FeedbackVectorSlotKind GetKind(int ic_slot) const {
+ return static_cast<FeedbackVectorSlotKind>(ic_slot_kinds_.at(ic_slot));
}
private:
@@ -87,10 +147,7 @@ class ZoneFeedbackVectorSpec {
class TypeFeedbackVector : public FixedArray {
public:
// Casting.
- static TypeFeedbackVector* cast(Object* obj) {
- DCHECK(obj->IsTypeFeedbackVector());
- return reinterpret_cast<TypeFeedbackVector*>(obj);
- }
+ static inline TypeFeedbackVector* cast(Object* obj);
static const int kReservedIndexCount = 3;
static const int kFirstICSlotIndex = 0;
@@ -99,92 +156,41 @@ class TypeFeedbackVector : public FixedArray {
static int elements_per_ic_slot() { return 2; }
- int first_ic_slot_index() const {
- DCHECK(length() >= kReservedIndexCount);
- return Smi::cast(get(kFirstICSlotIndex))->value();
- }
-
- int ic_with_type_info_count() {
- return length() > 0 ? Smi::cast(get(kWithTypesIndex))->value() : 0;
- }
-
- void change_ic_with_type_info_count(int delta) {
- if (delta == 0) return;
- int value = ic_with_type_info_count() + delta;
- // Could go negative because of the debugger.
- if (value >= 0) {
- set(kWithTypesIndex, Smi::FromInt(value));
- }
- }
-
- int ic_generic_count() {
- return length() > 0 ? Smi::cast(get(kGenericCountIndex))->value() : 0;
- }
-
- void change_ic_generic_count(int delta) {
- if (delta == 0) return;
- int value = ic_generic_count() + delta;
- if (value >= 0) {
- set(kGenericCountIndex, Smi::FromInt(value));
- }
- }
-
+ inline int first_ic_slot_index() const;
+ inline int ic_with_type_info_count();
+ inline void change_ic_with_type_info_count(int delta);
+ inline int ic_generic_count();
+ inline void change_ic_generic_count(int delta);
inline int ic_metadata_length() const;
- bool SpecDiffersFrom(const ZoneFeedbackVectorSpec* other_spec) const;
+ bool SpecDiffersFrom(const FeedbackVectorSpec* other_spec) const;
- int Slots() const {
- if (length() == 0) return 0;
- return Max(
- 0, first_ic_slot_index() - ic_metadata_length() - kReservedIndexCount);
- }
-
- int ICSlots() const {
- if (length() == 0) return 0;
- return (length() - first_ic_slot_index()) / elements_per_ic_slot();
- }
+ inline int Slots() const;
+ inline int ICSlots() const;
// Conversion from a slot or ic slot to an integer index to the underlying
// array.
- int GetIndex(FeedbackVectorSlot slot) const {
- DCHECK(slot.ToInt() < first_ic_slot_index());
- return kReservedIndexCount + ic_metadata_length() + slot.ToInt();
- }
+ inline int GetIndex(FeedbackVectorSlot slot) const;
+ inline int GetIndex(FeedbackVectorICSlot slot) const;
- int GetIndex(FeedbackVectorICSlot slot) const {
- int first_ic_slot = first_ic_slot_index();
- DCHECK(slot.ToInt() < ICSlots());
- return first_ic_slot + slot.ToInt() * elements_per_ic_slot();
- }
+ template <typename Spec>
+ static int GetIndexFromSpec(const Spec* spec, FeedbackVectorSlot slot);
+ template <typename Spec>
+ static int GetIndexFromSpec(const Spec* spec, FeedbackVectorICSlot slot);
// Conversion from an integer index to either a slot or an ic slot. The caller
// should know what kind she expects.
- FeedbackVectorSlot ToSlot(int index) const {
- DCHECK(index >= kReservedIndexCount && index < first_ic_slot_index());
- return FeedbackVectorSlot(index - ic_metadata_length() -
- kReservedIndexCount);
- }
-
- FeedbackVectorICSlot ToICSlot(int index) const {
- DCHECK(index >= first_ic_slot_index() && index < length());
- int ic_slot = (index - first_ic_slot_index()) / elements_per_ic_slot();
- return FeedbackVectorICSlot(ic_slot);
- }
-
- Object* Get(FeedbackVectorSlot slot) const { return get(GetIndex(slot)); }
- void Set(FeedbackVectorSlot slot, Object* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER) {
- set(GetIndex(slot), value, mode);
- }
-
- Object* Get(FeedbackVectorICSlot slot) const { return get(GetIndex(slot)); }
- void Set(FeedbackVectorICSlot slot, Object* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER) {
- set(GetIndex(slot), value, mode);
- }
+ inline FeedbackVectorSlot ToSlot(int index) const;
+ inline FeedbackVectorICSlot ToICSlot(int index) const;
+ inline Object* Get(FeedbackVectorSlot slot) const;
+ inline void Set(FeedbackVectorSlot slot, Object* value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline Object* Get(FeedbackVectorICSlot slot) const;
+ inline void Set(FeedbackVectorICSlot slot, Object* value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// IC slots need metadata to recognize the type of IC.
- Code::Kind GetKind(FeedbackVectorICSlot slot) const;
+ FeedbackVectorSlotKind GetKind(FeedbackVectorICSlot slot) const;
template <typename Spec>
static Handle<TypeFeedbackVector> Allocate(Isolate* isolate,
@@ -213,6 +219,9 @@ class TypeFeedbackVector : public FixedArray {
ClearICSlotsImpl(shared, false);
}
+ static void ClearAllKeyedStoreICs(Isolate* isolate);
+ void ClearKeyedStoreICs(SharedFunctionInfo* shared);
+
// The object that indicates an uninitialized cache.
static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
@@ -241,23 +250,17 @@ class TypeFeedbackVector : public FixedArray {
static Handle<TypeFeedbackVector> CreatePushAppliedArgumentsVector(
Isolate* isolate);
+ static const char* Kind2String(FeedbackVectorSlotKind kind);
+
private:
- enum VectorICKind {
- KindUnused = 0x0,
- KindCallIC = 0x1,
- KindLoadIC = 0x2,
- KindKeyedLoadIC = 0x3,
- KindStoreIC = 0x4,
- KindKeyedStoreIC = 0x5,
- };
-
- static const int kVectorICKindBits = 3;
- static VectorICKind FromCodeKind(Code::Kind kind);
- static Code::Kind FromVectorICKind(VectorICKind kind);
- void SetKind(FeedbackVectorICSlot slot, Code::Kind kind);
-
- typedef BitSetComputer<VectorICKind, kVectorICKindBits, kSmiValueSize,
- uint32_t> VectorICComputer;
+ static const int kFeedbackVectorSlotKindBits = 3;
+ STATIC_ASSERT(static_cast<int>(FeedbackVectorSlotKind::KINDS_NUMBER) <
+ (1 << kFeedbackVectorSlotKindBits));
+
+ void SetKind(FeedbackVectorICSlot slot, FeedbackVectorSlotKind kind);
+
+ typedef BitSetComputer<FeedbackVectorSlotKind, kFeedbackVectorSlotKindBits,
+ kSmiValueSize, uint32_t> VectorICComputer;
void ClearSlotsImpl(SharedFunctionInfo* shared, bool force_clear);
void ClearICSlotsImpl(SharedFunctionInfo* shared, bool force_clear);
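
The replacement BitSetComputer typedef packs one FeedbackVectorSlotKind into three bits per IC slot, which is exactly what the STATIC_ASSERT guards: KINDS_NUMBER (currently 7) must stay below 1 << 3. A sketch of that encoding, ignoring BitSetComputer's spreading of slots across multiple smi-sized words and assuming everything fits one 32-bit word:

    #include <cassert>
    #include <cstdint>

    enum class SlotKind : uint32_t {  // mirrors FeedbackVectorSlotKind
      UNUSED, CALL_IC, LOAD_IC, KEYED_LOAD_IC, STORE_IC, KEYED_STORE_IC,
      KINDS_NUMBER
    };

    const int kBitsPerKind = 3;
    static_assert(static_cast<int>(SlotKind::KINDS_NUMBER) < (1 << kBitsPerKind),
                  "kind must fit in three bits");

    // Encode/decode one slot's kind within a single metadata word.
    uint32_t Encode(uint32_t word, int slot, SlotKind kind) {
      uint32_t mask = ((1u << kBitsPerKind) - 1) << (slot * kBitsPerKind);
      return (word & ~mask) |
             (static_cast<uint32_t>(kind) << (slot * kBitsPerKind));
    }

    SlotKind Decode(uint32_t word, int slot) {
      return static_cast<SlotKind>((word >> (slot * kBitsPerKind)) &
                                   ((1u << kBitsPerKind) - 1));
    }

    int main() {
      uint32_t word = 0;
      word = Encode(word, 0, SlotKind::CALL_IC);
      word = Encode(word, 4, SlotKind::KEYED_STORE_IC);
      assert(Decode(word, 0) == SlotKind::CALL_IC);
      assert(Decode(word, 4) == SlotKind::KEYED_STORE_IC);
      assert(Decode(word, 1) == SlotKind::UNUSED);  // untouched slots stay 0
      return 0;
    }
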
@@ -321,27 +324,16 @@ class FeedbackNexus {
virtual void ConfigurePremonomorphic();
virtual void ConfigureMegamorphic();
- Object* GetFeedback() const { return vector()->Get(slot()); }
- Object* GetFeedbackExtra() const {
- DCHECK(TypeFeedbackVector::elements_per_ic_slot() > 1);
- int extra_index = vector()->GetIndex(slot()) + 1;
- return vector()->get(extra_index);
- }
+ inline Object* GetFeedback() const;
+ inline Object* GetFeedbackExtra() const;
protected:
- Isolate* GetIsolate() const { return vector()->GetIsolate(); }
-
- void SetFeedback(Object* feedback,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER) {
- vector()->Set(slot(), feedback, mode);
- }
+ inline Isolate* GetIsolate() const;
- void SetFeedbackExtra(Object* feedback_extra,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER) {
- DCHECK(TypeFeedbackVector::elements_per_ic_slot() > 1);
- int index = vector()->GetIndex(slot()) + 1;
- vector()->set(index, feedback_extra, mode);
- }
+ inline void SetFeedback(Object* feedback,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ inline void SetFeedbackExtra(Object* feedback_extra,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
Handle<FixedArray> EnsureArrayOfSize(int length);
Handle<FixedArray> EnsureExtraArrayOfSize(int length);
@@ -367,11 +359,11 @@ class CallICNexus : public FeedbackNexus {
CallICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->GetKind(slot) == Code::CALL_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::CALL_IC, vector->GetKind(slot));
}
CallICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->GetKind(slot) == Code::CALL_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::CALL_IC, vector->GetKind(slot));
}
void Clear(Code* host);
@@ -400,7 +392,7 @@ class LoadICNexus : public FeedbackNexus {
public:
LoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->GetKind(slot) == Code::LOAD_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::LOAD_IC, vector->GetKind(slot));
}
explicit LoadICNexus(Isolate* isolate)
: FeedbackNexus(TypeFeedbackVector::DummyVector(isolate),
@@ -408,7 +400,7 @@ class LoadICNexus : public FeedbackNexus {
TypeFeedbackVector::kDummyLoadICSlot)) {}
LoadICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->GetKind(slot) == Code::LOAD_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::LOAD_IC, vector->GetKind(slot));
}
void Clear(Code* host);
@@ -425,11 +417,11 @@ class KeyedLoadICNexus : public FeedbackNexus {
public:
KeyedLoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->GetKind(slot) == Code::KEYED_LOAD_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
}
KeyedLoadICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->GetKind(slot) == Code::KEYED_LOAD_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
}
void Clear(Code* host);
@@ -450,7 +442,7 @@ class StoreICNexus : public FeedbackNexus {
public:
StoreICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->GetKind(slot) == Code::STORE_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
}
explicit StoreICNexus(Isolate* isolate)
: FeedbackNexus(TypeFeedbackVector::DummyVector(isolate),
@@ -458,7 +450,7 @@ class StoreICNexus : public FeedbackNexus {
TypeFeedbackVector::kDummyStoreICSlot)) {}
StoreICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->GetKind(slot) == Code::STORE_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
}
void Clear(Code* host);
@@ -476,11 +468,15 @@ class KeyedStoreICNexus : public FeedbackNexus {
KeyedStoreICNexus(Handle<TypeFeedbackVector> vector,
FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->GetKind(slot) == Code::KEYED_STORE_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, vector->GetKind(slot));
}
+ explicit KeyedStoreICNexus(Isolate* isolate)
+ : FeedbackNexus(TypeFeedbackVector::DummyVector(isolate),
+ TypeFeedbackVector::DummySlot(
+ TypeFeedbackVector::kDummyKeyedStoreICSlot)) {}
KeyedStoreICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
: FeedbackNexus(vector, slot) {
- DCHECK(vector->GetKind(slot) == Code::KEYED_STORE_IC);
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, vector->GetKind(slot));
}
void Clear(Code* host);
@@ -491,6 +487,12 @@ class KeyedStoreICNexus : public FeedbackNexus {
// name can be null.
void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
CodeHandleList* handlers);
+ void ConfigurePolymorphic(MapHandleList* maps,
+ MapHandleList* transitioned_maps,
+ CodeHandleList* handlers);
+
+ KeyedAccessStoreMode GetKeyedAccessStoreMode() const;
+ IcCheckType GetKeyType() const;
InlineCacheState StateFromFeedback() const override;
Name* FindFirstName() const override;
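
With the rename, FeedbackVectorSpec becomes a builder: call sites stop pre-sizing the spec and calling SetKind, and instead call AddLoadICSlot() and friends, keeping the returned slot. A Zone-free re-implementation of the builder shape, reduced to standard containers so it is self-contained (a sketch of the call-site pattern, not the V8 class):

    #include <cassert>
    #include <vector>

    enum class SlotKind { UNUSED, CALL_IC, LOAD_IC, KEYED_LOAD_IC,
                          STORE_IC, KEYED_STORE_IC };

    // Zone-free stand-in for the renamed FeedbackVectorSpec builder.
    class SpecSketch {
     public:
      int AddSlot(SlotKind kind) {          // returns the new slot's index,
        kinds_.push_back(kind);             // like AddSlot() returning a
        return static_cast<int>(kinds_.size()) - 1;  // FeedbackVectorICSlot
      }
      int AddLoadICSlot() { return AddSlot(SlotKind::LOAD_IC); }
      int AddCallICSlot() { return AddSlot(SlotKind::CALL_IC); }
      SlotKind GetKind(int slot) const { return kinds_.at(slot); }
      int ic_slots() const { return static_cast<int>(kinds_.size()); }

     private:
      std::vector<SlotKind> kinds_;
    };

    int main() {
      SpecSketch spec;
      int load = spec.AddLoadICSlot();  // e.g. one slot per property load
      int call = spec.AddCallICSlot();  // and one per call site
      assert(load == 0 && call == 1);
      assert(spec.GetKind(call) == SlotKind::CALL_IC);
      assert(spec.ic_slots() == 2);
      return 0;
    }
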
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 1dbe21c9fa..ef5432176b 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/type-info.h"
#include "src/ast.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/type-info.h"
+#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@@ -107,11 +107,11 @@ InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(TypeFeedbackId id) {
InlineCacheState TypeFeedbackOracle::LoadInlineCacheState(
FeedbackVectorICSlot slot) {
if (!slot.IsInvalid()) {
- Code::Kind kind = feedback_vector_->GetKind(slot);
- if (kind == Code::LOAD_IC) {
+ FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
+ if (kind == FeedbackVectorSlotKind::LOAD_IC) {
LoadICNexus nexus(feedback_vector_, slot);
return nexus.StateFromFeedback();
- } else if (kind == Code::KEYED_LOAD_IC) {
+ } else if (kind == FeedbackVectorSlotKind::KEYED_LOAD_IC) {
KeyedLoadICNexus nexus(feedback_vector_, slot);
return nexus.StateFromFeedback();
}
@@ -131,6 +131,21 @@ bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
}
+bool TypeFeedbackOracle::StoreIsUninitialized(FeedbackVectorICSlot slot) {
+ if (!slot.IsInvalid()) {
+ FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
+ if (kind == FeedbackVectorSlotKind::STORE_IC) {
+ StoreICNexus nexus(feedback_vector_, slot);
+ return nexus.StateFromFeedback() == UNINITIALIZED;
+ } else if (kind == FeedbackVectorSlotKind::KEYED_STORE_IC) {
+ KeyedStoreICNexus nexus(feedback_vector_, slot);
+ return nexus.StateFromFeedback() == UNINITIALIZED;
+ }
+ }
+ return true;
+}
+
+
bool TypeFeedbackOracle::CallIsUninitialized(FeedbackVectorICSlot slot) {
Handle<Object> value = GetInfo(slot);
return value->IsUndefined() ||
@@ -147,9 +162,7 @@ bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackVectorICSlot slot) {
bool TypeFeedbackOracle::CallNewIsMonomorphic(FeedbackVectorSlot slot) {
Handle<Object> info = GetInfo(slot);
- return FLAG_pretenuring_call_new
- ? info->IsJSFunction()
- : info->IsAllocationSite() || info->IsJSFunction();
+ return info->IsAllocationSite() || info->IsJSFunction();
}
@@ -180,18 +193,19 @@ void TypeFeedbackOracle::GetStoreModeAndKeyType(
}
-void TypeFeedbackOracle::GetLoadKeyType(
- TypeFeedbackId ast_id, IcCheckType* key_type) {
- Handle<Object> maybe_code = GetInfo(ast_id);
- if (maybe_code->IsCode()) {
- Handle<Code> code = Handle<Code>::cast(maybe_code);
- if (code->kind() == Code::KEYED_LOAD_IC) {
- ExtraICState extra_ic_state = code->extra_ic_state();
- *key_type = KeyedLoadIC::GetKeyType(extra_ic_state);
- return;
- }
+void TypeFeedbackOracle::GetStoreModeAndKeyType(
+ FeedbackVectorICSlot slot, KeyedAccessStoreMode* store_mode,
+ IcCheckType* key_type) {
+ if (!slot.IsInvalid() &&
+ feedback_vector_->GetKind(slot) ==
+ FeedbackVectorSlotKind::KEYED_STORE_IC) {
+ KeyedStoreICNexus nexus(feedback_vector_, slot);
+ *store_mode = nexus.GetKeyedAccessStoreMode();
+ *key_type = nexus.GetKeyType();
+ } else {
+ *store_mode = STANDARD_STORE;
+ *key_type = ELEMENT;
}
- *key_type = ELEMENT;
}
@@ -209,7 +223,7 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(
Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(
FeedbackVectorSlot slot) {
Handle<Object> info = GetInfo(slot);
- if (FLAG_pretenuring_call_new || info->IsJSFunction()) {
+ if (info->IsJSFunction()) {
return Handle<JSFunction>::cast(info);
}
@@ -231,7 +245,7 @@ Handle<AllocationSite> TypeFeedbackOracle::GetCallAllocationSite(
Handle<AllocationSite> TypeFeedbackOracle::GetCallNewAllocationSite(
FeedbackVectorSlot slot) {
Handle<Object> info = GetInfo(slot);
- if (FLAG_pretenuring_call_new || info->IsAllocationSite()) {
+ if (info->IsAllocationSite()) {
return Handle<AllocationSite>::cast(info);
}
return Handle<AllocationSite>::null();
@@ -366,6 +380,15 @@ void TypeFeedbackOracle::AssignmentReceiverTypes(TypeFeedbackId id,
}
+void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackVectorICSlot slot,
+ Handle<Name> name,
+ SmallMapList* receiver_types) {
+ receiver_types->Clear();
+ Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
+ CollectReceiverTypes(slot, name, flags, receiver_types);
+}
+
+
void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
TypeFeedbackId id, SmallMapList* receiver_types,
KeyedAccessStoreMode* store_mode, IcCheckType* key_type) {
@@ -375,6 +398,15 @@ void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
}
+void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
+ FeedbackVectorICSlot slot, SmallMapList* receiver_types,
+ KeyedAccessStoreMode* store_mode, IcCheckType* key_type) {
+ receiver_types->Clear();
+ CollectReceiverTypes(slot, receiver_types);
+ GetStoreModeAndKeyType(slot, store_mode, key_type);
+}
+
+
void TypeFeedbackOracle::CountReceiverTypes(TypeFeedbackId id,
SmallMapList* receiver_types) {
receiver_types->Clear();
@@ -382,6 +414,22 @@ void TypeFeedbackOracle::CountReceiverTypes(TypeFeedbackId id,
}
+void TypeFeedbackOracle::CountReceiverTypes(FeedbackVectorICSlot slot,
+ SmallMapList* receiver_types) {
+ receiver_types->Clear();
+ if (!slot.IsInvalid()) CollectReceiverTypes(slot, receiver_types);
+}
+
+
+void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorICSlot slot,
+ Handle<Name> name,
+ Code::Flags flags,
+ SmallMapList* types) {
+ StoreICNexus nexus(feedback_vector_, slot);
+ CollectReceiverTypes<FeedbackNexus>(&nexus, name, flags, types);
+}
+
+
void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
Handle<Name> name,
Code::Flags flags,
@@ -419,6 +467,20 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
}
+void TypeFeedbackOracle::CollectReceiverTypes(FeedbackVectorICSlot slot,
+ SmallMapList* types) {
+ FeedbackVectorSlotKind kind = feedback_vector_->GetKind(slot);
+ if (kind == FeedbackVectorSlotKind::STORE_IC) {
+ StoreICNexus nexus(feedback_vector_, slot);
+ CollectReceiverTypes<FeedbackNexus>(&nexus, types);
+ } else {
+ DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC, kind);
+ KeyedStoreICNexus nexus(feedback_vector_, slot);
+ CollectReceiverTypes<FeedbackNexus>(&nexus, types);
+ }
+}
+
+
template <class T>
void TypeFeedbackOracle::CollectReceiverTypes(T* obj, SmallMapList* types) {
MapHandleList maps;
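
The slot-based oracle overloads added here all follow one pattern: check the slot's FeedbackVectorSlotKind, build the matching nexus, and otherwise fall back to conservative defaults. A compressed sketch of GetStoreModeAndKeyType's fallback behavior, with a boolean and shortened enums standing in for slot validity and the nexus queries:

    #include <cassert>

    enum class SlotKind { UNUSED, KEYED_STORE_IC };
    enum StoreMode { STANDARD_STORE, STORE_AND_GROW };  // shortened names
    enum IcCheckType { ELEMENT, PROPERTY };

    // Shape of the slot-based GetStoreModeAndKeyType(): anything that is
    // not a valid KEYED_STORE_IC slot falls back to the safe defaults.
    void GetStoreModeAndKeyType(bool slot_valid, SlotKind kind,
                                StoreMode* store_mode, IcCheckType* key_type) {
      if (slot_valid && kind == SlotKind::KEYED_STORE_IC) {
        *store_mode = STORE_AND_GROW;  // stands in for GetKeyedAccessStoreMode()
        *key_type = PROPERTY;          // stands in for nexus.GetKeyType()
      } else {
        *store_mode = STANDARD_STORE;
        *key_type = ELEMENT;
      }
    }

    int main() {
      StoreMode mode;
      IcCheckType key;
      GetStoreModeAndKeyType(false, SlotKind::UNUSED, &mode, &key);
      assert(mode == STANDARD_STORE && key == ELEMENT);  // invalid slot
      GetStoreModeAndKeyType(true, SlotKind::KEYED_STORE_IC, &mode, &key);
      assert(key == PROPERTY);
      return 0;
    }
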
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 965fca3010..96cc39f007 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -6,7 +6,9 @@
#define V8_TYPE_INFO_H_
#include "src/allocation.h"
+#include "src/contexts.h"
#include "src/globals.h"
+#include "src/token.h"
#include "src/types.h"
#include "src/zone.h"
@@ -26,6 +28,7 @@ class TypeFeedbackOracle: public ZoneObject {
InlineCacheState LoadInlineCacheState(TypeFeedbackId id);
InlineCacheState LoadInlineCacheState(FeedbackVectorICSlot slot);
bool StoreIsUninitialized(TypeFeedbackId id);
+ bool StoreIsUninitialized(FeedbackVectorICSlot slot);
bool CallIsUninitialized(FeedbackVectorICSlot slot);
bool CallIsMonomorphic(FeedbackVectorICSlot slot);
bool KeyedArrayCallIsHoley(TypeFeedbackId id);
@@ -40,7 +43,9 @@ class TypeFeedbackOracle: public ZoneObject {
void GetStoreModeAndKeyType(TypeFeedbackId id,
KeyedAccessStoreMode* store_mode,
IcCheckType* key_type);
- void GetLoadKeyType(TypeFeedbackId id, IcCheckType* key_type);
+ void GetStoreModeAndKeyType(FeedbackVectorICSlot slot,
+ KeyedAccessStoreMode* store_mode,
+ IcCheckType* key_type);
void PropertyReceiverTypes(FeedbackVectorICSlot slot, Handle<Name> name,
SmallMapList* receiver_types);
@@ -49,13 +54,22 @@ class TypeFeedbackOracle: public ZoneObject {
IcCheckType* key_type);
void AssignmentReceiverTypes(TypeFeedbackId id, Handle<Name> name,
SmallMapList* receiver_types);
+ void AssignmentReceiverTypes(FeedbackVectorICSlot slot, Handle<Name> name,
+ SmallMapList* receiver_types);
void KeyedAssignmentReceiverTypes(TypeFeedbackId id,
SmallMapList* receiver_types,
KeyedAccessStoreMode* store_mode,
IcCheckType* key_type);
+ void KeyedAssignmentReceiverTypes(FeedbackVectorICSlot slot,
+ SmallMapList* receiver_types,
+ KeyedAccessStoreMode* store_mode,
+ IcCheckType* key_type);
void CountReceiverTypes(TypeFeedbackId id,
SmallMapList* receiver_types);
+ void CountReceiverTypes(FeedbackVectorICSlot slot,
+ SmallMapList* receiver_types);
+ void CollectReceiverTypes(FeedbackVectorICSlot slot, SmallMapList* types);
void CollectReceiverTypes(TypeFeedbackId id,
SmallMapList* types);
template <class T>
@@ -100,6 +114,8 @@ class TypeFeedbackOracle: public ZoneObject {
Isolate* isolate() const { return isolate_; }
private:
+ void CollectReceiverTypes(FeedbackVectorICSlot slot, Handle<Name> name,
+ Code::Flags flags, SmallMapList* types);
void CollectReceiverTypes(TypeFeedbackId id, Handle<Name> name,
Code::Flags flags, SmallMapList* types);
template <class T>
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index d1f7ed1fdb..b45d304514 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -15,6 +15,8 @@ var GlobalArray = global.Array;
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalDataView = global.DataView;
var GlobalObject = global.Object;
+var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
macro TYPED_ARRAYS(FUNCTION)
// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
@@ -35,13 +37,9 @@ endmacro
TYPED_ARRAYS(DECLARE_GLOBALS)
-var MathMax;
-var MathMin;
var ToNumber;
utils.Import(function(from) {
- MathMax = from.MathMax;
- MathMin = from.MathMin;
ToNumber = from.ToNumber;
});
@@ -141,13 +139,13 @@ function NAMEConstructByIterable(obj, iterable, iteratorFn) {
// was already looked up, and wrap it in another iterable. The
// __proto__ of the new iterable is set to null to avoid any chance
// of modifications to Object.prototype being observable here.
- var iterator = %_CallFunction(iterable, iteratorFn);
+ var iterator = %_Call(iteratorFn, iterable);
var newIterable = {
__proto__: null
};
// TODO(littledan): Computed properties don't work yet in nosnap.
// Rephrase when they do.
- newIterable[symbolIterator] = function() { return iterator; }
+ newIterable[iteratorSymbol] = function() { return iterator; }
for (var value of newIterable) {
list.push(value);
}
@@ -162,7 +160,7 @@ function NAMEConstructor(arg1, arg2, arg3) {
IS_BOOLEAN(arg1) || IS_UNDEFINED(arg1)) {
NAMEConstructByLength(this, arg1);
} else {
- var iteratorFn = arg1[symbolIterator];
+ var iteratorFn = arg1[iteratorSymbol];
if (IS_UNDEFINED(iteratorFn) || iteratorFn === $arrayValues) {
NAMEConstructByArrayLike(this, arg1);
} else {
@@ -208,25 +206,29 @@ function NAMESubArray(begin, end) {
}
var beginInt = TO_INTEGER(begin);
if (!IS_UNDEFINED(end)) {
- end = TO_INTEGER(end);
+ var endInt = TO_INTEGER(end);
+ var srcLength = %_TypedArrayGetLength(this);
+ } else {
+ var srcLength = %_TypedArrayGetLength(this);
+ var endInt = srcLength;
}
- var srcLength = %_TypedArrayGetLength(this);
if (beginInt < 0) {
- beginInt = MathMax(0, srcLength + beginInt);
+ beginInt = MAX_SIMPLE(0, srcLength + beginInt);
} else {
- beginInt = MathMin(srcLength, beginInt);
+ beginInt = MIN_SIMPLE(beginInt, srcLength);
}
- var endInt = IS_UNDEFINED(end) ? srcLength : end;
if (endInt < 0) {
- endInt = MathMax(0, srcLength + endInt);
+ endInt = MAX_SIMPLE(0, srcLength + endInt);
} else {
- endInt = MathMin(endInt, srcLength);
+ endInt = MIN_SIMPLE(endInt, srcLength);
}
+
if (endInt < beginInt) {
endInt = beginInt;
}
+
var newLength = endInt - beginInt;
var beginByteOffset =
%_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE;
@@ -332,7 +334,7 @@ function TypedArraySet(obj, offset) {
}
return;
}
- l = $toLength(l);
+ l = TO_LENGTH(l);
if (intOffset + l > this.length) {
throw MakeRangeError(kTypedArraySetSourceTooLarge);
}
@@ -368,7 +370,7 @@ macro SETUP_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
DONT_ENUM | DONT_DELETE);
utils.InstallGetter(GlobalNAME.prototype, "length", NAME_GetLength,
DONT_ENUM | DONT_DELETE);
- utils.InstallGetter(GlobalNAME.prototype, symbolToStringTag,
+ utils.InstallGetter(GlobalNAME.prototype, toStringTagSymbol,
TypedArrayGetToStringTag);
utils.InstallFunctions(GlobalNAME.prototype, DONT_ENUM, [
"subarray", NAMESubArray,
@@ -474,7 +476,7 @@ DATA_VIEW_TYPES(DATA_VIEW_GETTER_SETTER)
// Set up constructor property on the DataView prototype.
%AddNamedProperty(GlobalDataView.prototype, "constructor", GlobalDataView,
DONT_ENUM);
-%AddNamedProperty(GlobalDataView.prototype, symbolToStringTag, "DataView",
+%AddNamedProperty(GlobalDataView.prototype, toStringTagSymbol, "DataView",
READ_ONLY|DONT_ENUM);
utils.InstallGetter(GlobalDataView.prototype, "buffer", DataViewGetBufferJS);
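
The rewritten NAMESubArray clamps begin and end against the source length with the MAX_SIMPLE/MIN_SIMPLE macros rather than the previously imported MathMax/MathMin. The arithmetic itself is ordinary subarray clamping; a C++ rendering of it, assuming begin and end have already been through TO_INTEGER:

    #include <algorithm>
    #include <cassert>

    // Clamping rules from NAMESubArray: negative indices count from the
    // end, both ends are clamped into [0, length], and end never precedes
    // begin, so the result length is never negative.
    int SubarrayLength(int length, int begin, int end, int* out_begin) {
      int b = begin < 0 ? std::max(0, length + begin) : std::min(begin, length);
      int e = end < 0 ? std::max(0, length + end) : std::min(end, length);
      if (e < b) e = b;
      *out_begin = b;
      return e - b;
    }

    int main() {
      int b;
      assert(SubarrayLength(10, -3, 10, &b) == 3 && b == 7);    // tail slice
      assert(SubarrayLength(10, 4, 2, &b) == 0 && b == 4);      // end < begin
      assert(SubarrayLength(10, -99, 99, &b) == 10 && b == 0);  // over-clamped
      return 0;
    }
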
diff --git a/deps/v8/src/types-inl.h b/deps/v8/src/types-inl.h
index 084f5db812..699d642d59 100644
--- a/deps/v8/src/types-inl.h
+++ b/deps/v8/src/types-inl.h
@@ -16,19 +16,29 @@ namespace internal {
// -----------------------------------------------------------------------------
// TypeImpl
-template <class Config>
+template<class Config>
typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::SignedSmall() {
return i::SmiValuesAre31Bits() ? kSigned31 : kSigned32;
}
-template <class Config>
+template<class Config>
typename TypeImpl<Config>::bitset
TypeImpl<Config>::BitsetType::UnsignedSmall() {
return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
}
+#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
+template<class Config> \
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Name( \
+ Isolate* isolate, Region* region) { \
+ return Class(i::handle(isolate->heap()->name##_map()), region); \
+}
+SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
+#undef CONSTRUCT_SIMD_TYPE
+
+
template<class Config>
TypeImpl<Config>* TypeImpl<Config>::cast(typename Config::Base* object) {
TypeImpl* t = static_cast<TypeImpl*>(object);
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index a904c6eefa..af1700254b 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -20,9 +20,14 @@ namespace internal {
// -----------------------------------------------------------------------------
// Range-related helper functions.
-// The result may be invalid (max < min).
+template <class Config>
+bool TypeImpl<Config>::Limits::IsEmpty() {
+ return this->min > this->max;
+}
+
+
template<class Config>
-typename TypeImpl<Config>::Limits TypeImpl<Config>::Intersect(
+typename TypeImpl<Config>::Limits TypeImpl<Config>::Limits::Intersect(
Limits lhs, Limits rhs) {
DisallowHeapAllocation no_allocation;
Limits result(lhs);
@@ -33,17 +38,11 @@ typename TypeImpl<Config>::Limits TypeImpl<Config>::Intersect(
template <class Config>
-bool TypeImpl<Config>::IsEmpty(Limits lim) {
- return lim.min > lim.max;
-}
-
-
-template <class Config>
-typename TypeImpl<Config>::Limits TypeImpl<Config>::Union(Limits lhs,
- Limits rhs) {
+typename TypeImpl<Config>::Limits TypeImpl<Config>::Limits::Union(
+ Limits lhs, Limits rhs) {
DisallowHeapAllocation no_allocation;
- if (IsEmpty(lhs)) return rhs;
- if (IsEmpty(rhs)) return lhs;
+ if (lhs.IsEmpty()) return rhs;
+ if (rhs.IsEmpty()) return lhs;
Limits result(lhs);
if (lhs.min > rhs.min) result.min = rhs.min;
if (lhs.max < rhs.max) result.max = rhs.max;
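
Moving Intersect, Union and the emptiness test onto Limits keeps the invariant local: a limit is empty exactly when min > max, intersection is allowed to produce such an inverted pair, and union treats empty inputs as identities. A compact sketch of the struct after the move:

    #include <algorithm>
    #include <cassert>

    // Mirrors TypeImpl::Limits after the refactoring: Intersect/Union are
    // members, and emptiness is min > max rather than a free IsEmpty(lim).
    struct Limits {
      double min, max;
      bool IsEmpty() const { return min > max; }
      static Limits Intersect(Limits lhs, Limits rhs) {
        return {std::max(lhs.min, rhs.min), std::min(lhs.max, rhs.max)};
      }
      static Limits Union(Limits lhs, Limits rhs) {
        if (lhs.IsEmpty()) return rhs;   // empty is the identity for union
        if (rhs.IsEmpty()) return lhs;
        return {std::min(lhs.min, rhs.min), std::max(lhs.max, rhs.max)};
      }
    };

    int main() {
      Limits a{0, 10}, b{5, 20}, c{30, 40};
      assert(!Limits::Intersect(a, b).IsEmpty());  // overlap: [5, 10]
      assert(Limits::Intersect(a, c).IsEmpty());   // disjoint: inverted pair
      assert(Limits::Union(a, c).max == 40);       // convex hull [0, 40]
      return 0;
    }
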
@@ -56,8 +55,7 @@ bool TypeImpl<Config>::Overlap(
typename TypeImpl<Config>::RangeType* lhs,
typename TypeImpl<Config>::RangeType* rhs) {
DisallowHeapAllocation no_allocation;
- typename TypeImpl<Config>::Limits lim = Intersect(Limits(lhs), Limits(rhs));
- return lim.min <= lim.max;
+ return !Limits::Intersect(Limits(lhs), Limits(rhs)).IsEmpty();
}
@@ -154,7 +152,7 @@ TypeImpl<Config>::BitsetType::Glb(TypeImpl* type) {
}
-// The smallest bitset subsuming this type.
+// The smallest bitset subsuming this type, possibly not a proper one.
template<class Config>
typename TypeImpl<Config>::bitset
TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
@@ -170,13 +168,9 @@ TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
}
return bitset;
}
- if (type->IsClass()) {
- // Little hack to avoid the need for a region for handlification here...
- return Config::is_class(type) ? Lub(*Config::as_class(type)) :
- type->AsClass()->Bound(NULL)->AsBitset();
- }
- if (type->IsConstant()) return type->AsConstant()->Bound()->AsBitset();
- if (type->IsRange()) return type->AsRange()->Bound();
+ if (type->IsClass()) return type->AsClass()->Lub();
+ if (type->IsConstant()) return type->AsConstant()->Lub();
+ if (type->IsRange()) return type->AsRange()->Lub();
if (type->IsContext()) return kInternal & kTaggedPointer;
if (type->IsArray()) return kOtherObject;
if (type->IsFunction()) return kOtherObject; // TODO(rossberg): kFunction
@@ -229,9 +223,9 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case HEAP_NUMBER_TYPE:
return kNumber & kTaggedPointer;
case SIMD128_VALUE_TYPE:
- // TODO(bbudge): Add type bits for SIMD value types.
- return kAny;
+ return kSimd;
case JS_VALUE_TYPE:
+ case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -248,6 +242,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case JS_MAP_TYPE:
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
+ case JS_ITERATOR_RESULT_TYPE:
case JS_WEAK_MAP_TYPE:
case JS_WEAK_SET_TYPE:
if (map->is_undetectable()) return kUndetectable;
@@ -275,6 +270,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case SHARED_FUNCTION_INFO_TYPE:
case ACCESSOR_PAIR_TYPE:
case FIXED_ARRAY_TYPE:
+ case FIXED_DOUBLE_ARRAY_TYPE:
case BYTE_ARRAY_TYPE:
case BYTECODE_ARRAY_TYPE:
case FOREIGN_TYPE:
@@ -282,10 +278,43 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
case CODE_TYPE:
case PROPERTY_CELL_TYPE:
return kInternal & kTaggedPointer;
- default:
+
+ // Remaining instance types are unsupported for now. If any of them do
+ // require bit set types, they should get kInternal & kTaggedPointer.
+ case MUTABLE_HEAP_NUMBER_TYPE:
+ case FREE_SPACE_TYPE:
+#define FIXED_TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case FIXED_##TYPE##_ARRAY_TYPE:
+
+ TYPED_ARRAYS(FIXED_TYPED_ARRAY_CASE)
+#undef FIXED_TYPED_ARRAY_CASE
+ case FILLER_TYPE:
+ case DECLARED_ACCESSOR_DESCRIPTOR_TYPE:
+ case ACCESS_CHECK_INFO_TYPE:
+ case INTERCEPTOR_INFO_TYPE:
+ case CALL_HANDLER_INFO_TYPE:
+ case FUNCTION_TEMPLATE_INFO_TYPE:
+ case OBJECT_TEMPLATE_INFO_TYPE:
+ case SIGNATURE_INFO_TYPE:
+ case TYPE_SWITCH_INFO_TYPE:
+ case ALLOCATION_SITE_TYPE:
+ case ALLOCATION_MEMENTO_TYPE:
+ case CODE_CACHE_TYPE:
+ case POLYMORPHIC_CODE_CACHE_TYPE:
+ case TYPE_FEEDBACK_INFO_TYPE:
+ case ALIASED_ARGUMENTS_ENTRY_TYPE:
+ case BOX_TYPE:
+ case DEBUG_INFO_TYPE:
+ case BREAK_POINT_INFO_TYPE:
+ case CELL_TYPE:
+ case WEAK_CELL_TYPE:
+ case PROTOTYPE_INFO_TYPE:
+ case SLOPPY_BLOCK_WITH_EVAL_CONTEXT_EXTENSION_TYPE:
UNREACHABLE();
return kNone;
}
+ UNREACHABLE();
+ return kNone;
}
@@ -308,22 +337,21 @@ TypeImpl<Config>::BitsetType::Lub(double value) {
if (i::IsMinusZero(value)) return kMinusZero;
if (std::isnan(value)) return kNaN;
if (IsUint32Double(value) || IsInt32Double(value)) return Lub(value, value);
- return kPlainNumber;
+ return kOtherNumber;
}
-// Minimum values of regular numeric bitsets.
+// Minimum values of plain numeric bitsets.
template <class Config>
const typename TypeImpl<Config>::BitsetType::Boundary
- TypeImpl<Config>::BitsetType::BoundariesArray[] = {
- {kPlainNumber, -V8_INFINITY},
- {kNegative32, kMinInt},
- {kNegative31, -0x40000000},
- {kUnsigned30, 0},
- {kUnsigned31, 0x40000000},
- {kUnsigned32, 0x80000000},
- {kPlainNumber, static_cast<double>(kMaxUInt32) + 1}
-};
+TypeImpl<Config>::BitsetType::BoundariesArray[] = {
+ {kOtherNumber, kPlainNumber, -V8_INFINITY},
+ {kOtherSigned32, kNegative32, kMinInt},
+ {kNegative31, kNegative31, -0x40000000},
+ {kUnsigned30, kUnsigned30, 0},
+ {kOtherUnsigned31, kUnsigned31, 0x40000000},
+ {kOtherUnsigned32, kUnsigned32, 0x80000000},
+ {kOtherNumber, kPlainNumber, static_cast<double>(kMaxUInt32) + 1}};
template <class Config>
@@ -341,6 +369,21 @@ size_t TypeImpl<Config>::BitsetType::BoundariesSize() {
}
+template <class Config>
+typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::ExpandInternals(
+ typename TypeImpl<Config>::bitset bits) {
+ DisallowHeapAllocation no_allocation;
+ if (!(bits & SEMANTIC(kPlainNumber))) return bits; // Shortcut.
+ const Boundary* boundaries = Boundaries();
+ for (size_t i = 0; i < BoundariesSize(); ++i) {
+ DCHECK(BitsetType::Is(boundaries[i].internal, boundaries[i].external));
+ if (bits & SEMANTIC(boundaries[i].internal))
+ bits |= SEMANTIC(boundaries[i].external);
+ }
+ return bits;
+}
+
+
template<class Config>
typename TypeImpl<Config>::bitset
TypeImpl<Config>::BitsetType::Lub(double min, double max) {
@@ -348,18 +391,13 @@ TypeImpl<Config>::BitsetType::Lub(double min, double max) {
int lub = kNone;
const Boundary* mins = Boundaries();
- // Make sure the min-max range touches 0, so we are guaranteed no holes
- // in unions of valid bitsets.
- if (max < -1) max = -1;
- if (min > 0) min = 0;
-
for (size_t i = 1; i < BoundariesSize(); ++i) {
if (min < mins[i].min) {
- lub |= mins[i-1].bits;
+ lub |= mins[i-1].internal;
if (max < mins[i].min) return lub;
}
}
- return lub |= mins[BoundariesSize() - 1].bits;
+ return lub | mins[BoundariesSize() - 1].internal;
}
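
Each BoundariesArray entry now pairs an internal bitset with its external counterpart: Lub() answers with the precise internal atom (say kOtherNumber), and the new ExpandInternals() widens any internal bit to its external superset (kPlainNumber) before the bits become user-visible. A toy rendering of that widening with made-up bit values (the real assignments live in the bitset type lists):

    #include <cassert>
    #include <cstdint>

    // Toy bit assignments; only the containment relation matters here.
    const uint32_t kOtherNumber = 1u << 0;                 // internal-only atom
    const uint32_t kPlainNumber = 1u << 1 | kOtherNumber;  // external superset

    struct Boundary { uint32_t internal, external; double min; };
    const Boundary kBoundaries[] = {{kOtherNumber, kPlainNumber, -1e300}};

    // Mirrors ExpandInternals(): any internal boundary bit drags in its
    // external counterpart, so users only ever observe proper bitsets.
    uint32_t ExpandInternals(uint32_t bits) {
      for (const Boundary& b : kBoundaries) {
        if (bits & b.internal) bits |= b.external;
      }
      return bits;
    }

    int main() {
      assert(ExpandInternals(kOtherNumber) == kPlainNumber);
      assert(ExpandInternals(0) == 0);  // nothing internal, nothing widened
      return 0;
    }
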
@@ -371,16 +409,6 @@ typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::NumberBits(
template <class Config>
-void TypeImpl<Config>::BitsetType::CheckNumberBits(bitset bits) {
- // Check that the bitset does not contain any holes in number ranges.
- bitset number_bits = NumberBits(bits);
- if (number_bits != 0) {
- bitset lub = SEMANTIC(Lub(Min(number_bits), Max(number_bits)));
- CHECK(lub == number_bits);
- }
-}
-
-template <class Config>
typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::Glb(
double min, double max) {
DisallowHeapAllocation no_allocation;
@@ -393,13 +421,11 @@ typename TypeImpl<Config>::bitset TypeImpl<Config>::BitsetType::Glb(
for (size_t i = 1; i + 1 < BoundariesSize(); ++i) {
if (min <= mins[i].min) {
if (max + 1 < mins[i + 1].min) break;
- glb |= mins[i].bits;
+ glb |= mins[i].external;
}
}
// OtherNumber also contains float numbers, so it can never be
- // in the greatest lower bound. (There is also the small trouble
- // of kOtherNumber having a range hole, which we can conveniently
- // ignore here.)
+ // in the greatest lower bound.
return glb & ~(SEMANTIC(kOtherNumber));
}
@@ -411,7 +437,7 @@ double TypeImpl<Config>::BitsetType::Min(bitset bits) {
const Boundary* mins = Boundaries();
bool mz = SEMANTIC(bits & kMinusZero);
for (size_t i = 0; i < BoundariesSize(); ++i) {
- if (Is(SEMANTIC(mins[i].bits), bits)) {
+ if (Is(SEMANTIC(mins[i].internal), bits)) {
return mz ? std::min(0.0, mins[i].min) : mins[i].min;
}
}
@@ -426,11 +452,11 @@ double TypeImpl<Config>::BitsetType::Max(bitset bits) {
DCHECK(Is(SEMANTIC(bits), kNumber));
const Boundary* mins = Boundaries();
bool mz = SEMANTIC(bits & kMinusZero);
- if (BitsetType::Is(SEMANTIC(mins[BoundariesSize() - 1].bits), bits)) {
+ if (BitsetType::Is(SEMANTIC(mins[BoundariesSize() - 1].internal), bits)) {
return +V8_INFINITY;
}
for (size_t i = BoundariesSize() - 1; i-- > 0;) {
- if (Is(SEMANTIC(mins[i].bits), bits)) {
+ if (Is(SEMANTIC(mins[i].internal), bits)) {
return mz ?
std::max(0.0, mins[i+1].min - 1) : mins[i+1].min - 1;
}
@@ -767,12 +793,12 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
// Deal with bitsets.
result->Set(size++, BitsetType::New(bits, region));
- Limits lims = Limits::Empty(region);
+ Limits lims = Limits::Empty();
size = IntersectAux(type1, type2, result, size, &lims, region);
// If the range is not empty, then insert it into the union and
// remove the number bits from the bitset.
- if (!IsEmpty(lims)) {
+ if (!lims.IsEmpty()) {
size = UpdateRange(RangeType::New(lims, representation, region), result,
size, region);
@@ -814,7 +840,7 @@ typename TypeImpl<Config>::Limits TypeImpl<Config>::ToLimits(bitset bits,
bitset number_bits = BitsetType::NumberBits(bits);
if (number_bits == BitsetType::kNone) {
- return Limits::Empty(region);
+ return Limits::Empty();
}
return Limits(BitsetType::Min(number_bits), BitsetType::Max(number_bits));
@@ -826,7 +852,7 @@ typename TypeImpl<Config>::Limits TypeImpl<Config>::IntersectRangeAndBitset(
TypeHandle range, TypeHandle bitset, Region* region) {
Limits range_lims(range->AsRange());
Limits bitset_lims = ToLimits(bitset->AsBitset(), region);
- return Intersect(range_lims, bitset_lims);
+ return Limits::Intersect(range_lims, bitset_lims);
}
@@ -857,21 +883,22 @@ int TypeImpl<Config>::IntersectAux(TypeHandle lhs, TypeHandle rhs,
if (rhs->IsBitset()) {
Limits lim = IntersectRangeAndBitset(lhs, rhs, region);
- if (!IsEmpty(lim)) {
- *lims = Union(lim, *lims);
+ if (!lim.IsEmpty()) {
+ *lims = Limits::Union(lim, *lims);
}
return size;
}
if (rhs->IsClass()) {
- *lims = Union(Limits(lhs->AsRange()), *lims);
+ *lims = Limits::Union(Limits(lhs->AsRange()), *lims);
}
if (rhs->IsConstant() && Contains(lhs->AsRange(), rhs->AsConstant())) {
return AddToUnion(rhs, result, size, region);
}
if (rhs->IsRange()) {
- Limits lim = Intersect(Limits(lhs->AsRange()), Limits(rhs->AsRange()));
- if (!IsEmpty(lim)) {
- *lims = Union(lim, *lims);
+ Limits lim = Limits::Intersect(
+ Limits(lhs->AsRange()), Limits(rhs->AsRange()));
+ if (!lim.IsEmpty()) {
+ *lims = Limits::Union(lim, *lims);
}
}
return size;
@@ -895,7 +922,7 @@ int TypeImpl<Config>::IntersectAux(TypeHandle lhs, TypeHandle rhs,
// Make sure that we produce a well-formed range and bitset:
// If the range is non-empty, the number bits in the bitset should be
-// clear. Moreover, if we have a canonical range (such as Signed32(),
+// clear. Moreover, if we have a canonical range (such as Signed32),
// we want to produce a bitset rather than a range.
template <class Config>
typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeRangeAndBitset(
@@ -907,10 +934,10 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeRangeAndBitset(
return range;
}
- // If the range is contained within the bitset, return an empty range
- // (but make sure we take the representation).
+ // If the range is semantically contained within the bitset, return None and
+ // leave the bitset untouched.
bitset range_lub = SEMANTIC(range->BitsetLub());
- if (BitsetType::Is(BitsetType::NumberBits(range_lub), *bits)) {
+ if (BitsetType::Is(range_lub, *bits)) {
return None(region);
}
@@ -922,6 +949,8 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeRangeAndBitset(
double range_max = range->Max();
// Remove the number bits from the bitset, they would just confuse us now.
+ // NOTE: bits contains OtherNumber iff bits contains PlainNumber, in which
+ // case we already returned after the subtype check above.
*bits &= ~number_bits;
if (range_min <= bitset_min && range_max >= bitset_max) {
@@ -958,7 +987,7 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
// Figure out the representation of the result.
// The rest of the method should not change this representation and
- // it should make any decisions based on representations (i.e.,
+ // it should not make any decisions based on representations (i.e.,
// it should only use the semantic part of types).
const bitset representation =
type1->Representation() | type2->Representation();
@@ -981,7 +1010,7 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
RangeType* range1 = type1->GetRange();
RangeType* range2 = type2->GetRange();
if (range1 != NULL && range2 != NULL) {
- Limits lims = Union(Limits(range1), Limits(range2));
+ Limits lims = Limits::Union(Limits(range1), Limits(range2));
RangeHandle union_range = RangeType::New(lims, representation, region);
range = NormalizeRangeAndBitset(union_range, &new_bitset, region);
} else if (range1 != NULL) {
@@ -1231,6 +1260,7 @@ const char* TypeImpl<Config>::BitsetType::Name(bitset bits) {
#define RETURN_NAMED_SEMANTIC_TYPE(type, value) \
case SEMANTIC(k##type): return #type;
SEMANTIC_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
+ INTERNAL_BITSET_TYPE_LIST(RETURN_NAMED_SEMANTIC_TYPE)
#undef RETURN_NAMED_SEMANTIC_TYPE
default:
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 31ee95cbb4..3acd5cc842 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -95,10 +95,13 @@ namespace internal {
// RANGE TYPES
//
// A range type represents a continuous integer interval by its minimum and
-// maximum value. Either value might be an infinity.
+// maximum value. Either value may be an infinity, in which case that infinity
+// itself is also included in the range. A range never contains NaN or -0.
//
-// Constant(v) is considered a subtype of Range(x..y) if v happens to be an
-// integer between x and y.
+// If a value v happens to be an integer n, then Constant(v) is considered a
+// subtype of Range(n, n) (and therefore also a subtype of any larger range).
+// In order to avoid large unions, however, it is usually a good idea to use
+// Range rather than Constant.
//
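
A hypothetical illustration of the subtyping rules described above (shown as comments; the exact factory calls for constructing heap types are elided):

    // Type* five = Type::Constant(<handle to the number 5>, region);
    // Type* r    = Type::Range(<0>, <10>, region);
    // five->Is(r);   // true: 5 is an integer inside [0, 10]
    // A range never contains NaN or -0, so Constant(NaN)->Is(r) and
    // Constant(-0)->Is(r) are both false.
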
//
// PREDICATES
@@ -207,8 +210,8 @@ namespace internal {
V(Symbol, 1u << 12 | REPRESENTATION(kTaggedPointer)) \
V(InternalizedString, 1u << 13 | REPRESENTATION(kTaggedPointer)) \
V(OtherString, 1u << 14 | REPRESENTATION(kTaggedPointer)) \
- V(Undetectable, 1u << 15 | REPRESENTATION(kTaggedPointer)) \
- /* Unused semantic bit 1u << 16 in case you are looking for a bit. */ \
+ V(Simd, 1u << 15 | REPRESENTATION(kTaggedPointer)) \
+ V(Undetectable, 1u << 16 | REPRESENTATION(kTaggedPointer)) \
V(OtherObject, 1u << 17 | REPRESENTATION(kTaggedPointer)) \
V(Proxy, 1u << 18 | REPRESENTATION(kTaggedPointer)) \
V(Internal, 1u << 19 | REPRESENTATION(kTagged | kUntagged)) \
@@ -231,7 +234,7 @@ namespace internal {
V(NumberOrString, kNumber | kString) \
V(NumberOrUndefined, kNumber | kUndefined) \
V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
- V(Primitive, kSymbol | kPlainPrimitive) \
+ V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
V(DetectableReceiver, kOtherObject | kProxy) \
V(Detectable, kDetectableReceiver | kNumber | kName) \
V(Object, kOtherObject | kUndetectable) \
@@ -254,6 +257,11 @@ namespace internal {
* -2^31 -2^30 0 2^30 2^31 2^32
*
* E.g., OtherUnsigned32 (OU32) covers all integers from 2^31 to 2^32-1.
+ *
+ * Some of the atomic numerical bitsets are internal only (see
+ * INTERNAL_BITSET_TYPE_LIST). To a user of the types system, they should
+ * only occur in unions with certain other bitsets. For instance, OtherNumber
+ * should only occur as part of PlainNumber.
*/
#define PROPER_BITSET_TYPE_LIST(V) \
@@ -277,6 +285,7 @@ namespace internal {
// typedef Range;
// typedef Region;
// template<class> struct Handle { typedef type; } // No template typedefs...
+//
// template<class T> static Handle<T>::type null_handle();
// template<class T> static Handle<T>::type handle(T* t); // !is_bitset(t)
// template<class T> static Handle<T>::type cast(Handle<Type>::type);
@@ -423,20 +432,21 @@ class TypeImpl : public Config::Base {
return function;
}
+#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
+ static TypeHandle Name(Isolate* isolate, Region* region);
+ SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
+#undef CONSTRUCT_SIMD_TYPE
+
static TypeHandle Union(TypeHandle type1, TypeHandle type2, Region* reg);
static TypeHandle Intersect(TypeHandle type1, TypeHandle type2, Region* reg);
- static TypeImpl* Union(TypeImpl* type1, TypeImpl* type2) {
- return BitsetType::New(type1->AsBitset() | type2->AsBitset());
- }
- static TypeImpl* Intersect(TypeImpl* type1, TypeImpl* type2) {
- return BitsetType::New(type1->AsBitset() & type2->AsBitset());
- }
static TypeHandle Of(double value, Region* region) {
- return Config::from_bitset(BitsetType::Lub(value), region);
+ return Config::from_bitset(BitsetType::ExpandInternals(
+ BitsetType::Lub(value)), region);
}
static TypeHandle Of(i::Object* value, Region* region) {
- return Config::from_bitset(BitsetType::Lub(value), region);
+ return Config::from_bitset(BitsetType::ExpandInternals(
+ BitsetType::Lub(value)), region);
}
static TypeHandle Of(i::Handle<i::Object> value, Region* region) {
return Of(*value, region);
@@ -513,11 +523,17 @@ class TypeImpl : public Config::Base {
double Min();
double Max();
- // Extracts a range from the type. If the type is a range, it just
- // returns it; if it is a union, it returns the range component.
- // Note that it does not contain range for constants.
+ // Extracts a range from the type: if the type is a range or a union
+ // containing a range, that range is returned; otherwise, NULL is returned.
RangeType* GetRange();
+ static bool IsInteger(double x) {
+ return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
+ }
+ static bool IsInteger(i::Object* x) {
+ return x->IsNumber() && IsInteger(x->Number());
+ }
+
int NumClasses();
int NumConstants();
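
The IsInteger predicate above admits the infinities but rejects -0 and NaN. A standalone check (using x == 0 && std::signbit(x) as a stand-in for i::IsMinusZero):

    #include <cassert>
    #include <cmath>
    #include <limits>

    static bool IsInteger(double x) {
      return std::nearbyint(x) == x && !(x == 0 && std::signbit(x));
    }

    int main() {
      assert(IsInteger(42.0));
      assert(IsInteger(std::numeric_limits<double>::infinity()));  // allowed
      assert(!IsInteger(0.5));
      assert(!IsInteger(-0.0));          // minus zero deliberately excluded
      assert(!IsInteger(std::nan("")));  // nearbyint(NaN) != NaN
      return 0;
    }
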
@@ -589,24 +605,17 @@ class TypeImpl : public Config::Base {
bool SlowIs(TypeImpl* that);
bool SemanticIs(TypeImpl* that);
- static bool IsInteger(double x) {
- return nearbyint(x) == x && !i::IsMinusZero(x); // Allows for infinities.
- }
- static bool IsInteger(i::Object* x) {
- return x->IsNumber() && IsInteger(x->Number());
- }
-
struct Limits {
double min;
double max;
Limits(double min, double max) : min(min), max(max) {}
explicit Limits(RangeType* range) : min(range->Min()), max(range->Max()) {}
- static Limits Empty(Region* region) { return Limits(1, 0); }
+ bool IsEmpty();
+ static Limits Empty() { return Limits(1, 0); }
+ static Limits Intersect(Limits lhs, Limits rhs);
+ static Limits Union(Limits lhs, Limits rhs);
};
- static bool IsEmpty(Limits lim);
- static Limits Intersect(Limits lhs, Limits rhs);
- static Limits Union(Limits lhs, Limits rhs);
static bool Overlap(RangeType* lhs, RangeType* rhs);
static bool Contains(RangeType* lhs, RangeType* rhs);
static bool Contains(RangeType* range, ConstantType* constant);
@@ -655,11 +664,9 @@ class TypeImpl<Config>::BitsetType : public TypeImpl<Config> {
bitset Bitset() { return Config::as_bitset(this); }
static TypeImpl* New(bitset bits) {
- if (FLAG_enable_slow_asserts) CheckNumberBits(bits);
return Config::from_bitset(bits);
}
static TypeHandle New(bitset bits, Region* region) {
- if (FLAG_enable_slow_asserts) CheckNumberBits(bits);
return Config::from_bitset(bits, region);
}
@@ -685,6 +692,7 @@ class TypeImpl<Config>::BitsetType : public TypeImpl<Config> {
static bitset Lub(i::Object* value);
static bitset Lub(double value);
static bitset Lub(double min, double max);
+ static bitset ExpandInternals(bitset bits);
static const char* Name(bitset);
static void Print(std::ostream& os, bitset); // NOLINT
@@ -696,14 +704,13 @@ class TypeImpl<Config>::BitsetType : public TypeImpl<Config> {
private:
struct Boundary {
- bitset bits;
+ bitset internal;
+ bitset external;
double min;
};
static const Boundary BoundariesArray[];
static inline const Boundary* Boundaries();
static inline size_t BoundariesSize();
-
- static void CheckNumberBits(bitset bits);
};
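
A hedged sketch of how a boundary table with separate internal/external bits could drive ExpandInternals: each internal-only bit (such as OtherNumber) is widened to the public bitset it belongs to before a lub escapes to API users. The semantics here are an assumption inferred from the NOTE in the types.cc hunk above ("bits contains OtherNumber iff bits contains PlainNumber"), not V8's actual data.

    #include <cstddef>

    typedef unsigned bitset;

    struct Boundary {
      bitset internal;  // bit used internally for this numeric sub-interval
      bitset external;  // public bit that the internal one expands to
      double min;       // inclusive lower bound of the sub-interval
    };

    bitset ExpandInternals(bitset bits, const Boundary* table, size_t n) {
      bitset result = bits;
      for (size_t i = 0; i < n; ++i) {
        if (bits & table[i].internal) result |= table[i].external;
      }
      return result;
    }
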
@@ -788,11 +795,6 @@ class TypeImpl<Config>::UnionType : public StructuralType {
template<class Config>
class TypeImpl<Config>::ClassType : public StructuralType {
public:
- TypeHandle Bound(Region* region) {
- return Config::is_class(this) ?
- BitsetType::New(BitsetType::Lub(*Config::as_class(this)), region) :
- this->Get(0);
- }
i::Handle<i::Map> Map() {
return Config::is_class(this) ? Config::as_class(this) :
this->template GetValue<i::Map>(1);
@@ -814,6 +816,14 @@ class TypeImpl<Config>::ClassType : public StructuralType {
DCHECK(type->IsClass());
return static_cast<ClassType*>(type);
}
+
+ private:
+ template<class> friend class TypeImpl;
+ bitset Lub() {
+ return Config::is_class(this) ?
+ BitsetType::Lub(*Config::as_class(this)) :
+ this->Get(0)->AsBitset();
+ }
};
@@ -823,7 +833,6 @@ class TypeImpl<Config>::ClassType : public StructuralType {
template<class Config>
class TypeImpl<Config>::ConstantType : public StructuralType {
public:
- TypeHandle Bound() { return this->Get(0); }
i::Handle<i::Object> Value() { return this->template GetValue<i::Object>(1); }
static ConstantHandle New(i::Handle<i::Object> value, Region* region) {
@@ -838,6 +847,10 @@ class TypeImpl<Config>::ConstantType : public StructuralType {
DCHECK(type->IsConstant());
return static_cast<ConstantType*>(type);
}
+
+ private:
+ template<class> friend class TypeImpl;
+ bitset Lub() { return this->Get(0)->AsBitset(); }
};
// TODO(neis): Also cache value if numerical.
// TODO(neis): Allow restricting the representation.
@@ -849,7 +862,6 @@ class TypeImpl<Config>::ConstantType : public StructuralType {
template <class Config>
class TypeImpl<Config>::RangeType : public TypeImpl<Config> {
public:
- bitset Bound() { return Config::range_get_bitset(Config::as_range(this)); }
double Min() { return Config::range_get_double(Config::as_range(this), 0); }
double Max() { return Config::range_get_double(Config::as_range(this), 1); }
@@ -879,8 +891,13 @@ class TypeImpl<Config>::RangeType : public TypeImpl<Config> {
DCHECK(type->IsRange());
return static_cast<RangeType*>(type);
}
+
+ private:
+ template<class> friend class TypeImpl;
+ bitset Lub() {
+ return Config::range_get_bitset(Config::as_range(this));
+ }
};
-// TODO(neis): Also cache min and max values.
// -----------------------------------------------------------------------------
@@ -1128,8 +1145,8 @@ struct BoundsImpl {
}
// Unrestricted bounds.
- static BoundsImpl Unbounded(Region* region) {
- return BoundsImpl(Type::None(region), Type::Any(region));
+ static BoundsImpl Unbounded() {
+ return BoundsImpl(Type::None(), Type::Any());
}
// Meet: both b1 and b2 are known to hold.
diff --git a/deps/v8/src/typing-asm.cc b/deps/v8/src/typing-asm.cc
new file mode 100644
index 0000000000..f7688964a5
--- /dev/null
+++ b/deps/v8/src/typing-asm.cc
@@ -0,0 +1,1076 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/typing-asm.h"
+
+#include "src/ast.h"
+#include "src/codegen.h"
+#include "src/scopes.h"
+#include "src/zone-type-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace {
+
+base::LazyInstance<ZoneTypeCache>::type kCache = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+
+#define FAIL(node, msg) \
+ do { \
+ valid_ = false; \
+ int line = node->position() == RelocInfo::kNoPosition \
+ ? -1 \
+ : script_->GetLineNumber(node->position()); \
+ base::OS::SNPrintF(error_message_, sizeof(error_message_), \
+ "asm: line %d: %s\n", line + 1, msg); \
+ return; \
+ } while (false)
+
+
+#define RECURSE(call) \
+ do { \
+ DCHECK(!HasStackOverflow()); \
+ call; \
+ if (HasStackOverflow()) return; \
+ if (!valid_) return; \
+ } while (false)
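
Illustrative only: how a visitor method reads once these macros are in play.

    // void AsmTyper::VisitIfStatement(IfStatement* stmt) {
    //   RECURSE(Visit(stmt->condition()));      // bail out if a nested visit
    //                                           // failed or overflowed
    //   if (!ok) FAIL(stmt, "...");             // record "asm: line N: ..."
    //                                           // and return from this method
    //   RECURSE(Visit(stmt->then_statement()));
    // }

FAIL clears valid_ and formats the message into error_message_; RECURSE re-checks valid_ and the stack guard after every nested visit, so a failure anywhere unwinds the whole AST walk without exceptions.
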
+
+
+AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
+ FunctionLiteral* root)
+ : script_(script),
+ root_(root),
+ valid_(true),
+ stdlib_types_(zone),
+ stdlib_heap_types_(zone),
+ stdlib_math_types_(zone),
+ global_variable_type_(HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ local_variable_type_(HashMap::PointersMatch,
+ ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone)),
+ in_function_(false),
+ building_function_tables_(false),
+ cache_(kCache.Get()) {
+ InitializeAstVisitor(isolate, zone);
+ InitializeStdlib();
+}
+
+
+bool AsmTyper::Validate() {
+ VisitAsmModule(root_);
+ return valid_ && !HasStackOverflow();
+}
+
+
+void AsmTyper::VisitAsmModule(FunctionLiteral* fun) {
+ Scope* scope = fun->scope();
+ if (!scope->is_function_scope()) FAIL(fun, "not at function scope");
+
+ // Module parameters.
+ for (int i = 0; i < scope->num_parameters(); ++i) {
+ Variable* param = scope->parameter(i);
+ DCHECK(GetType(param) == NULL);
+ SetType(param, Type::None(zone()));
+ }
+
+ ZoneList<Declaration*>* decls = scope->declarations();
+
+ // Set all globals to type Any.
+ VariableDeclaration* decl = scope->function();
+ if (decl != NULL) SetType(decl->proxy()->var(), Type::None());
+ RECURSE(VisitDeclarations(scope->declarations()));
+
+ // Validate global variables.
+ RECURSE(VisitStatements(fun->body()));
+
+ // Validate function annotations.
+ for (int i = 0; i < decls->length(); ++i) {
+ FunctionDeclaration* decl = decls->at(i)->AsFunctionDeclaration();
+ if (decl != NULL) {
+ RECURSE(VisitFunctionAnnotation(decl->fun()));
+ Variable* var = decl->proxy()->var();
+ DCHECK(GetType(var) == NULL);
+ SetType(var, computed_type_);
+ DCHECK(GetType(var) != NULL);
+ }
+ }
+
+ // Build function tables.
+ building_function_tables_ = true;
+ RECURSE(VisitStatements(fun->body()));
+ building_function_tables_ = false;
+
+ // Validate function bodies.
+ for (int i = 0; i < decls->length(); ++i) {
+ FunctionDeclaration* decl = decls->at(i)->AsFunctionDeclaration();
+ if (decl != NULL) {
+ RECURSE(
+ VisitWithExpectation(decl->fun(), Type::Any(zone()), "UNREACHABLE"));
+ if (!computed_type_->IsFunction()) {
+ FAIL(decl->fun(), "function literal expected to be a function");
+ }
+ }
+ }
+
+ // Validate exports.
+ ReturnStatement* stmt = fun->body()->last()->AsReturnStatement();
+ RECURSE(VisitWithExpectation(stmt->expression(), Type::Object(),
+ "expected object export"));
+}
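
For orientation, a hypothetical asm.js module (shown as a comment) that exercises each validation stage above in order: module parameters, globals, function annotations, function tables, function bodies, and the export.

    // function Module(stdlib, foreign, heap) {
    //   "use asm";
    //   var x = 0;                   // global, typed during declarations
    //   function add(a, b) {
    //     a = a|0; b = b|0;          // parameter annotations (pass 1)
    //     return (a + b)|0;          // body validated in the final pass
    //   }
    //   var table = [add];           // built while building_function_tables_
    //   return { add: add };         // checked as the module export
    // }
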
+
+
+void AsmTyper::VisitVariableDeclaration(VariableDeclaration* decl) {
+ Variable* var = decl->proxy()->var();
+ if (var->location() != VariableLocation::PARAMETER) {
+ if (GetType(var) == NULL) {
+ SetType(var, Type::Any(zone()));
+ } else {
+ DCHECK(!GetType(var)->IsFunction());
+ }
+ }
+ DCHECK(GetType(var) != NULL);
+ intish_ = 0;
+}
+
+
+void AsmTyper::VisitFunctionDeclaration(FunctionDeclaration* decl) {
+ if (in_function_) {
+ FAIL(decl, "function declared inside another");
+ }
+}
+
+
+void AsmTyper::VisitFunctionAnnotation(FunctionLiteral* fun) {
+ // Extract result type.
+ ZoneList<Statement*>* body = fun->body();
+ Type* result_type = Type::Undefined(zone());
+ if (body->length() > 0) {
+ ReturnStatement* stmt = body->last()->AsReturnStatement();
+ if (stmt != NULL) {
+ RECURSE(VisitExpressionAnnotation(stmt->expression()));
+ result_type = computed_type_;
+ }
+ }
+ Type::FunctionType* type =
+ Type::Function(result_type, Type::Any(), fun->parameter_count(), zone())
+ ->AsFunction();
+
+ // Extract parameter types.
+ bool good = true;
+ for (int i = 0; i < fun->parameter_count(); ++i) {
+ good = false;
+ if (i >= body->length()) break;
+ ExpressionStatement* stmt = body->at(i)->AsExpressionStatement();
+ if (stmt == NULL) break;
+ Assignment* expr = stmt->expression()->AsAssignment();
+ if (expr == NULL || expr->is_compound()) break;
+ VariableProxy* proxy = expr->target()->AsVariableProxy();
+ if (proxy == NULL) break;
+ Variable* var = proxy->var();
+ if (var->location() != VariableLocation::PARAMETER || var->index() != i)
+ break;
+ RECURSE(VisitExpressionAnnotation(expr->value()));
+ SetType(var, computed_type_);
+ type->InitParameter(i, computed_type_);
+ good = true;
+ }
+ if (!good) FAIL(fun, "missing parameter type annotations");
+
+ SetResult(fun, type);
+}
+
+
+void AsmTyper::VisitExpressionAnnotation(Expression* expr) {
+ // Normal +x or x|0 annotations.
+ BinaryOperation* bin = expr->AsBinaryOperation();
+ if (bin != NULL) {
+ Literal* right = bin->right()->AsLiteral();
+ if (right != NULL) {
+ switch (bin->op()) {
+ case Token::MUL: // We encode +x as 1*x
+ if (right->raw_value()->ContainsDot() &&
+ right->raw_value()->AsNumber() == 1.0) {
+ SetResult(expr, cache_.kFloat64);
+ return;
+ }
+ break;
+ case Token::BIT_OR:
+ if (!right->raw_value()->ContainsDot() &&
+ right->raw_value()->AsNumber() == 0.0) {
+ SetResult(expr, cache_.kInt32);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ FAIL(expr, "invalid type annotation on binary op");
+ }
+
+ // Numbers or the undefined literal (for empty returns).
+ if (expr->IsLiteral()) {
+ RECURSE(VisitWithExpectation(expr, Type::Any(), "invalid literal"));
+ return;
+ }
+
+ Call* call = expr->AsCall();
+ if (call != NULL) {
+ if (call->expression()->IsVariableProxy()) {
+ RECURSE(VisitWithExpectation(
+ call->expression(), Type::Any(zone()),
+ "only fround allowed on expression annotations"));
+ if (!computed_type_->Is(
+ Type::Function(cache_.kFloat32, Type::Number(zone()), zone()))) {
+ FAIL(call->expression(),
+ "only fround allowed on expression annotations");
+ }
+ if (call->arguments()->length() != 1) {
+ FAIL(call, "invalid argument count calling fround");
+ }
+ SetResult(expr, cache_.kFloat32);
+ return;
+ }
+ }
+
+ FAIL(expr, "invalid type annotation");
+}
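
The accepted annotation shapes, illustratively (comments only):

    // a = +b;          // encoded as b*1.0  => double (cache_.kFloat64)
    // a = b|0;         // bitwise-or with 0 => int    (cache_.kInt32)
    // a = fround(b);   // stdlib fround call => float (cache_.kFloat32)
    // Plain number literals (and `undefined` for empty returns) also pass,
    // via the literal case above.
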
+
+
+void AsmTyper::VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0; i < stmts->length(); ++i) {
+ Statement* stmt = stmts->at(i);
+ RECURSE(Visit(stmt));
+ }
+}
+
+
+void AsmTyper::VisitBlock(Block* stmt) {
+ RECURSE(VisitStatements(stmt->statements()));
+}
+
+
+void AsmTyper::VisitExpressionStatement(ExpressionStatement* stmt) {
+ RECURSE(VisitWithExpectation(stmt->expression(), Type::Any(),
+ "expression statement expected to be any"));
+}
+
+
+void AsmTyper::VisitEmptyStatement(EmptyStatement* stmt) {}
+
+
+void AsmTyper::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* stmt) {
+ Visit(stmt->statement());
+}
+
+
+void AsmTyper::VisitEmptyParentheses(EmptyParentheses* expr) { UNREACHABLE(); }
+
+
+void AsmTyper::VisitIfStatement(IfStatement* stmt) {
+ if (!in_function_) {
+ FAIL(stmt, "if statement inside module body");
+ }
+ RECURSE(VisitWithExpectation(stmt->condition(), cache_.kInt32,
+ "if condition expected to be integer"));
+ RECURSE(Visit(stmt->then_statement()));
+ RECURSE(Visit(stmt->else_statement()));
+}
+
+
+void AsmTyper::VisitContinueStatement(ContinueStatement* stmt) {
+ if (!in_function_) {
+ FAIL(stmt, "continue statement inside module body");
+ }
+}
+
+
+void AsmTyper::VisitBreakStatement(BreakStatement* stmt) {
+ if (!in_function_) {
+ FAIL(stmt, "continue statement inside module body");
+ }
+}
+
+
+void AsmTyper::VisitReturnStatement(ReturnStatement* stmt) {
+ // Handle module return statement in VisitAsmModule.
+ if (!in_function_) {
+ return;
+ }
+ RECURSE(
+ VisitWithExpectation(stmt->expression(), return_type_,
+ "return expression expected to have return type"));
+}
+
+
+void AsmTyper::VisitWithStatement(WithStatement* stmt) {
+ FAIL(stmt, "bad with statement");
+}
+
+
+void AsmTyper::VisitSwitchStatement(SwitchStatement* stmt) {
+ if (!in_function_) {
+ FAIL(stmt, "switch statement inside module body");
+ }
+ RECURSE(VisitWithExpectation(stmt->tag(), cache_.kInt32,
+ "switch expression non-integer"));
+ ZoneList<CaseClause*>* clauses = stmt->cases();
+ for (int i = 0; i < clauses->length(); ++i) {
+ CaseClause* clause = clauses->at(i);
+ if (clause->is_default()) continue;
+ Expression* label = clause->label();
+ RECURSE(
+ VisitWithExpectation(label, cache_.kInt32, "case label non-integer"));
+ if (!label->IsLiteral()) FAIL(label, "non-literal case label");
+ Handle<Object> value = label->AsLiteral()->value();
+ int32_t value32;
+ if (!value->ToInt32(&value32)) FAIL(label, "illegal case label value");
+ // TODO(bradnelson): Detect duplicates.
+ ZoneList<Statement*>* stmts = clause->statements();
+ RECURSE(VisitStatements(stmts));
+ }
+}
+
+
+void AsmTyper::VisitCaseClause(CaseClause* clause) { UNREACHABLE(); }
+
+
+void AsmTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ if (!in_function_) {
+ FAIL(stmt, "do statement inside module body");
+ }
+ RECURSE(Visit(stmt->body()));
+ RECURSE(VisitWithExpectation(stmt->cond(), cache_.kInt32,
+ "do condition expected to be integer"));
+}
+
+
+void AsmTyper::VisitWhileStatement(WhileStatement* stmt) {
+ if (!in_function_) {
+ FAIL(stmt, "while statement inside module body");
+ }
+ RECURSE(VisitWithExpectation(stmt->cond(), cache_.kInt32,
+ "while condition expected to be integer"));
+ RECURSE(Visit(stmt->body()));
+}
+
+
+void AsmTyper::VisitForStatement(ForStatement* stmt) {
+ if (!in_function_) {
+ FAIL(stmt, "for statement inside module body");
+ }
+ if (stmt->init() != NULL) {
+ RECURSE(Visit(stmt->init()));
+ }
+ if (stmt->cond() != NULL) {
+ RECURSE(VisitWithExpectation(stmt->cond(), cache_.kInt32,
+ "for condition expected to be integer"));
+ }
+ if (stmt->next() != NULL) {
+ RECURSE(Visit(stmt->next()));
+ }
+ RECURSE(Visit(stmt->body()));
+}
+
+
+void AsmTyper::VisitForInStatement(ForInStatement* stmt) {
+ FAIL(stmt, "for-in statement encountered");
+}
+
+
+void AsmTyper::VisitForOfStatement(ForOfStatement* stmt) {
+ FAIL(stmt, "for-of statement encountered");
+}
+
+
+void AsmTyper::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ FAIL(stmt, "try statement encountered");
+}
+
+
+void AsmTyper::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ FAIL(stmt, "try statement encountered");
+}
+
+
+void AsmTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ FAIL(stmt, "debugger statement encountered");
+}
+
+
+void AsmTyper::VisitFunctionLiteral(FunctionLiteral* expr) {
+ Scope* scope = expr->scope();
+ DCHECK(scope->is_function_scope());
+ if (in_function_) {
+ FAIL(expr, "invalid nested function");
+ }
+
+ if (!expr->bounds().upper->IsFunction()) {
+ FAIL(expr, "invalid function literal");
+ }
+
+ Type::FunctionType* type = expr->bounds().upper->AsFunction();
+ Type* save_return_type = return_type_;
+ return_type_ = type->Result();
+ in_function_ = true;
+ local_variable_type_.Clear();
+ RECURSE(VisitDeclarations(scope->declarations()));
+ RECURSE(VisitStatements(expr->body()));
+ in_function_ = false;
+ return_type_ = save_return_type;
+ IntersectResult(expr, type);
+}
+
+
+void AsmTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
+ FAIL(expr, "function info literal encountered");
+}
+
+
+void AsmTyper::VisitConditional(Conditional* expr) {
+ RECURSE(VisitWithExpectation(expr->condition(), cache_.kInt32,
+ "condition expected to be integer"));
+ RECURSE(VisitWithExpectation(
+ expr->then_expression(), expected_type_,
+ "conditional then branch type mismatch with enclosing expression"));
+ Type* then_type = computed_type_;
+ RECURSE(VisitWithExpectation(
+ expr->else_expression(), expected_type_,
+ "conditional else branch type mismatch with enclosing expression"));
+ Type* else_type = computed_type_;
+ Type* type = Type::Intersect(then_type, else_type, zone());
+ if (!(type->Is(cache_.kInt32) || type->Is(cache_.kFloat64))) {
+ FAIL(expr, "ill-typed conditional");
+ }
+ IntersectResult(expr, type);
+}
+
+
+void AsmTyper::VisitVariableProxy(VariableProxy* expr) {
+ Variable* var = expr->var();
+ if (GetType(var) == NULL) {
+ FAIL(expr, "unbound variable");
+ }
+ Type* type = Type::Intersect(GetType(var), expected_type_, zone());
+ if (type->Is(cache_.kInt32)) {
+ type = cache_.kInt32;
+ }
+ SetType(var, type);
+ intish_ = 0;
+ IntersectResult(expr, type);
+}
+
+
+void AsmTyper::VisitLiteral(Literal* expr) {
+ intish_ = 0;
+ Handle<Object> value = expr->value();
+ if (value->IsNumber()) {
+ int32_t i;
+ uint32_t u;
+ if (expr->raw_value()->ContainsDot()) {
+ IntersectResult(expr, cache_.kFloat64);
+ } else if (value->ToUint32(&u)) {
+ IntersectResult(expr, cache_.kInt32);
+ } else if (value->ToInt32(&i)) {
+ IntersectResult(expr, cache_.kInt32);
+ } else {
+ FAIL(expr, "illegal number");
+ }
+ } else if (value->IsString()) {
+ IntersectResult(expr, Type::String());
+ } else if (value->IsUndefined()) {
+ IntersectResult(expr, Type::Undefined());
+ } else {
+ FAIL(expr, "illegal literal");
+ }
+}
+
+
+void AsmTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
+ FAIL(expr, "regular expression encountered");
+}
+
+
+void AsmTyper::VisitObjectLiteral(ObjectLiteral* expr) {
+ if (in_function_) {
+ FAIL(expr, "object literal in function");
+ }
+ // Allowed for asm module's export declaration.
+ ZoneList<ObjectLiteralProperty*>* props = expr->properties();
+ for (int i = 0; i < props->length(); ++i) {
+ ObjectLiteralProperty* prop = props->at(i);
+ RECURSE(VisitWithExpectation(prop->value(), Type::Any(zone()),
+ "object property expected to be a function"));
+ if (!computed_type_->IsFunction()) {
+ FAIL(prop->value(), "non-function in function table");
+ }
+ }
+ IntersectResult(expr, Type::Object(zone()));
+}
+
+
+void AsmTyper::VisitArrayLiteral(ArrayLiteral* expr) {
+ if (in_function_) {
+ FAIL(expr, "array literal inside a function");
+ }
+ // Allowed for function tables.
+ ZoneList<Expression*>* values = expr->values();
+ Type* elem_type = Type::None(zone());
+ for (int i = 0; i < values->length(); ++i) {
+ Expression* value = values->at(i);
+ RECURSE(VisitWithExpectation(value, Type::Any(), "UNREACHABLE"));
+ if (!computed_type_->IsFunction()) {
+ FAIL(value, "array component expected to be a function");
+ }
+ elem_type = Type::Union(elem_type, computed_type_, zone());
+ }
+ array_size_ = values->length();
+ IntersectResult(expr, Type::Array(elem_type, zone()));
+}
+
+
+void AsmTyper::VisitAssignment(Assignment* expr) {
+ // Handle function tables and everything else in different passes.
+ if (!in_function_) {
+ if (expr->value()->IsArrayLiteral()) {
+ if (!building_function_tables_) {
+ return;
+ }
+ } else {
+ if (building_function_tables_) {
+ return;
+ }
+ }
+ }
+ if (expr->is_compound()) FAIL(expr, "compound assignment encountered");
+ Type* type = expected_type_;
+ RECURSE(VisitWithExpectation(
+ expr->value(), type, "assignment value expected to match surrounding"));
+ if (intish_ != 0) {
+ FAIL(expr, "value still an intish");
+ }
+ RECURSE(VisitWithExpectation(expr->target(), computed_type_,
+ "assignment target expected to match value"));
+ if (intish_ != 0) {
+ FAIL(expr, "value still an intish");
+ }
+ IntersectResult(expr, computed_type_);
+}
+
+
+void AsmTyper::VisitYield(Yield* expr) {
+ FAIL(expr, "yield expression encountered");
+}
+
+
+void AsmTyper::VisitThrow(Throw* expr) {
+ FAIL(expr, "throw statement encountered");
+}
+
+
+int AsmTyper::ElementShiftSize(Type* type) {
+ if (type->Is(cache_.kInt8) || type->Is(cache_.kUint8)) return 0;
+ if (type->Is(cache_.kInt16) || type->Is(cache_.kUint16)) return 1;
+ if (type->Is(cache_.kInt32) || type->Is(cache_.kUint32) ||
+ type->Is(cache_.kFloat32))
+ return 2;
+ if (type->Is(cache_.kFloat64)) return 3;
+ return -1;
+}
+
+
+void AsmTyper::VisitHeapAccess(Property* expr) {
+ Type::ArrayType* array_type = computed_type_->AsArray();
+ size_t size = array_size_;
+ Type* type = array_type->Element();
+ if (type->IsFunction()) {
+ BinaryOperation* bin = expr->key()->AsBinaryOperation();
+ if (bin == NULL || bin->op() != Token::BIT_AND) {
+ FAIL(expr->key(), "expected & in call");
+ }
+ RECURSE(VisitWithExpectation(bin->left(), cache_.kInt32,
+ "array index expected to be integer"));
+ Literal* right = bin->right()->AsLiteral();
+ if (right == NULL || right->raw_value()->ContainsDot()) {
+ FAIL(right, "call mask must be integer");
+ }
+ RECURSE(VisitWithExpectation(bin->right(), cache_.kInt32,
+ "call mask expected to be integer"));
+ if (static_cast<size_t>(right->raw_value()->AsNumber()) != size - 1) {
+ FAIL(right, "call mask must match function table");
+ }
+ bin->set_bounds(Bounds(cache_.kInt32));
+ } else {
+ BinaryOperation* bin = expr->key()->AsBinaryOperation();
+ if (bin == NULL || bin->op() != Token::SAR) {
+ FAIL(expr->key(), "expected >> in heap access");
+ }
+ RECURSE(VisitWithExpectation(bin->left(), cache_.kInt32,
+ "array index expected to be integer"));
+ Literal* right = bin->right()->AsLiteral();
+ if (right == NULL || right->raw_value()->ContainsDot()) {
+ FAIL(right, "heap access shift must be integer");
+ }
+ RECURSE(VisitWithExpectation(bin->right(), cache_.kInt32,
+ "array shift expected to be integer"));
+ int n = static_cast<int>(right->raw_value()->AsNumber());
+ int expected_shift = ElementShiftSize(type);
+ if (expected_shift < 0 || n != expected_shift) {
+ FAIL(right, "heap access shift must match element size");
+ }
+ bin->set_bounds(Bounds(cache_.kInt32));
+ }
+ IntersectResult(expr, type);
+}
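
The two access shapes this method accepts, illustratively (comments only):

    // HEAP32[p >> 2]     // heap view: the shift literal must equal
    //                    // ElementShiftSize, i.e. log2 of the element size
    //                    // (2 for a 4-byte Int32Array element)
    // ftable[i & 3](x)   // function table of size 4: the mask literal must
    //                    // be size - 1, keeping the index in bounds
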
+
+
+void AsmTyper::VisitProperty(Property* expr) {
+ // stdlib.Math.x
+ Property* inner_prop = expr->obj()->AsProperty();
+ if (inner_prop != NULL) {
+ // Get property name.
+ Literal* key = expr->key()->AsLiteral();
+ if (key == NULL || !key->IsPropertyName())
+ FAIL(expr, "invalid type annotation on property 2");
+ Handle<String> name = key->AsPropertyName();
+
+ // Check that inner property name is "Math".
+ Literal* math_key = inner_prop->key()->AsLiteral();
+ if (math_key == NULL || !math_key->IsPropertyName() ||
+ !math_key->AsPropertyName()->IsUtf8EqualTo(CStrVector("Math")))
+ FAIL(expr, "invalid type annotation on stdlib (a1)");
+
+ // Check that object is stdlib.
+ VariableProxy* proxy = inner_prop->obj()->AsVariableProxy();
+ if (proxy == NULL) FAIL(expr, "invalid type annotation on stdlib (a2)");
+ Variable* var = proxy->var();
+ if (var->location() != VariableLocation::PARAMETER || var->index() != 0)
+ FAIL(expr, "invalid type annotation on stdlib (a3)");
+
+ // Look up library type.
+ Type* type = LibType(stdlib_math_types_, name);
+ if (type == NULL) FAIL(expr, "unknown standard function 3");
+ SetResult(expr, type);
+ return;
+ }
+
+ // Only recurse at this point so that we avoid needing
+ // stdlib.Math to have a real type.
+ RECURSE(VisitWithExpectation(expr->obj(), Type::Any(),
+ "property holder expected to be object"));
+
+ // For heap view or function table access.
+ if (computed_type_->IsArray()) {
+ VisitHeapAccess(expr);
+ return;
+ }
+
+ // Get property name.
+ Literal* key = expr->key()->AsLiteral();
+ if (key == NULL || !key->IsPropertyName())
+ FAIL(expr, "invalid type annotation on property 3");
+ Handle<String> name = key->AsPropertyName();
+
+ // stdlib.x or foreign.x
+ VariableProxy* proxy = expr->obj()->AsVariableProxy();
+ if (proxy != NULL) {
+ Variable* var = proxy->var();
+ if (var->location() != VariableLocation::PARAMETER) {
+ FAIL(expr, "invalid type annotation on variable");
+ }
+ switch (var->index()) {
+ case 0: {
+ // Object is stdlib, look up library type.
+ Type* type = LibType(stdlib_types_, name);
+ if (type == NULL) {
+ FAIL(expr, "unknown standard function 4");
+ }
+ SetResult(expr, type);
+ return;
+ }
+ case 1:
+ // Object is foreign lib.
+ SetResult(expr, expected_type_);
+ return;
+ default:
+ FAIL(expr, "invalid type annotation on parameter");
+ }
+ }
+
+ FAIL(expr, "invalid property access");
+}
+
+
+void AsmTyper::VisitCall(Call* expr) {
+ RECURSE(VisitWithExpectation(expr->expression(), Type::Any(),
+ "callee expected to be any"));
+ if (computed_type_->IsFunction()) {
+ Type::FunctionType* fun_type = computed_type_->AsFunction();
+ ZoneList<Expression*>* args = expr->arguments();
+ if (fun_type->Arity() != args->length()) {
+ FAIL(expr, "call with wrong arity");
+ }
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(VisitWithExpectation(
+ arg, fun_type->Parameter(i),
+ "call argument expected to match callee parameter"));
+ }
+ IntersectResult(expr, fun_type->Result());
+ } else if (computed_type_->Is(Type::Any())) {
+ // For foreign calls.
+ ZoneList<Expression*>* args = expr->arguments();
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(VisitWithExpectation(arg, Type::Any(),
+ "foreign call argument expected to be any"));
+ }
+ IntersectResult(expr, Type::Number());
+ } else {
+ FAIL(expr, "invalid callee");
+ }
+}
+
+
+void AsmTyper::VisitCallNew(CallNew* expr) {
+ if (in_function_) {
+ FAIL(expr, "new not allowed in module function");
+ }
+ RECURSE(VisitWithExpectation(expr->expression(), Type::Any(),
+ "expected stdlib function"));
+ if (computed_type_->IsFunction()) {
+ Type::FunctionType* fun_type = computed_type_->AsFunction();
+ ZoneList<Expression*>* args = expr->arguments();
+ if (fun_type->Arity() != args->length())
+ FAIL(expr, "call with wrong arity");
+ for (int i = 0; i < args->length(); ++i) {
+ Expression* arg = args->at(i);
+ RECURSE(VisitWithExpectation(
+ arg, fun_type->Parameter(i),
+ "constructor argument expected to match callee parameter"));
+ }
+ IntersectResult(expr, fun_type->Result());
+ return;
+ }
+
+ FAIL(expr, "ill-typed new operator");
+}
+
+
+void AsmTyper::VisitCallRuntime(CallRuntime* expr) {
+ // Allow runtime calls for now.
+}
+
+
+void AsmTyper::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::NOT: // Used to encode != and !==
+ RECURSE(VisitWithExpectation(expr->expression(), cache_.kInt32,
+ "operand expected to be integer"));
+ IntersectResult(expr, cache_.kInt32);
+ return;
+ case Token::DELETE:
+ FAIL(expr, "delete operator encountered");
+ case Token::VOID:
+ FAIL(expr, "void operator encountered");
+ case Token::TYPEOF:
+ FAIL(expr, "typeof operator encountered");
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void AsmTyper::VisitCountOperation(CountOperation* expr) {
+ FAIL(expr, "increment or decrement operator encountered");
+}
+
+
+void AsmTyper::VisitBinaryOperation(BinaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::COMMA: {
+ RECURSE(VisitWithExpectation(expr->left(), Type::Any(),
+ "left comma operand expected to be any"));
+ RECURSE(VisitWithExpectation(expr->right(), Type::Any(),
+ "right comma operand expected to be any"));
+ IntersectResult(expr, computed_type_);
+ return;
+ }
+ case Token::OR:
+ case Token::AND:
+ FAIL(expr, "logical operator encountered");
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ // BIT_OR allows Any since it is used as a type coercion.
+ // BIT_XOR allows Number since it is used as a type coercion (encoding ~).
+ Type* expectation =
+ expr->op() == Token::BIT_OR
+ ? Type::Any()
+ : expr->op() == Token::BIT_XOR ? Type::Number() : cache_.kInt32;
+ Type* result =
+ expr->op() == Token::SHR ? Type::Unsigned32() : cache_.kInt32;
+ RECURSE(VisitWithExpectation(expr->left(), expectation,
+ "left bit operand expected to be integer"));
+ int left_intish = intish_;
+ RECURSE(VisitWithExpectation(expr->right(), expectation,
+ "right bit operand expected to be integer"));
+ int right_intish = intish_;
+ if (left_intish > kMaxUncombinedAdditiveSteps) {
+ FAIL(expr, "too many consecutive additive ops");
+ }
+ if (right_intish > kMaxUncombinedAdditiveSteps) {
+ FAIL(expr, "too many consecutive additive ops");
+ }
+ intish_ = 0;
+ IntersectResult(expr, result);
+ return;
+ }
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD: {
+ RECURSE(VisitWithExpectation(
+ expr->left(), Type::Number(),
+ "left arithmetic operand expected to be number"));
+ Type* left_type = computed_type_;
+ int left_intish = intish_;
+ RECURSE(VisitWithExpectation(
+ expr->right(), Type::Number(),
+ "right arithmetic operand expected to be number"));
+ Type* right_type = computed_type_;
+ int right_intish = intish_;
+ Type* type = Type::Union(left_type, right_type, zone());
+ if (type->Is(cache_.kInt32)) {
+ if (expr->op() == Token::MUL) {
+ if (!expr->left()->IsLiteral() && !expr->right()->IsLiteral()) {
+ FAIL(expr, "direct integer multiply forbidden");
+ }
+ intish_ = 0;
+ IntersectResult(expr, cache_.kInt32);
+ return;
+ } else {
+ intish_ = left_intish + right_intish + 1;
+ if (expr->op() == Token::ADD || expr->op() == Token::SUB) {
+ if (intish_ > kMaxUncombinedAdditiveSteps) {
+ FAIL(expr, "too many consecutive additive ops");
+ }
+ } else {
+ if (intish_ > kMaxUncombinedMultiplicativeSteps) {
+ FAIL(expr, "too many consecutive multiplicative ops");
+ }
+ }
+ IntersectResult(expr, cache_.kInt32);
+ return;
+ }
+ } else if (type->Is(Type::Number())) {
+ IntersectResult(expr, cache_.kFloat64);
+ return;
+ } else {
+ FAIL(expr, "ill-typed arithmetic operation");
+ }
+ }
+ default:
+ UNREACHABLE();
+ }
+}
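
How the intish_ counter behaves in practice, illustratively (comments only):

    // (a + b)|0        // intish_ reaches 1; the |0 coercion resets it to 0
    // (a + b + c)|0    // intish_ reaches 2; fine, the additive cap is 1<<20
    // a * b            // rejected outright unless one operand is a literal

The additive cap (kMaxUncombinedAdditiveSteps = 1 << 20) bounds how far an int32 sum may drift before a |0 coercion renormalizes it; multiplication gets no such slack (kMaxUncombinedMultiplicativeSteps = 1).
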
+
+
+void AsmTyper::VisitCompareOperation(CompareOperation* expr) {
+ RECURSE(
+ VisitWithExpectation(expr->left(), Type::Number(),
+ "left comparison operand expected to be number"));
+ Type* left_type = computed_type_;
+ RECURSE(
+ VisitWithExpectation(expr->right(), Type::Number(),
+ "right comparison operand expected to be number"));
+ Type* right_type = computed_type_;
+ Type* type = Type::Union(left_type, right_type, zone());
+ expr->set_combined_type(type);
+ if (type->Is(Type::Integral32()) || type->Is(Type::UntaggedFloat64())) {
+ IntersectResult(expr, cache_.kInt32);
+ } else {
+ FAIL(expr, "ill-typed comparison operation");
+ }
+}
+
+
+void AsmTyper::VisitThisFunction(ThisFunction* expr) {
+ FAIL(expr, "this function not allowed");
+}
+
+
+void AsmTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
+ for (int i = 0; i < decls->length(); ++i) {
+ Declaration* decl = decls->at(i);
+ RECURSE(Visit(decl));
+ }
+}
+
+
+void AsmTyper::VisitImportDeclaration(ImportDeclaration* decl) {
+ FAIL(decl, "import declaration encountered");
+}
+
+
+void AsmTyper::VisitExportDeclaration(ExportDeclaration* decl) {
+ FAIL(decl, "export declaration encountered");
+}
+
+
+void AsmTyper::VisitClassLiteral(ClassLiteral* expr) {
+ FAIL(expr, "class literal not allowed");
+}
+
+
+void AsmTyper::VisitSpread(Spread* expr) { FAIL(expr, "spread not allowed"); }
+
+
+void AsmTyper::VisitSuperPropertyReference(SuperPropertyReference* expr) {
+ FAIL(expr, "super property reference not allowed");
+}
+
+
+void AsmTyper::VisitSuperCallReference(SuperCallReference* expr) {
+ FAIL(expr, "call reference not allowed");
+}
+
+
+void AsmTyper::InitializeStdlib() {
+ Type* number_type = Type::Number(zone());
+ Type* double_type = cache_.kFloat64;
+ Type* double_fn1_type = Type::Function(double_type, double_type, zone());
+ Type* double_fn2_type =
+ Type::Function(double_type, double_type, double_type, zone());
+
+ Type* fround_type = Type::Function(cache_.kFloat32, number_type, zone());
+ Type* imul_type =
+ Type::Function(cache_.kInt32, cache_.kInt32, cache_.kInt32, zone());
+ // TODO(bradnelson): currently only approximating the proper intersection type
+ // (which we cannot currently represent).
+ Type* abs_type = Type::Function(number_type, number_type, zone());
+
+ struct Assignment {
+ const char* name;
+ Type* type;
+ };
+
+ const Assignment math[] = {
+ {"PI", double_type}, {"E", double_type},
+ {"LN2", double_type}, {"LN10", double_type},
+ {"LOG2E", double_type}, {"LOG10E", double_type},
+ {"SQRT2", double_type}, {"SQRT1_2", double_type},
+ {"imul", imul_type}, {"abs", abs_type},
+ {"ceil", double_fn1_type}, {"floor", double_fn1_type},
+ {"fround", fround_type}, {"pow", double_fn2_type},
+ {"exp", double_fn1_type}, {"log", double_fn1_type},
+ {"min", double_fn2_type}, {"max", double_fn2_type},
+ {"sqrt", double_fn1_type}, {"cos", double_fn1_type},
+ {"sin", double_fn1_type}, {"tan", double_fn1_type},
+ {"acos", double_fn1_type}, {"asin", double_fn1_type},
+ {"atan", double_fn1_type}, {"atan2", double_fn2_type}};
+ for (unsigned i = 0; i < arraysize(math); ++i) {
+ stdlib_math_types_[math[i].name] = math[i].type;
+ }
+
+ stdlib_types_["Infinity"] = double_type;
+ stdlib_types_["NaN"] = double_type;
+ Type* buffer_type = Type::Any(zone());
+#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
+ stdlib_types_[#TypeName "Array"] = \
+ Type::Function(cache_.k##TypeName##Array, buffer_type, zone());
+ TYPED_ARRAYS(TYPED_ARRAY)
+#undef TYPED_ARRAY
+
+#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
+ stdlib_heap_types_[#TypeName "Array"] = \
+ Type::Function(cache_.k##TypeName##Array, buffer_type, zone());
+ TYPED_ARRAYS(TYPED_ARRAY)
+#undef TYPED_ARRAY
+}
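
A hypothetical module prologue (as a comment) whose imports resolve against the tables built above:

    // var sqrt   = stdlib.Math.sqrt;             // double -> double
    // var imul   = stdlib.Math.imul;             // (int, int) -> int
    // var inf    = stdlib.Infinity;              // double
    // var HEAP32 = new stdlib.Int32Array(heap);  // typed-array ctor type
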
+
+
+Type* AsmTyper::LibType(ObjectTypeMap map, Handle<String> name) {
+ base::SmartArrayPointer<char> aname = name->ToCString();
+ ObjectTypeMap::iterator i = map.find(std::string(aname.get()));
+ if (i == map.end()) {
+ return NULL;
+ }
+ return i->second;
+}
+
+
+void AsmTyper::SetType(Variable* variable, Type* type) {
+ ZoneHashMap::Entry* entry;
+ if (in_function_) {
+ entry = local_variable_type_.LookupOrInsert(
+ variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone()));
+ } else {
+ entry = global_variable_type_.LookupOrInsert(
+ variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone()));
+ }
+ entry->value = reinterpret_cast<void*>(type);
+}
+
+
+Type* AsmTyper::GetType(Variable* variable) {
+ i::ZoneHashMap::Entry* entry = NULL;
+ if (in_function_) {
+ entry = local_variable_type_.Lookup(variable, ComputePointerHash(variable));
+ }
+ if (entry == NULL) {
+ entry =
+ global_variable_type_.Lookup(variable, ComputePointerHash(variable));
+ }
+ if (entry == NULL) {
+ return NULL;
+ } else {
+ return reinterpret_cast<Type*>(entry->value);
+ }
+}
+
+
+void AsmTyper::SetResult(Expression* expr, Type* type) {
+ computed_type_ = type;
+ expr->set_bounds(Bounds(computed_type_));
+}
+
+
+void AsmTyper::IntersectResult(Expression* expr, Type* type) {
+ computed_type_ = type;
+ Type* bounded_type = Type::Intersect(computed_type_, expected_type_, zone());
+ expr->set_bounds(Bounds(bounded_type));
+}
+
+
+void AsmTyper::VisitWithExpectation(Expression* expr, Type* expected_type,
+ const char* msg) {
+ Type* save = expected_type_;
+ expected_type_ = expected_type;
+ RECURSE(Visit(expr));
+ Type* bounded_type = Type::Intersect(computed_type_, expected_type_, zone());
+ if (bounded_type->Is(Type::None(zone()))) {
+#ifdef DEBUG
+ PrintF("Computed type: ");
+ computed_type_->Print();
+ PrintF("Expected type: ");
+ expected_type_->Print();
+#endif
+ FAIL(expr, msg);
+ }
+ expected_type_ = save;
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/typing-asm.h b/deps/v8/src/typing-asm.h
new file mode 100644
index 0000000000..74c28fb3cf
--- /dev/null
+++ b/deps/v8/src/typing-asm.h
@@ -0,0 +1,95 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TYPING_ASM_H_
+#define V8_TYPING_ASM_H_
+
+#include "src/allocation.h"
+#include "src/ast.h"
+#include "src/effects.h"
+#include "src/type-info.h"
+#include "src/types.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class ZoneTypeCache;
+
+class AsmTyper : public AstVisitor {
+ public:
+ explicit AsmTyper(Isolate* isolate, Zone* zone, Script* script,
+ FunctionLiteral* root);
+ bool Validate();
+ const char* error_message() { return error_message_; }
+
+ DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ private:
+ Script* script_;
+ FunctionLiteral* root_;
+ bool valid_;
+
+ // Information for bi-directional typing with a cap on nesting depth.
+ Type* expected_type_;
+ Type* computed_type_;
+ int intish_; // How many ops we've gone without a x|0.
+
+ Type* return_type_; // Return type of last function.
+ size_t array_size_; // Array size of last ArrayLiteral.
+
+ typedef ZoneMap<std::string, Type*> ObjectTypeMap;
+ ObjectTypeMap stdlib_types_;
+ ObjectTypeMap stdlib_heap_types_;
+ ObjectTypeMap stdlib_math_types_;
+
+ // Map from Variable* to global/local variable Type*.
+ ZoneHashMap global_variable_type_;
+ ZoneHashMap local_variable_type_;
+
+ bool in_function_; // In module function?
+ bool building_function_tables_;
+
+ ZoneTypeCache const& cache_;
+
+ static const int kErrorMessageLimit = 100;
+ char error_message_[kErrorMessageLimit];
+
+ static const int kMaxUncombinedAdditiveSteps = 1 << 20;
+ static const int kMaxUncombinedMultiplicativeSteps = 1;
+
+ void InitializeStdlib();
+
+ void VisitDeclarations(ZoneList<Declaration*>* d) override;
+ void VisitStatements(ZoneList<Statement*>* s) override;
+
+ void VisitExpressionAnnotation(Expression* e);
+ void VisitFunctionAnnotation(FunctionLiteral* f);
+ void VisitAsmModule(FunctionLiteral* f);
+
+ void VisitHeapAccess(Property* expr);
+
+ int ElementShiftSize(Type* type);
+
+ void SetType(Variable* variable, Type* type);
+ Type* GetType(Variable* variable);
+
+ Type* LibType(ObjectTypeMap map, Handle<String> name);
+
+ void SetResult(Expression* expr, Type* type);
+ void IntersectResult(Expression* expr, Type* type);
+
+ void VisitWithExpectation(Expression* expr, Type* expected_type,
+ const char* msg);
+
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node) override;
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ DISALLOW_COPY_AND_ASSIGN(AsmTyper);
+};
+}
+} // namespace v8::internal
+
+#endif // V8_TYPING_ASM_H_
diff --git a/deps/v8/src/typing-reset.cc b/deps/v8/src/typing-reset.cc
new file mode 100644
index 0000000000..af7641b485
--- /dev/null
+++ b/deps/v8/src/typing-reset.cc
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/typing-reset.h"
+
+#include "src/ast.h"
+#include "src/codegen.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+
+
+TypingReseter::TypingReseter(Isolate* isolate, Zone* zone,
+ FunctionLiteral* root)
+ : AstExpressionVisitor(isolate, zone, root) {}
+
+
+void TypingReseter::VisitExpression(Expression* expression) {
+ expression->set_bounds(Bounds::Unbounded());
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/typing-reset.h b/deps/v8/src/typing-reset.h
new file mode 100644
index 0000000000..b809eb2161
--- /dev/null
+++ b/deps/v8/src/typing-reset.h
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TYPING_RESET_H_
+#define V8_TYPING_RESET_H_
+
+#include "src/ast-expression-visitor.h"
+
+namespace v8 {
+namespace internal {
+
+// A visitor over a CompilationInfo's AST that resets
+// typing bounds back to their default.
+
+class TypingReseter : public AstExpressionVisitor {
+ public:
+ TypingReseter(Isolate* isolate, Zone* zone, FunctionLiteral* root);
+
+ protected:
+ void VisitExpression(Expression* expression) override;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_TYPING_RESET_H_
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index 204ace6c96..bd5114e89a 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -15,34 +15,19 @@ namespace v8 {
namespace internal {
-AstTyper::AstTyper(CompilationInfo* info)
- : info_(info),
- oracle_(info->isolate(), info->zone(),
- handle(info->closure()->shared()->code()),
- handle(info->closure()->shared()->feedback_vector()),
- handle(info->closure()->context()->native_context())),
- store_(info->zone()) {
- InitializeAstVisitor(info->isolate(), info->zone());
-}
-
-
-#define RECURSE(call) \
- do { \
- DCHECK(!visitor->HasStackOverflow()); \
- call; \
- if (visitor->HasStackOverflow()) return; \
- } while (false)
-
-void AstTyper::Run(CompilationInfo* info) {
- AstTyper* visitor = new(info->zone()) AstTyper(info);
- Scope* scope = info->scope();
-
- RECURSE(visitor->VisitDeclarations(scope->declarations()));
- RECURSE(visitor->VisitStatements(info->literal()->body()));
+AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
+ Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root)
+ : closure_(closure),
+ scope_(scope),
+ osr_ast_id_(osr_ast_id),
+ root_(root),
+ oracle_(isolate, zone, handle(closure->shared()->code()),
+ handle(closure->shared()->feedback_vector()),
+ handle(closure->context()->native_context())),
+ store_(zone) {
+ InitializeAstVisitor(isolate, zone);
}
-#undef RECURSE
-
#ifdef OBJECT_PRINT
static void PrintObserved(Variable* var, Object* value, Type* type) {
@@ -63,18 +48,17 @@ Effect AstTyper::ObservedOnStack(Object* value) {
void AstTyper::ObserveTypesAtOsrEntry(IterationStatement* stmt) {
- if (stmt->OsrEntryId() != info_->osr_ast_id()) return;
+ if (stmt->OsrEntryId() != osr_ast_id_) return;
DisallowHeapAllocation no_gc;
JavaScriptFrameIterator it(isolate());
JavaScriptFrame* frame = it.frame();
- Scope* scope = info_->scope();
// Assert that the frame on the stack belongs to the function we want to OSR.
- DCHECK_EQ(*info_->closure(), frame->function());
+ DCHECK_EQ(*closure_, frame->function());
- int params = scope->num_parameters();
- int locals = scope->StackLocalCount();
+ int params = scope_->num_parameters();
+ int locals = scope_->StackLocalCount();
// Use sequential composition to achieve desired narrowing.
// The receiver is a parameter with index -1.
@@ -89,21 +73,19 @@ void AstTyper::ObserveTypesAtOsrEntry(IterationStatement* stmt) {
#ifdef OBJECT_PRINT
if (FLAG_trace_osr && FLAG_print_scopes) {
- PrintObserved(scope->receiver(),
- frame->receiver(),
+ PrintObserved(scope_->receiver(), frame->receiver(),
store_.LookupBounds(parameter_index(-1)).lower);
for (int i = 0; i < params; i++) {
- PrintObserved(scope->parameter(i),
- frame->GetParameter(i),
+ PrintObserved(scope_->parameter(i), frame->GetParameter(i),
store_.LookupBounds(parameter_index(i)).lower);
}
ZoneList<Variable*> local_vars(locals, zone());
- ZoneList<Variable*> context_vars(scope->ContextLocalCount(), zone());
- ZoneList<Variable*> global_vars(scope->ContextGlobalCount(), zone());
- scope->CollectStackAndContextLocals(&local_vars, &context_vars,
- &global_vars);
+ ZoneList<Variable*> context_vars(scope_->ContextLocalCount(), zone());
+ ZoneList<Variable*> global_vars(scope_->ContextGlobalCount(), zone());
+ scope_->CollectStackAndContextLocals(&local_vars, &context_vars,
+ &global_vars);
for (int i = 0; i < locals; i++) {
PrintObserved(local_vars.at(i),
frame->GetExpression(i),
@@ -122,6 +104,12 @@ void AstTyper::ObserveTypesAtOsrEntry(IterationStatement* stmt) {
} while (false)
+void AstTyper::Run() {
+ RECURSE(VisitDeclarations(scope_->declarations()));
+ RECURSE(VisitStatements(root_->body()));
+}
+
+
void AstTyper::VisitStatements(ZoneList<Statement*>* stmts) {
for (int i = 0; i < stmts->length(); ++i) {
Statement* stmt = stmts->at(i);
@@ -148,6 +136,12 @@ void AstTyper::VisitEmptyStatement(EmptyStatement* stmt) {
}
+void AstTyper::VisitSloppyBlockFunctionStatement(
+ SloppyBlockFunctionStatement* stmt) {
+ Visit(stmt->statement());
+}
+
+
void AstTyper::VisitIfStatement(IfStatement* stmt) {
// Collect type feedback.
if (!stmt->condition()->ToBooleanIsTrue() &&
@@ -408,8 +402,13 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
prop->emit_store()) {
// Record type feed back for the property.
TypeFeedbackId id = prop->key()->AsLiteral()->LiteralFeedbackId();
+ FeedbackVectorICSlot slot = prop->GetSlot();
SmallMapList maps;
- oracle()->CollectReceiverTypes(id, &maps);
+ if (FLAG_vector_stores) {
+ oracle()->CollectReceiverTypes(slot, &maps);
+ } else {
+ oracle()->CollectReceiverTypes(id, &maps);
+ }
prop->set_receiver_type(maps.length() == 1 ? maps.at(0)
: Handle<Map>::null());
}
@@ -438,18 +437,31 @@ void AstTyper::VisitAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
if (prop != NULL) {
TypeFeedbackId id = expr->AssignmentFeedbackId();
- expr->set_is_uninitialized(oracle()->StoreIsUninitialized(id));
+ FeedbackVectorICSlot slot = expr->AssignmentSlot();
+ expr->set_is_uninitialized(FLAG_vector_stores
+ ? oracle()->StoreIsUninitialized(slot)
+ : oracle()->StoreIsUninitialized(id));
if (!expr->IsUninitialized()) {
+ SmallMapList* receiver_types = expr->GetReceiverTypes();
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
DCHECK(lit_key != NULL && lit_key->value()->IsString());
Handle<String> name = Handle<String>::cast(lit_key->value());
- oracle()->AssignmentReceiverTypes(id, name, expr->GetReceiverTypes());
+ if (FLAG_vector_stores) {
+ oracle()->AssignmentReceiverTypes(slot, name, receiver_types);
+ } else {
+ oracle()->AssignmentReceiverTypes(id, name, receiver_types);
+ }
} else {
KeyedAccessStoreMode store_mode;
IcCheckType key_type;
- oracle()->KeyedAssignmentReceiverTypes(id, expr->GetReceiverTypes(),
- &store_mode, &key_type);
+ if (FLAG_vector_stores) {
+ oracle()->KeyedAssignmentReceiverTypes(slot, receiver_types,
+ &store_mode, &key_type);
+ } else {
+ oracle()->KeyedAssignmentReceiverTypes(id, receiver_types,
+ &store_mode, &key_type);
+ }
expr->set_store_mode(store_mode);
expr->set_key_type(key_type);
}
@@ -549,8 +561,7 @@ void AstTyper::VisitCall(Call* expr) {
void AstTyper::VisitCallNew(CallNew* expr) {
// Collect type feedback.
FeedbackVectorSlot allocation_site_feedback_slot =
- FLAG_pretenuring_call_new ? expr->AllocationSiteFeedbackSlot()
- : expr->CallNewFeedbackSlot();
+ expr->CallNewFeedbackSlot();
expr->set_allocation_site(
oracle()->GetCallNewAllocationSite(allocation_site_feedback_slot));
bool monomorphic =
@@ -611,12 +622,18 @@ void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
void AstTyper::VisitCountOperation(CountOperation* expr) {
// Collect type feedback.
TypeFeedbackId store_id = expr->CountStoreFeedbackId();
+ FeedbackVectorICSlot slot = expr->CountSlot();
KeyedAccessStoreMode store_mode;
IcCheckType key_type;
- oracle()->GetStoreModeAndKeyType(store_id, &store_mode, &key_type);
+ if (FLAG_vector_stores) {
+ oracle()->GetStoreModeAndKeyType(slot, &store_mode, &key_type);
+ oracle()->CountReceiverTypes(slot, expr->GetReceiverTypes());
+ } else {
+ oracle()->GetStoreModeAndKeyType(store_id, &store_mode, &key_type);
+ oracle()->CountReceiverTypes(store_id, expr->GetReceiverTypes());
+ }
expr->set_store_mode(store_mode);
expr->set_key_type(key_type);
- oracle()->CountReceiverTypes(store_id, expr->GetReceiverTypes());
expr->set_type(oracle()->CountType(expr->CountBinOpFeedbackId()));
// TODO(rossberg): merge the count type with the generic expression type.
@@ -753,6 +770,11 @@ void AstTyper::VisitCompareOperation(CompareOperation* expr) {
void AstTyper::VisitSpread(Spread* expr) { RECURSE(Visit(expr->expression())); }
+void AstTyper::VisitEmptyParentheses(EmptyParentheses* expr) {
+ UNREACHABLE();
+}
+
+
void AstTyper::VisitThisFunction(ThisFunction* expr) {
}
diff --git a/deps/v8/src/typing.h b/deps/v8/src/typing.h
index f3ead18f99..8b3e97b67c 100644
--- a/deps/v8/src/typing.h
+++ b/deps/v8/src/typing.h
@@ -19,17 +19,13 @@ namespace internal {
class AstTyper: public AstVisitor {
public:
- static void Run(CompilationInfo* info);
-
- void* operator new(size_t size, Zone* zone) { return zone->New(size); }
- void operator delete(void* pointer, Zone* zone) { }
- void operator delete(void* pointer) { }
+ AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
+ Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root);
+ void Run();
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
private:
- explicit AstTyper(CompilationInfo* info);
-
Effect ObservedOnStack(Object* value);
void ObserveTypesAtOsrEntry(IterationStatement* stmt);
@@ -37,7 +33,10 @@ class AstTyper: public AstVisitor {
typedef v8::internal::Effects<int, kNoVar> Effects;
typedef v8::internal::NestedEffects<int, kNoVar> Store;
- CompilationInfo* info_;
+ Handle<JSFunction> closure_;
+ Scope* scope_;
+ BailoutId osr_ast_id_;
+ FunctionLiteral* root_;
TypeFeedbackOracle oracle_;
Store store_;
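
A hedged sketch of the new call pattern this header implies; the caller now passes in the pieces that were previously pulled out of CompilationInfo (accessor names assumed from the old constructor):

    AstTyper typer(info->isolate(), info->zone(), info->closure(),
                   info->scope(), info->osr_ast_id(), info->literal());
    typer.Run();
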
diff --git a/deps/v8/src/unicode-cache-inl.h b/deps/v8/src/unicode-cache-inl.h
new file mode 100644
index 0000000000..c5a8a69dab
--- /dev/null
+++ b/deps/v8/src/unicode-cache-inl.h
@@ -0,0 +1,49 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNICODE_CACHE_INL_H_
+#define V8_UNICODE_CACHE_INL_H_
+
+#include "src/unicode-inl.h"
+#include "src/unicode-cache.h"
+
+namespace v8 {
+namespace internal {
+
+bool UnicodeCache::IsIdentifierStart(unibrow::uchar c) {
+ return kIsIdentifierStart.get(c);
+}
+
+
+bool UnicodeCache::IsIdentifierPart(unibrow::uchar c) {
+ return kIsIdentifierPart.get(c);
+}
+
+
+bool UnicodeCache::IsLineTerminator(unibrow::uchar c) {
+ return kIsLineTerminator.get(c);
+}
+
+
+bool UnicodeCache::IsLineTerminatorSequence(unibrow::uchar c,
+ unibrow::uchar next) {
+ if (!IsLineTerminator(c)) return false;
+ if (c == 0x000d && next == 0x000a) return false; // CR with following LF.
+ return true;
+}
+
+
+bool UnicodeCache::IsWhiteSpace(unibrow::uchar c) {
+ return kIsWhiteSpace.get(c);
+}
+
+
+bool UnicodeCache::IsWhiteSpaceOrLineTerminator(unibrow::uchar c) {
+ return kIsWhiteSpaceOrLineTerminator.get(c);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNICODE_CACHE_INL_H_
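
A standalone illustration of the CRLF rule above (simplified predicates, not V8's tables): returning false at the CR half and recognizing the terminator at the LF position counts a CR+LF pair as a single line terminator sequence.

    #include <cassert>

    static bool IsLineTerminator(unsigned c) {
      return c == 0x000a || c == 0x000d || c == 0x2028 || c == 0x2029;
    }

    static bool IsLineTerminatorSequence(unsigned c, unsigned next) {
      if (!IsLineTerminator(c)) return false;
      if (c == 0x000d && next == 0x000a) return false;  // defer to the LF
      return true;
    }

    int main() {
      assert(IsLineTerminatorSequence(0x000a, 0));        // bare LF
      assert(!IsLineTerminatorSequence(0x000d, 0x000a));  // CR before LF
      assert(IsLineTerminatorSequence(0x000d, 'x'));      // bare CR
      return 0;
    }
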
diff --git a/deps/v8/src/unicode-cache.h b/deps/v8/src/unicode-cache.h
new file mode 100644
index 0000000000..849025e4cb
--- /dev/null
+++ b/deps/v8/src/unicode-cache.h
@@ -0,0 +1,47 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNICODE_CACHE_H_
+#define V8_UNICODE_CACHE_H_
+
+#include "src/base/macros.h"
+#include "src/char-predicates.h"
+#include "src/unicode.h"
+#include "src/unicode-decoder.h"
+
+namespace v8 {
+namespace internal {
+
+// Caching predicates used by scanners.
+class UnicodeCache {
+ public:
+ UnicodeCache() {}
+ typedef unibrow::Utf8Decoder<512> Utf8Decoder;
+
+ StaticResource<Utf8Decoder>* utf8_decoder() { return &utf8_decoder_; }
+
+ inline bool IsIdentifierStart(unibrow::uchar c);
+ inline bool IsIdentifierPart(unibrow::uchar c);
+ inline bool IsLineTerminator(unibrow::uchar c);
+ inline bool IsLineTerminatorSequence(unibrow::uchar c, unibrow::uchar next);
+
+ inline bool IsWhiteSpace(unibrow::uchar c);
+ inline bool IsWhiteSpaceOrLineTerminator(unibrow::uchar c);
+
+ private:
+ unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
+ unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
+ unibrow::Predicate<unibrow::LineTerminator, 128> kIsLineTerminator;
+ unibrow::Predicate<WhiteSpace, 128> kIsWhiteSpace;
+ unibrow::Predicate<WhiteSpaceOrLineTerminator, 128>
+ kIsWhiteSpaceOrLineTerminator;
+ StaticResource<Utf8Decoder> utf8_decoder_;
+
+ DISALLOW_COPY_AND_ASSIGN(UnicodeCache);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNICODE_CACHE_H_
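
The unibrow::Predicate<...> members are per-UnicodeCache memo tables: each classification query is answered from a small cache so the scanner avoids re-deriving the answer for characters it sees over and over. A toy direct-mapped version of the idea (the real unibrow internals differ; IsAsciiWhiteSpace is just a stand-in classifier):

#include <cassert>
#include <cctype>

template <bool (*F)(unsigned), int kSize>
class CachedPredicate {
 public:
  bool get(unsigned c) {
    Entry& e = entries_[c % kSize];       // direct-mapped slot
    if (!e.valid || e.code_point != c) {  // miss: classify and remember
      e = {c, F(c), true};
    }
    return e.value;
  }

 private:
  struct Entry { unsigned code_point; bool value; bool valid; };
  Entry entries_[kSize] = {};
};

bool IsAsciiWhiteSpace(unsigned c) {
  return c < 128 && std::isspace(static_cast<int>(c)) != 0;
}

int main() {
  CachedPredicate<IsAsciiWhiteSpace, 128> ws;
  assert(ws.get(' '));
  assert(!ws.get('x'));
  assert(ws.get(' '));  // answered from the cached slot
}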
diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc
index df45697bde..de5e36038b 100644
--- a/deps/v8/src/unicode.cc
+++ b/deps/v8/src/unicode.cc
@@ -4,6 +4,7 @@
//
// This file was generated at 2014-10-08 15:25:47.940335
+#include "src/unicode.h"
#include "src/unicode-inl.h"
#include <stdio.h>
#include <stdlib.h>
diff --git a/deps/v8/src/unique.h b/deps/v8/src/unique.h
index 68fb86956c..8805218b1f 100644
--- a/deps/v8/src/unique.h
+++ b/deps/v8/src/unique.h
@@ -2,14 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_HYDROGEN_UNIQUE_H_
-#define V8_HYDROGEN_UNIQUE_H_
+#ifndef V8_UNIQUE_H_
+#define V8_UNIQUE_H_
#include <ostream> // NOLINT(readability/streams)
#include "src/base/functional.h"
#include "src/handles.h"
-#include "src/objects-inl.h" // TODO(everyone): Fix our inl.h crap
#include "src/utils.h"
#include "src/zone.h"
@@ -32,7 +31,7 @@ class UniqueSet;
// Careful! Comparison of two Uniques is only correct if both were created
// in the same "era" of GC or if at least one is a non-movable object.
template <typename T>
-class Unique {
+class Unique final {
public:
Unique<T>() : raw_address_(NULL) {}
@@ -54,10 +53,6 @@ class Unique {
handle_ = handle;
}
- // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
- Unique(Address raw_address, Handle<T> handle)
- : raw_address_(raw_address), handle_(handle) { }
-
// Constructor for handling automatic up casting.
// Eg. Unique<JSFunction> can be passed when Unique<Object> is expected.
template <class S> Unique(Unique<S> uniq) {
@@ -129,15 +124,16 @@ class Unique {
return Unique<T>(reinterpret_cast<Address>(*handle), handle);
}
- friend class UniqueSet<T>; // Uses internal details for speed.
- template <class U>
- friend class Unique; // For comparing raw_address values.
+ private:
+ Unique(Address raw_address, Handle<T> handle)
+ : raw_address_(raw_address), handle_(handle) {}
- protected:
Address raw_address_;
Handle<T> handle_;
- friend class SideEffectsTracker;
+ friend class UniqueSet<T>; // Uses internal details for speed.
+ template <class U>
+ friend class Unique; // For comparing raw_address values.
};
template <typename T>
@@ -361,4 +357,4 @@ class UniqueSet final : public ZoneObject {
} } // namespace v8::internal
-#endif // V8_HYDROGEN_UNIQUE_H_
+#endif // V8_UNIQUE_H_
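
The unique.h hunks tighten Unique<T>'s invariants rather than change its meaning: the class becomes final, and the (Address, Handle) constructor the old TODO flagged as a migration hack moves to the private section, so identity can only be captured through the sanctioned factory path. The header's caveat still governs: comparing by raw address is only sound while neither object has been moved by GC. A stripped-down sketch of identity-by-address capture:

#include <cassert>

template <typename T>
class UniqueRef final {
 public:
  static UniqueRef Create(T* object) { return UniqueRef(object); }
  // Only meaningful while neither referent has moved ("same era").
  bool operator==(const UniqueRef& other) const {
    return address_ == other.address_;
  }

 private:
  explicit UniqueRef(T* object) : address_(object) {}
  T* address_;  // captured once: identity, not contents
};

int main() {
  int a = 1, b = 1;  // equal contents, distinct identities
  assert(UniqueRef<int>::Create(&a) == UniqueRef<int>::Create(&a));
  assert(!(UniqueRef<int>::Create(&a) == UniqueRef<int>::Create(&b)));
}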
diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js
index bf3270f1d0..bdb83d1431 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/uri.js
@@ -17,11 +17,6 @@
var GlobalObject = global.Object;
var GlobalArray = global.Array;
var InternalArray = utils.InternalArray;
-var ToString;
-
-utils.Import(function(from) {
- ToString = from.ToString;
-});
// -------------------------------------------------------------------
// Define internal helper functions.
@@ -169,7 +164,7 @@ function URIDecodeOctets(octets, result, index) {
// ECMA-262, section 15.1.3
function Encode(uri, unescape) {
- uri = TO_STRING_INLINE(uri);
+ uri = TO_STRING(uri);
var uriLength = uri.length;
var array = new InternalArray(uriLength);
var index = 0;
@@ -200,7 +195,7 @@ function Encode(uri, unescape) {
// ECMA-262, section 15.1.3
function Decode(uri, reserved) {
- uri = TO_STRING_INLINE(uri);
+ uri = TO_STRING(uri);
var uriLength = uri.length;
var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING);
var index = 0;
@@ -278,14 +273,12 @@ function Decode(uri, reserved) {
// Define exported functions.
// ECMA-262 - B.2.1.
-function URIEscapeJS(str) {
- var s = ToString(str);
+function URIEscapeJS(s) {
return %URIEscape(s);
}
// ECMA-262 - B.2.2.
-function URIUnescapeJS(str) {
- var s = ToString(str);
+function URIUnescapeJS(s) {
return %URIUnescape(s);
}
@@ -309,15 +302,13 @@ function URIDecode(uri) {
return false;
};
- var string = ToString(uri);
- return Decode(string, reservedPredicate);
+ return Decode(uri, reservedPredicate);
}
// ECMA-262 - 15.1.3.2.
function URIDecodeComponent(component) {
var reservedPredicate = function(cc) { return false; };
- var string = ToString(component);
- return Decode(string, reservedPredicate);
+ return Decode(component, reservedPredicate);
}
// ECMA-262 - 15.1.3.3.
@@ -343,8 +334,7 @@ function URIEncode(uri) {
return false;
};
- var string = ToString(uri);
- return Encode(string, unescapePredicate);
+ return Encode(uri, unescapePredicate);
}
// ECMA-262 - 15.1.3.4
@@ -364,8 +354,7 @@ function URIEncodeComponent(component) {
return false;
};
- var string = ToString(component);
- return Encode(string, unescapePredicate);
+ return Encode(component, unescapePredicate);
}
// -------------------------------------------------------------------
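
The uri.js hunks all follow one pattern: the per-wrapper "var s = ToString(x)" preambles disappear because Encode and Decode now coerce their argument once at entry via the TO_STRING macro, so the exported functions pass their inputs straight through. The shape of the change as a rough C++ sketch, with int standing in for an arbitrary input value:

#include <cassert>
#include <string>

std::string ToStringValue(int v) { return std::to_string(v); }

std::string Decode(int uri) {
  std::string s = ToStringValue(uri);  // coerce once, inside the worker
  return s;                            // decoding proper elided
}

// Exported wrapper: no private coercion step of its own anymore.
std::string URIDecode(int uri) { return Decode(uri); }

int main() { assert(URIDecode(7) == "7"); }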
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 582c576993..ef35f96964 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -1084,6 +1084,7 @@ class BailoutId {
int ToInt() const { return id_; }
static BailoutId None() { return BailoutId(kNoneId); }
+ static BailoutId Prologue() { return BailoutId(kPrologueId); }
static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); }
static BailoutId Declarations() { return BailoutId(kDeclarationsId); }
static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); }
@@ -1099,6 +1100,7 @@ class BailoutId {
static const int kNoneId = -1;
// Using 0 could disguise errors.
+ static const int kPrologueId = 1;
static const int kFunctionEntryId = 2;
// This AST id identifies the point after the declarations have been visited.
@@ -1210,17 +1212,6 @@ int WriteAsCFile(const char* filename, const char* varname,
// ----------------------------------------------------------------------------
-// Data structures
-
-template <typename T>
-inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
- int length) {
- return Vector< Handle<Object> >(
- reinterpret_cast<v8::internal::Handle<Object>*>(elms), length);
-}
-
-
-// ----------------------------------------------------------------------------
// Memory
// Copies words from |src| to |dst|. The data spans must not overlap.
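
BailoutId hands out small reserved ids from a fixed table, and the hunk slots the new kPrologueId = 1 between the kNoneId = -1 sentinel and kFunctionEntryId = 2. The comment's point about 0 is that zero-filled memory must never alias a valid id, so initialization bugs fail loudly. The scheme in miniature:

#include <cassert>

class TaggedId {
 public:
  static TaggedId None() { return TaggedId(kNoneId); }
  static TaggedId Prologue() { return TaggedId(kPrologueId); }
  static TaggedId FunctionEntry() { return TaggedId(kFunctionEntryId); }
  bool IsNone() const { return id_ == kNoneId; }
  int ToInt() const { return id_; }

 private:
  explicit TaggedId(int id) : id_(id) {}
  static const int kNoneId = -1;
  // 0 is never handed out: zero-initialized memory then reads as an
  // impossible id rather than a plausible one.
  static const int kPrologueId = 1;
  static const int kFunctionEntryId = 2;
  int id_;
};

int main() {
  assert(TaggedId::None().IsNone());
  assert(TaggedId::Prologue().ToInt() == 1);
}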
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index bd6f2c2b28..760a9b564d 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -12,13 +12,13 @@
#include "src/deoptimizer.h"
#include "src/elements.h"
#include "src/frames.h"
-#include "src/heap-profiler.h"
#include "src/hydrogen.h"
#include "src/isolate.h"
#include "src/lithium-allocator.h"
#include "src/objects.h"
+#include "src/profiler/heap-profiler.h"
+#include "src/profiler/sampler.h"
#include "src/runtime-profiler.h"
-#include "src/sampler.h"
#include "src/snapshot/natives.h"
#include "src/snapshot/serialize.h"
#include "src/snapshot/snapshot.h"
@@ -55,12 +55,6 @@ void V8::TearDown() {
}
-void V8::SetReturnAddressLocationResolver(
- ReturnAddressLocationResolver resolver) {
- StackFrame::SetReturnAddressLocationResolver(resolver);
-}
-
-
void V8::InitializeOncePerProcessImpl() {
FlagList::EnforceFlagImplications();
@@ -125,6 +119,9 @@ v8::Platform* V8::GetCurrentPlatform() {
}
+void V8::SetPlatformForTesting(v8::Platform* platform) { platform_ = platform; }
+
+
void V8::SetNativesBlob(StartupData* natives_blob) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
base::CallOnce(&init_natives_once, &SetNativesFromFile, natives_blob);
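
SetPlatformForTesting is a one-line test seam: it overwrites the process-wide platform_ pointer directly, letting a test inject a mock platform without going through InitializePlatform/ShutdownPlatform teardown. A minimal sketch of that seam, with Env as a hypothetical stand-in for V8's static holder:

#include <cassert>

struct Platform { virtual ~Platform() = default; };

class Env {
 public:
  static void InitializePlatform(Platform* p) { platform_ = p; }
  static Platform* GetCurrentPlatform() { return platform_; }
  // Test-only: swap the platform in place, no teardown required.
  static void SetPlatformForTesting(Platform* p) { platform_ = p; }

 private:
  static Platform* platform_;
};
Platform* Env::platform_ = nullptr;

int main() {
  Platform real, fake;
  Env::InitializePlatform(&real);
  Env::SetPlatformForTesting(&fake);
  assert(Env::GetCurrentPlatform() == &fake);
}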
diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h
index 23e1a1230c..f5b3b84735 100644
--- a/deps/v8/src/v8.h
+++ b/deps/v8/src/v8.h
@@ -2,36 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Top include for all V8 .cc files.
-//
-
#ifndef V8_V8_H_
#define V8_V8_H_
-#if defined(GOOGLE3) || defined(DCHECK_ALWAYS_ON)
-// Google3 and Chromium special flag handling.
-#if defined(DEBUG) && defined(NDEBUG)
-// V8 only uses DEBUG and whenever it is set we are building a debug
-// version of V8. We do not use NDEBUG and simply undef it here for
-// consistency.
-#undef NDEBUG
-#endif
-#endif // defined(GOOGLE3)
-
-// V8 only uses DEBUG, but included external files
-// may use NDEBUG - make sure they are consistent.
-#if defined(DEBUG) && defined(NDEBUG)
-#error both DEBUG and NDEBUG are set
-#endif
-
-// Basic includes
#include "include/v8.h"
#include "src/allocation.h"
-// Objects
-#include "src/objects-inl.h"
-
namespace v8 {
namespace internal {
@@ -47,18 +23,12 @@ class V8 : public AllStatic {
static void FatalProcessOutOfMemory(const char* location,
bool take_snapshot = false);
- // Allows an entropy source to be provided for use in random number
- // generation.
- static void SetEntropySource(EntropySource source);
- // Support for return-address rewriting profilers.
- static void SetReturnAddressLocationResolver(
- ReturnAddressLocationResolver resolver);
- // Support for entry hooking JITed code.
- static void SetFunctionEntryHook(FunctionEntryHook entry_hook);
-
static void InitializePlatform(v8::Platform* platform);
static void ShutdownPlatform();
static v8::Platform* GetCurrentPlatform();
+ // Replaces the current platform with the given platform.
+ // Should be used only for testing.
+ static void SetPlatformForTesting(v8::Platform* platform);
static void SetNativesBlob(StartupData* natives_blob);
static void SetSnapshotBlob(StartupData* snapshot_blob);
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 93636d008f..37e6f1bcce 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -9,33 +9,30 @@
// ----------------------------------------------------------------------------
// Imports
+var FLAG_harmony_tostring;
var GlobalArray = global.Array;
var GlobalBoolean = global.Boolean;
var GlobalFunction = global.Function;
var GlobalNumber = global.Number;
var GlobalObject = global.Object;
var InternalArray = utils.InternalArray;
+var iteratorSymbol = utils.ImportNow("iterator_symbol");
var MathAbs;
var ProxyDelegateCallAndConstruct;
var ProxyDerivedHasOwnTrap;
var ProxyDerivedKeysTrap;
var StringIndexOf;
-var ToBoolean;
-var ToNumber;
-var ToString;
+var ToBoolean = utils.ImportNow("ToBoolean");
+var ToNumber = utils.ImportNow("ToNumber");
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
utils.Import(function(from) {
MathAbs = from.MathAbs;
StringIndexOf = from.StringIndexOf;
- ToString = from.ToString;
-});
-
-utils.ImportNow(function(from) {
- ToBoolean = from.ToBoolean;
- ToNumber = from.ToNumber;
});
utils.ImportFromExperimental(function(from) {
+ FLAG_harmony_tostring = from.FLAG_harmony_tostring;
ProxyDelegateCallAndConstruct = from.ProxyDelegateCallAndConstruct;
ProxyDerivedHasOwnTrap = from.ProxyDerivedHasOwnTrap;
ProxyDerivedKeysTrap = from.ProxyDerivedKeysTrap;
@@ -73,11 +70,11 @@ function GlobalParseInt(string, radix) {
// Truncate number.
return string | 0;
}
- string = TO_STRING_INLINE(string);
+ string = TO_STRING(string);
radix = radix | 0;
} else {
// The spec says ToString should be evaluated before ToInt32.
- string = TO_STRING_INLINE(string);
+ string = TO_STRING(string);
radix = TO_INT32(radix);
if (!(radix == 0 || (2 <= radix && radix <= 36))) {
return NAN;
@@ -94,7 +91,7 @@ function GlobalParseInt(string, radix) {
// ECMA-262 - 15.1.2.3
function GlobalParseFloat(string) {
- string = TO_STRING_INLINE(string);
+ string = TO_STRING(string);
if (%_HasCachedArrayIndex(string)) return %_GetCachedArrayIndex(string);
return %StringParseFloat(string);
}
@@ -148,8 +145,8 @@ function ObjectToString() {
var tag;
// TODO(caitp): cannot wait to get rid of this flag :>
- if (harmony_tostring) {
- tag = O[symbolToStringTag];
+ if (FLAG_harmony_tostring) {
+ tag = O[toStringTagSymbol];
if (!IS_STRING(tag)) {
tag = builtinTag;
}
@@ -176,7 +173,7 @@ function ObjectValueOf() {
// ECMA-262 - 15.2.4.5
function ObjectHasOwnProperty(value) {
- var name = $toName(value);
+ var name = TO_NAME(value);
var object = TO_OBJECT(this);
if (%_IsJSProxy(object)) {
@@ -193,14 +190,14 @@ function ObjectHasOwnProperty(value) {
// ECMA-262 - 15.2.4.6
function ObjectIsPrototypeOf(V) {
if (!IS_SPEC_OBJECT(V)) return false;
- CHECK_OBJECT_COERCIBLE(this, "Object.prototype.isPrototypeOf");
- return %IsInPrototypeChain(this, V);
+ var O = TO_OBJECT(this);
+ return %_HasInPrototypeChain(V, O);
}
// ECMA-262 - 15.2.4.6
function ObjectPropertyIsEnumerable(V) {
- var P = $toName(V);
+ var P = TO_NAME(V);
if (%_IsJSProxy(this)) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(V)) return false;
@@ -218,14 +215,14 @@ function ObjectDefineGetter(name, fun) {
if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
receiver = %GlobalProxy(ObjectDefineGetter);
}
- if (!IS_SPEC_FUNCTION(fun)) {
+ if (!IS_CALLABLE(fun)) {
throw MakeTypeError(kObjectGetterExpectingFunction);
}
var desc = new PropertyDescriptor();
desc.setGet(fun);
desc.setEnumerable(true);
desc.setConfigurable(true);
- DefineOwnProperty(TO_OBJECT(receiver), $toName(name), desc, false);
+ DefineOwnProperty(TO_OBJECT(receiver), TO_NAME(name), desc, false);
}
@@ -234,7 +231,7 @@ function ObjectLookupGetter(name) {
if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
receiver = %GlobalProxy(ObjectLookupGetter);
}
- return %LookupAccessor(TO_OBJECT(receiver), $toName(name), GETTER);
+ return %LookupAccessor(TO_OBJECT(receiver), TO_NAME(name), GETTER);
}
@@ -243,14 +240,14 @@ function ObjectDefineSetter(name, fun) {
if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
receiver = %GlobalProxy(ObjectDefineSetter);
}
- if (!IS_SPEC_FUNCTION(fun)) {
+ if (!IS_CALLABLE(fun)) {
throw MakeTypeError(kObjectSetterExpectingFunction);
}
var desc = new PropertyDescriptor();
desc.setSet(fun);
desc.setEnumerable(true);
desc.setConfigurable(true);
- DefineOwnProperty(TO_OBJECT(receiver), $toName(name), desc, false);
+ DefineOwnProperty(TO_OBJECT(receiver), TO_NAME(name), desc, false);
}
@@ -259,7 +256,7 @@ function ObjectLookupSetter(name) {
if (IS_NULL(receiver) || IS_UNDEFINED(receiver)) {
receiver = %GlobalProxy(ObjectLookupSetter);
}
- return %LookupAccessor(TO_OBJECT(receiver), $toName(name), SETTER);
+ return %LookupAccessor(TO_OBJECT(receiver), TO_NAME(name), SETTER);
}
@@ -369,7 +366,7 @@ function ToPropertyDescriptor(obj) {
if ("get" in obj) {
var get = obj.get;
- if (!IS_UNDEFINED(get) && !IS_SPEC_FUNCTION(get)) {
+ if (!IS_UNDEFINED(get) && !IS_CALLABLE(get)) {
throw MakeTypeError(kObjectGetterCallable, get);
}
desc.setGet(get);
@@ -377,7 +374,7 @@ function ToPropertyDescriptor(obj) {
if ("set" in obj) {
var set = obj.set;
- if (!IS_UNDEFINED(set) && !IS_SPEC_FUNCTION(set)) {
+ if (!IS_UNDEFINED(set) && !IS_CALLABLE(set)) {
throw MakeTypeError(kObjectSetterCallable, set);
}
desc.setSet(set);
@@ -537,7 +534,7 @@ function GetTrap(handler, name, defaultTrap) {
throw MakeTypeError(kProxyHandlerTrapMissing, handler, name);
}
trap = defaultTrap;
- } else if (!IS_SPEC_FUNCTION(trap)) {
+ } else if (!IS_CALLABLE(trap)) {
throw MakeTypeError(kProxyHandlerTrapMustBeCallable, handler, name);
}
return trap;
@@ -561,7 +558,7 @@ function CallTrap2(handler, name, defaultTrap, x, y) {
// ES5 section 8.12.1.
function GetOwnPropertyJS(obj, v) {
- var p = $toName(v);
+ var p = TO_NAME(v);
if (%_IsJSProxy(obj)) {
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(v)) return UNDEFINED;
@@ -606,7 +603,7 @@ function Delete(obj, p, should_throw) {
function GetMethod(obj, p) {
var func = obj[p];
if (IS_NULL_OR_UNDEFINED(func)) return UNDEFINED;
- if (IS_SPEC_FUNCTION(func)) return func;
+ if (IS_CALLABLE(func)) return func;
throw MakeTypeError(kCalledNonCallable, typeof func);
}
@@ -632,7 +629,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
// ES5 8.12.9.
function DefineObjectProperty(obj, p, desc, should_throw) {
- var current_array = %GetOwnProperty(obj, $toName(p));
+ var current_array = %GetOwnProperty(obj, TO_NAME(p));
var current = ConvertDescriptorArrayToDescriptor(current_array);
var extensible = %IsExtensible(obj);
@@ -807,7 +804,7 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
if (!IS_SYMBOL(p)) {
var index = TO_UINT32(p);
var emit_splice = false;
- if (ToString(index) == p && index != 4294967295) {
+ if (TO_STRING(index) == p && index != 4294967295) {
var length = obj.length;
if (index >= length && %IsObserved(obj)) {
emit_splice = true;
@@ -906,7 +903,7 @@ function ToNameArray(obj, trap, includeSymbols) {
var realLength = 0;
var names = { __proto__: null }; // TODO(rossberg): use sets once ready.
for (var index = 0; index < n; index++) {
- var s = $toName(obj[index]);
+ var s = TO_NAME(obj[index]);
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
if (IS_SYMBOL(s) && !includeSymbols) continue;
if (%HasOwnProperty(names, s)) {
@@ -975,7 +972,7 @@ function ObjectGetOwnPropertyKeys(obj, filter) {
}
} else {
if (filter & PROPERTY_ATTRIBUTES_STRING) continue;
- name = ToString(name);
+ name = TO_STRING(name);
}
if (seenKeys[name]) continue;
seenKeys[name] = true;
@@ -1032,7 +1029,7 @@ function ObjectDefineProperty(obj, p, attributes) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError(kCalledOnNonObject, "Object.defineProperty");
}
- var name = $toName(p);
+ var name = TO_NAME(p);
if (%_IsJSProxy(obj)) {
// Clone the attributes object for protection.
// TODO(rossberg): not spec'ed yet, so not sure if this should involve
@@ -1145,7 +1142,7 @@ function ObjectSealJS(obj) {
if (isProxy) {
ProxyFix(obj);
}
- var names = ObjectGetOwnPropertyNames(obj);
+ var names = OwnPropertyKeys(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
var desc = GetOwnPropertyJS(obj, name);
@@ -1175,7 +1172,7 @@ function ObjectFreezeJS(obj) {
if (isProxy) {
ProxyFix(obj);
}
- var names = ObjectGetOwnPropertyNames(obj);
+ var names = OwnPropertyKeys(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
var desc = GetOwnPropertyJS(obj, name);
@@ -1215,7 +1212,7 @@ function ObjectIsSealed(obj) {
if (%IsExtensible(obj)) {
return false;
}
- var names = ObjectGetOwnPropertyNames(obj);
+ var names = OwnPropertyKeys(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
var desc = GetOwnPropertyJS(obj, name);
@@ -1236,7 +1233,7 @@ function ObjectIsFrozen(obj) {
if (%IsExtensible(obj)) {
return false;
}
- var names = ObjectGetOwnPropertyNames(obj);
+ var names = OwnPropertyKeys(obj);
for (var i = 0; i < names.length; i++) {
var name = names[i];
var desc = GetOwnPropertyJS(obj, name);
@@ -1263,6 +1260,36 @@ function ObjectIs(obj1, obj2) {
}
+// ECMA-262, Edition 6, section 19.1.2.1
+function ObjectAssign(target, sources) {
+ // TODO(bmeurer): Move this to toplevel.
+ "use strict";
+ var to = TO_OBJECT(target);
+ var argsLen = %_ArgumentsLength();
+ if (argsLen < 2) return to;
+
+ for (var i = 1; i < argsLen; ++i) {
+ var nextSource = %_Arguments(i);
+ if (IS_NULL_OR_UNDEFINED(nextSource)) {
+ continue;
+ }
+
+ var from = TO_OBJECT(nextSource);
+ var keys = OwnPropertyKeys(from);
+ var len = keys.length;
+
+ for (var j = 0; j < len; ++j) {
+ var key = keys[j];
+ if (%IsPropertyEnumerable(from, key)) {
+ var propValue = from[key];
+ to[key] = propValue;
+ }
+ }
+ }
+ return to;
+}
+
+
// ECMA-262, Edition 6, section B.2.2.1.1
function ObjectGetProto() {
return %_GetPrototype(TO_OBJECT(this));
@@ -1317,6 +1344,7 @@ utils.InstallGetterSetter(GlobalObject.prototype, "__proto__", ObjectGetProto,
// Set up non-enumerable functions in the Object object.
utils.InstallFunctions(GlobalObject, DONT_ENUM, [
+ "assign", ObjectAssign,
"keys", ObjectKeys,
"create", ObjectCreate,
"defineProperty", ObjectDefineProperty,
@@ -1342,6 +1370,8 @@ utils.InstallFunctions(GlobalObject, DONT_ENUM, [
// Boolean
function BooleanConstructor(x) {
+ // TODO(bmeurer): Move this to toplevel.
+ "use strict";
if (%_IsConstructCall()) {
%_SetValueOf(this, ToBoolean(x));
} else {
@@ -1391,6 +1421,8 @@ utils.InstallFunctions(GlobalBoolean.prototype, DONT_ENUM, [
// Number
function NumberConstructor(x) {
+ // TODO(bmeurer): Move this to toplevel.
+ "use strict";
var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x);
if (%_IsConstructCall()) {
%_SetValueOf(this, value);
@@ -1504,7 +1536,7 @@ function NumberToPrecisionJS(precision) {
// Get the value of this number in case it's an object.
x = %_ValueOf(this);
}
- if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this));
+ if (IS_UNDEFINED(precision)) return TO_STRING(x);
var p = TO_INTEGER(precision);
if (NUMBER_IS_NAN(x)) return "NaN";
@@ -1541,7 +1573,7 @@ function NumberIsSafeInteger(number) {
if (NumberIsFinite(number)) {
var integral = TO_INTEGER(number);
if (integral == number) {
- return MathAbs(integral) <= GlobalNumber.MAX_SAFE_INTEGER;
+ return MathAbs(integral) <= kMaxSafeInteger;
}
}
return false;
@@ -1622,7 +1654,7 @@ function FunctionSourceString(func) {
throw MakeTypeError(kNotGeneric, 'Function.prototype.toString');
}
- if (%FunctionIsBuiltin(func)) {
+ if (%FunctionHidesSource(func)) {
return NativeCodeFunctionSourceString(func);
}
@@ -1659,7 +1691,7 @@ function FunctionToString() {
// ES5 15.3.4.5
function FunctionBind(this_arg) { // Length is 1.
- if (!IS_SPEC_FUNCTION(this)) throw MakeTypeError(kFunctionBind);
+ if (!IS_CALLABLE(this)) throw MakeTypeError(kFunctionBind);
var boundFunction = function () {
// Poison .arguments and .caller, but is otherwise not detectable.
@@ -1724,9 +1756,9 @@ function NewFunctionString(args, function_token) {
var n = args.length;
var p = '';
if (n > 1) {
- p = ToString(args[0]);
+ p = TO_STRING(args[0]);
for (var i = 1; i < n - 1; i++) {
- p += ',' + ToString(args[i]);
+ p += ',' + TO_STRING(args[i]);
}
// If the formal parameters string include ) - an illegal
// character - it may make the combined function expression
@@ -1739,7 +1771,7 @@ function NewFunctionString(args, function_token) {
// comments we can include a trailing block comment to catch this.
p += '\n/' + '**/';
}
- var body = (n > 0) ? ToString(args[n - 1]) : '';
+ var body = (n > 0) ? TO_STRING(args[n - 1]) : '';
return '(' + function_token + '(' + p + ') {\n' + body + '\n})';
}
@@ -1773,9 +1805,9 @@ utils.InstallFunctions(GlobalFunction.prototype, DONT_ENUM, [
// 7.4.1 GetIterator ( obj, method )
function GetIterator(obj, method) {
if (IS_UNDEFINED(method)) {
- method = obj[symbolIterator];
+ method = obj[iteratorSymbol];
}
- if (!IS_SPEC_FUNCTION(method)) {
+ if (!IS_CALLABLE(method)) {
throw MakeTypeError(kNotIterable, obj);
}
var iterator = %_CallFunction(obj, method);
@@ -1805,15 +1837,16 @@ utils.Export(function(to) {
to.ObjectIsFrozen = ObjectIsFrozen;
to.ObjectIsSealed = ObjectIsSealed;
to.ObjectToString = ObjectToString;
- to.OwnPropertyKeys = OwnPropertyKeys;
to.ToNameArray = ToNameArray;
});
-utils.ExportToRuntime(function(to) {
- to.GlobalEval = GlobalEval;
- to.ObjectDefineOwnProperty = DefineOwnPropertyFromAPI;
- to.ObjectGetOwnPropertyDescriptor = ObjectGetOwnPropertyDescriptor;
- to.ToCompletePropertyDescriptor = ToCompletePropertyDescriptor;
-});
+%InstallToContext([
+ "global_eval_fun", GlobalEval,
+ "object_value_of", ObjectValueOf,
+ "object_to_string", ObjectToString,
+ "object_define_own_property", DefineOwnPropertyFromAPI,
+ "object_get_own_property_descriptor", ObjectGetOwnPropertyDescriptor,
+ "to_complete_property_descriptor", ToCompletePropertyDescriptor,
+]);
})
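
The new ObjectAssign follows ES6 19.1.2.1: null and undefined sources are skipped, each remaining source is coerced with TO_OBJECT, and its own enumerable keys are copied onto the target in order, so later sources overwrite earlier ones. The core loop in C++ terms, with std::map standing in for a JS object (getter and proxy observability is out of scope here):

#include <cassert>
#include <initializer_list>
#include <map>
#include <string>

using Obj = std::map<std::string, int>;

Obj& Assign(Obj& to, std::initializer_list<const Obj*> sources) {
  for (const Obj* from : sources) {
    if (from == nullptr) continue;  // null/undefined sources are skipped
    for (const auto& entry : *from) {
      to[entry.first] = entry.second;  // in-order copy: later sources win
    }
  }
  return to;
}

int main() {
  Obj to{{"a", 1}};
  Obj s1{{"b", 2}}, s2{{"b", 3}};
  Assign(to, {&s1, nullptr, &s2});
  assert(to["b"] == 3 && to.size() == 2);
}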
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 45c1c8a25c..34d26ece7f 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -2,14 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/v8threads.h"
#include "src/api.h"
#include "src/bootstrapper.h"
#include "src/debug/debug.h"
#include "src/execution.h"
+#include "src/isolate-inl.h"
#include "src/regexp/regexp-stack.h"
-#include "src/v8threads.h"
namespace v8 {
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index c3ba517375..3e189d5cb4 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -5,6 +5,8 @@
#ifndef V8_V8THREADS_H_
#define V8_V8THREADS_H_
+#include "src/isolate.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index eae80c85cc..7305bf2576 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/v8-version.h"
-#include "src/v8.h"
#include "src/version.h"
+#include "include/v8-version.h"
+#include "src/utils.h"
+
// Define SONAME to have the build system put a specific SONAME into the
// shared library instead the generic SONAME generated from the V8 version
// number. This define is mainly used by the build system script.
diff --git a/deps/v8/src/version.h b/deps/v8/src/version.h
index dbcec1b27d..2596beeb8a 100644
--- a/deps/v8/src/version.h
+++ b/deps/v8/src/version.h
@@ -6,6 +6,7 @@
#define V8_VERSION_H_
#include "src/base/functional.h"
+#include "src/vector.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/weak-collection.js b/deps/v8/src/weak-collection.js
index 75350931ed..1c60a2f47a 100644
--- a/deps/v8/src/weak-collection.js
+++ b/deps/v8/src/weak-collection.js
@@ -11,6 +11,7 @@
var GlobalObject = global.Object;
var GlobalWeakMap = global.WeakMap;
var GlobalWeakSet = global.WeakSet;
+var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
// -------------------------------------------------------------------
// Harmony WeakMap
@@ -24,14 +25,14 @@ function WeakMapConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.set;
- if (!IS_SPEC_FUNCTION(adder)) {
+ if (!IS_CALLABLE(adder)) {
throw MakeTypeError(kPropertyNotFunction, 'set', this);
}
for (var nextItem of iterable) {
if (!IS_SPEC_OBJECT(nextItem)) {
throw MakeTypeError(kIteratorValueNotAnObject, nextItem);
}
- %_CallFunction(this, nextItem[0], nextItem[1], adder);
+ %_Call(adder, this, nextItem[0], nextItem[1]);
}
}
}
@@ -90,7 +91,7 @@ function WeakMapDelete(key) {
%FunctionSetPrototype(GlobalWeakMap, new GlobalObject());
%AddNamedProperty(GlobalWeakMap.prototype, "constructor", GlobalWeakMap,
DONT_ENUM);
-%AddNamedProperty(GlobalWeakMap.prototype, symbolToStringTag, "WeakMap",
+%AddNamedProperty(GlobalWeakMap.prototype, toStringTagSymbol, "WeakMap",
DONT_ENUM | READ_ONLY);
// Set up the non-enumerable functions on the WeakMap prototype object.
@@ -113,11 +114,11 @@ function WeakSetConstructor(iterable) {
if (!IS_NULL_OR_UNDEFINED(iterable)) {
var adder = this.add;
- if (!IS_SPEC_FUNCTION(adder)) {
+ if (!IS_CALLABLE(adder)) {
throw MakeTypeError(kPropertyNotFunction, 'add', this);
}
for (var value of iterable) {
- %_CallFunction(this, value, adder);
+ %_Call(adder, this, value);
}
}
}
@@ -164,7 +165,7 @@ function WeakSetDelete(value) {
%FunctionSetPrototype(GlobalWeakSet, new GlobalObject());
%AddNamedProperty(GlobalWeakSet.prototype, "constructor", GlobalWeakSet,
DONT_ENUM);
-%AddNamedProperty(GlobalWeakSet.prototype, symbolToStringTag, "WeakSet",
+%AddNamedProperty(GlobalWeakSet.prototype, toStringTagSymbol, "WeakSet",
DONT_ENUM | READ_ONLY);
// Set up the non-enumerable functions on the WeakSet prototype object.
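
Both weak-collection constructors switch from %_CallFunction(this, args..., adder) to %_Call(adder, this, args...): the callee moves from the trailing position to the front, giving one uniform call(target, receiver, arguments...) convention. The ordering, sketched with an ordinary C++ callable:

#include <cassert>

struct Receiver { int total = 0; };

// call(target, receiver, args...): the callable leads, mirroring
// %_Call(adder, this, value) rather than %_CallFunction(this, value, adder).
template <typename F, typename... Args>
void Call(F target, Receiver& receiver, Args... args) {
  target(receiver, args...);
}

int main() {
  auto adder = [](Receiver& self, int v) { self.total += v; };
  Receiver r;
  Call(adder, r, 2);
  Call(adder, r, 3);
  assert(r.total == 5);
}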
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 74d5d2436a..c66e86df3f 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -277,7 +277,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
ICacheFlushMode icache_flush_mode) {
Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc, sizeof(int32_t));
+ Assembler::FlushICacheWithoutIsolate(pc, sizeof(int32_t));
}
}
@@ -404,7 +404,7 @@ void RelocInfo::set_target_object(Object* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -451,7 +451,7 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL) {
@@ -527,8 +527,8 @@ void RelocInfo::set_debug_call_address(Address target) {
DCHECK(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence());
Memory::Address_at(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset) =
target;
- CpuFeatures::FlushICache(pc_ + Assembler::kPatchDebugBreakSlotAddressOffset,
- sizeof(Address));
+ Assembler::FlushICacheWithoutIsolate(
+ pc_ + Assembler::kPatchDebugBreakSlotAddressOffset, sizeof(Address));
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -541,7 +541,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
@@ -566,7 +566,7 @@ void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ Assembler::FlushICache(heap->isolate(), pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
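
Every hunk in assembler-x64-inl.h is the same substitution: after patching an embedded pointer or call target, the instruction cache is flushed over exactly the bytes written, and the helper moves from CpuFeatures to Assembler::FlushICacheWithoutIsolate for call sites that have no Isolate at hand. The underlying patch-then-flush pattern, with a hypothetical placeholder for the platform flush primitive (a no-op on x86, where i-cache and d-cache stay coherent; a real flush on ARM):

#include <cstddef>
#include <cstdint>
#include <cstring>

void FlushInstructionCache(void* start, std::size_t size) {
  (void)start;
  (void)size;  // hypothetical stand-in for the per-platform primitive
}

void PatchInt32(uint8_t* pc, int32_t value) {
  std::memcpy(pc, &value, sizeof(value));    // write the new operand bytes
  FlushInstructionCache(pc, sizeof(value));  // then invalidate stale code
}

int main() {
  uint8_t buffer[8] = {};
  PatchInt32(buffer, 42);
}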
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 421368a5d1..38d7e5abeb 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -23,13 +23,20 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// -- rax : number of arguments excluding receiver
// -- rdi : called function (only guaranteed when
// extra_args requires it)
- // -- rsi : context
// -- rsp[0] : return address
// -- rsp[8] : last argument
// -- ...
// -- rsp[8 * argc] : first argument (argc == rax)
// -- rsp[8 * (argc + 1)] : receiver
// -----------------------------------
+ __ AssertFunction(rdi);
+
+ // Make sure we operate in the context of the called function (for example
+ // ConstructStubs implemented in C++ will be run in the context of the caller
+ // instead of the callee, due to the way that [[Construct]] is defined for
+ // ordinary functions).
+ // TODO(bmeurer): Can we make this more robust?
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
@@ -98,8 +105,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_memento) {
+ bool is_api_function) {
// ----------- S t a t e -------------
// -- rax: number of arguments
// -- rdi: constructor function
@@ -107,9 +113,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- rdx: original constructor
// -----------------------------------
- // Should never create mementos for api functions.
- DCHECK(!is_api_function || !create_memento);
-
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
@@ -189,9 +192,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Now allocate the JSObject on the heap.
__ movzxbp(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
__ shlp(rdi, Immediate(kPointerSizeLog2));
- if (create_memento) {
- __ addp(rdi, Immediate(AllocationMemento::kSize));
- }
// rdi: size of new object
__ Allocate(rdi,
rbx,
@@ -199,11 +199,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
no_reg,
&rt_call,
NO_ALLOCATION_FLAGS);
- Factory* factory = masm->isolate()->factory();
// Allocated the JSObject, now initialize the fields.
// rax: initial map
// rbx: JSObject (not HeapObject tagged - the actual address).
- // rdi: start of next object (including memento if create_memento)
+ // rdi: start of next object
__ movp(Operand(rbx, JSObject::kMapOffset), rax);
__ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
__ movp(Operand(rbx, JSObject::kPropertiesOffset), rcx);
@@ -211,7 +210,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Set extra fields in the newly allocated object.
// rax: initial map
// rbx: JSObject
- // rdi: start of next object (including memento if create_memento)
+ // rdi: start of next object
// rsi: slack tracking counter (non-API function case)
__ leap(rcx, Operand(rbx, JSObject::kHeaderSize));
__ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
@@ -243,21 +242,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&no_inobject_slack_tracking);
}
- if (create_memento) {
- __ leap(rsi, Operand(rdi, -AllocationMemento::kSize));
- __ InitializeFieldsWithFiller(rcx, rsi, rdx);
- // Fill in memento fields if necessary.
- // rsi: points to the allocated but uninitialized memento.
- __ Move(Operand(rsi, AllocationMemento::kMapOffset),
- factory->allocation_memento_map());
- // Get the cell or undefined.
- __ movp(rdx, Operand(rsp, 3 * kPointerSize));
- __ AssertUndefinedOrAllocationSite(rdx);
- __ movp(Operand(rsi, AllocationMemento::kAllocationSiteOffset), rdx);
- } else {
- __ InitializeFieldsWithFiller(rcx, rdi, rdx);
- }
+ __ InitializeFieldsWithFiller(rcx, rdi, rdx);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on.
@@ -273,48 +259,19 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: original constructor
__ bind(&rt_call);
int offset = kPointerSize;
- if (create_memento) {
- // Get the cell or allocation site.
- __ movp(rdi, Operand(rsp, kPointerSize * 3));
- __ Push(rdi); // argument 1: allocation site
- offset += kPointerSize;
- }
// Must restore rsi (context) and rdi (constructor) before calling runtime.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ movp(rdi, Operand(rsp, offset));
__ Push(rdi); // argument 2/1: constructor function
__ Push(rdx); // argument 3/2: original constructor
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
+ __ CallRuntime(Runtime::kNewObject, 2);
__ movp(rbx, rax); // store result in rbx
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- Label count_incremented;
- if (create_memento) {
- __ jmp(&count_incremented);
- }
-
// New object allocated.
// rbx: newly allocated object
__ bind(&allocated);
- if (create_memento) {
- __ movp(rcx, Operand(rsp, 3 * kPointerSize));
- __ Cmp(rcx, masm->isolate()->factory()->undefined_value());
- __ j(equal, &count_incremented);
- // rcx is an AllocationSite. We are creating a memento from it, so we
- // need to increment the memento create count.
- __ SmiAddConstant(
- FieldOperand(rcx, AllocationSite::kPretenureCreateCountOffset),
- Smi::FromInt(1));
- __ bind(&count_incremented);
- }
-
// Restore the parameters.
__ Pop(rdx);
__ Pop(rdi);
@@ -403,12 +360,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true);
}
@@ -491,9 +448,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
enum IsTagged { kRaxIsSmiTagged, kRaxIsUntaggedInt };
-// Clobbers rcx, rdx, kScratchRegister; preserves all other registers.
+// Clobbers rcx, r11, kScratchRegister; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm,
- const int calleeOffset,
IsTagged rax_is_tagged) {
// rax : the number of items to be pushed to the stack
//
@@ -506,26 +462,21 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
// Make rcx the space we have left. The stack might already be overflowed
// here which will cause rcx to become negative.
__ subp(rcx, kScratchRegister);
- // Make rdx the space we need for the array when it is unrolled onto the
+ // Make r11 the space we need for the array when it is unrolled onto the
// stack.
if (rax_is_tagged == kRaxIsSmiTagged) {
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+ __ PositiveSmiTimesPowerOfTwoToInteger64(r11, rax, kPointerSizeLog2);
} else {
DCHECK(rax_is_tagged == kRaxIsUntaggedInt);
- __ movp(rdx, rax);
- __ shlq(rdx, Immediate(kPointerSizeLog2));
+ __ movp(r11, rax);
+ __ shlq(r11, Immediate(kPointerSizeLog2));
}
// Check if the arguments will overflow the stack.
- __ cmpp(rcx, rdx);
+ __ cmpp(rcx, r11);
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ Push(Operand(rbp, calleeOffset));
- if (rax_is_tagged == kRaxIsUntaggedInt) {
- __ Integer32ToSmi(rax, rax);
- }
- __ Push(rax);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&okay);
}
@@ -536,8 +487,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Expects five C++ function parameters.
- // - Address entry (ignored)
- // - JSFunction* function (
+ // - Object* new_target
+ // - JSFunction* function
// - Object* receiver
// - int argc
// - Object*** argv
@@ -548,11 +499,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Platform specific argument handling. After this, the stack contains
// an internal frame and the pushed function and receiver, and
// register rax and rbx holds the argument count and argument array,
- // while rdi holds the function pointer and rsi the context.
+ // while rdi holds the function pointer, rsi the context, and rdx the
+ // new.target.
#ifdef _WIN64
// MSVC parameters in:
- // rcx : entry (ignored)
+ // rcx : new_target
// rdx : function
// r8 : receiver
// r9 : argc
@@ -560,11 +512,14 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Clear the context before we push it when entering the internal frame.
__ Set(rsi, 0);
+
// Enter an internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
- // Load the function context into rsi.
- __ movp(rsi, FieldOperand(rdx, JSFunction::kContextOffset));
+ // Set up the context (we need to use the caller context from the isolate).
+ ExternalReference context_address(Isolate::kContextAddress,
+ masm->isolate());
+ __ movp(rsi, masm->ExternalOperand(context_address));
// Push the function and the receiver onto the stack.
__ Push(rdx);
@@ -577,30 +532,42 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ movp(rbx, Operand(kScratchRegister, EntryFrameConstants::kArgvOffset));
// Load the function pointer into rdi.
__ movp(rdi, rdx);
+ // Load the new.target into rdx.
+ __ movp(rdx, rcx);
#else // _WIN64
// GCC parameters in:
- // rdi : entry (ignored)
+ // rdi : new_target
// rsi : function
// rdx : receiver
// rcx : argc
// r8 : argv
+ __ movp(r11, rdi);
__ movp(rdi, rsi);
// rdi : function
+ // r11 : new_target
// Clear the context before we push it when entering the internal frame.
__ Set(rsi, 0);
+
// Enter an internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the function and receiver and setup the context.
+ // Set up the context (we need to use the caller context from the isolate).
+ ExternalReference context_address(Isolate::kContextAddress,
+ masm->isolate());
+ __ movp(rsi, masm->ExternalOperand(context_address));
+
+ // Push the function and receiver onto the stack.
__ Push(rdi);
__ Push(rdx);
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Load the number of arguments and setup pointer to the arguments.
__ movp(rax, rcx);
__ movp(rbx, r8);
+
+ // Load the new.target into rdx.
+ __ movp(rdx, r11);
#endif // _WIN64
// Current stack contents:
@@ -612,21 +579,18 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// rbx : argv
// rsi : context
// rdi : function
+ // rdx : new.target
// Check if we have enough stack space to push all arguments.
- // The function is the first thing that was pushed above after entering
- // the internal frame.
- const int kFunctionOffset =
- InternalFrameConstants::kCodeOffset - kRegisterSize;
- // Expects argument count in rax. Clobbers rcx, rdx.
- Generate_CheckStackOverflow(masm, kFunctionOffset, kRaxIsUntaggedInt);
+ // Expects argument count in rax. Clobbers rcx, r11.
+ Generate_CheckStackOverflow(masm, kRaxIsUntaggedInt);
// Copy arguments to the stack in a loop.
// Register rbx points to array of pointers to handle locations.
// Push the values of these handles.
Label loop, entry;
__ Set(rcx, 0); // Set loop variable to 0.
- __ jmp(&entry);
+ __ jmp(&entry, Label::kNear);
__ bind(&loop);
__ movp(kScratchRegister, Operand(rbx, rcx, times_pointer_size, 0));
__ Push(Operand(kScratchRegister, 0)); // dereference handle
@@ -635,18 +599,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ cmpp(rcx, rax);
__ j(not_equal, &loop);
- // Invoke the code.
- if (is_construct) {
- // No type feedback cell is available
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- // Expects rdi to hold function pointer.
- CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(rax);
- // Function must be in rdi.
- __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
- }
+ // Invoke the builtin code.
+ Handle<Code> builtin = is_construct
+ ? masm->isolate()->builtins()->Construct()
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
+
// Exit the internal frame. Notice that this also removes the empty
// context and the function left on the stack by the code
// invocation.
@@ -718,7 +676,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ subp(rdx, rcx);
__ CompareRoot(rdx, Heap::kRealStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -802,9 +760,14 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// Leave the frame (also dropping the register file).
__ leave();
- // Return droping receiver + arguments.
- // TODO(rmcilroy): Get number of arguments from BytecodeArray.
- __ Ret(1 * kPointerSize, rcx);
+
+ // Drop receiver + arguments and return.
+ __ movl(rbx, FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kParameterSizeOffset));
+ __ PopReturnAddressTo(rcx);
+ __ addp(rsp, rbx);
+ __ PushReturnAddressFrom(rcx);
+ __ ret(0);
}
@@ -998,6 +961,7 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
+// static
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Stack Layout:
// rsp[0] : Return address
@@ -1005,162 +969,46 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// rsp[16] : Argument n-1
// ...
// rsp[8 * n] : Argument 1
- // rsp[8 * (n + 1)] : Receiver (function to call)
+ // rsp[8 * (n + 1)] : Receiver (callable to call)
//
// rax contains the number of arguments, n, not counting the receiver.
//
// 1. Make sure we have at least one argument.
- { Label done;
+ {
+ Label done;
__ testp(rax, rax);
- __ j(not_zero, &done);
+ __ j(not_zero, &done, Label::kNear);
__ PopReturnAddressTo(rbx);
- __ Push(masm->isolate()->factory()->undefined_value());
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
__ PushReturnAddressFrom(rbx);
__ incp(rax);
__ bind(&done);
}
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- Label slow, non_function;
- StackArgumentsAccessor args(rsp, rax);
- __ movp(rdi, args.GetReceiverOperand());
- __ JumpIfSmi(rdi, &non_function);
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &slow);
-
- // 3a. Patch the first argument if necessary when calling a function.
- Label shift_arguments;
- __ Set(rdx, 0); // indicate regular JS_FUNCTION
- { Label convert_to_object, use_global_proxy, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rbx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &shift_arguments);
-
- // Do not transform the receiver for natives.
- // SharedFunctionInfo is already loaded into rbx.
- __ testb(FieldOperand(rbx, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_zero, &shift_arguments);
-
- // Compute the receiver in sloppy mode.
- __ movp(rbx, args.GetArgumentOperand(1));
- __ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
-
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_proxy);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_proxy);
-
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &shift_arguments);
-
- __ bind(&convert_to_object);
- {
- // Enter an internal frame in order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Integer32ToSmi(rax, rax);
- __ Push(rax);
-
- __ movp(rax, rbx);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ movp(rbx, rax);
- __ Set(rdx, 0); // indicate regular JS_FUNCTION
-
- __ Pop(rax);
- __ SmiToInteger32(rax, rax);
- }
-
- // Restore the function to rdi.
+ // 2. Get the callable to call (passed as receiver) from the stack.
+ {
+ StackArgumentsAccessor args(rsp, rax);
__ movp(rdi, args.GetReceiverOperand());
- __ jmp(&patch_receiver, Label::kNear);
-
- __ bind(&use_global_proxy);
- __ movp(rbx,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalProxyOffset));
-
- __ bind(&patch_receiver);
- __ movp(args.GetArgumentOperand(1), rbx);
-
- __ jmp(&shift_arguments);
}
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ Set(rdx, 1); // indicate function proxy
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(equal, &shift_arguments);
- __ bind(&non_function);
- __ Set(rdx, 2); // indicate non-function
-
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- __ movp(args.GetArgumentOperand(1), rdi);
-
- // 4. Shift arguments and return address one slot down on the stack
+ // 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
- __ bind(&shift_arguments);
- { Label loop;
+ {
+ Label loop;
__ movp(rcx, rax);
StackArgumentsAccessor args(rsp, rcx);
__ bind(&loop);
__ movp(rbx, args.GetArgumentOperand(1));
__ movp(args.GetArgumentOperand(0), rbx);
__ decp(rcx);
- __ j(not_zero, &loop); // While non-zero.
+ __ j(not_zero, &loop); // While non-zero.
__ DropUnderReturnAddress(1, rbx); // Drop one slot under return address.
__ decp(rax); // One fewer argument (first argument is new receiver).
}
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- { Label function, non_proxy;
- __ testp(rdx, rdx);
- __ j(zero, &function);
- __ Set(rbx, 0);
- __ cmpp(rdx, Immediate(1));
- __ j(not_equal, &non_proxy);
-
- __ PopReturnAddressTo(rdx);
- __ Push(rdi); // re-add proxy object as additional argument
- __ PushReturnAddressFrom(rdx);
- __ incp(rax);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ LoadSharedFunctionInfoSpecialField(rbx, rdx,
- SharedFunctionInfo::kFormalParameterCountOffset);
- __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
- __ cmpp(rax, rbx);
- __ j(not_equal,
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- ParameterCount expected(0);
- __ InvokeCode(rdx, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
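
The rewritten Generate_FunctionCall keeps only the generic steps: guarantee at least one argument, load the callable out of the receiver slot, shift every argument down one stack slot so the first argument becomes the new receiver, then tail-call the universal Call builtin instead of patching receivers or special-casing proxies inline. The shift step, restated over a plain array:

#include <cassert>
#include <cstddef>
#include <vector>

// slots[0] is the deepest slot (the receiver); slots.back() the top argument.
void ShiftOutReceiver(std::vector<int>& slots) {
  for (std::size_t i = 0; i + 1 < slots.size(); ++i) {
    slots[i] = slots[i + 1];  // each value drops into the slot below it
  }
  slots.pop_back();  // one fewer slot: the old first argument is the receiver
}

int main() {
  std::vector<int> slots{99, 10, 20};  // receiver, arg1, arg2
  ShiftOutReceiver(slots);
  assert((slots == std::vector<int>{10, 20}));  // arg1 now fills that role
}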
@@ -1242,96 +1090,29 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ Push(Operand(rbp, kFunctionOffset));
__ Push(Operand(rbp, kArgumentsOffset));
if (targetIsArgument) {
- __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
}
- Generate_CheckStackOverflow(masm, kFunctionOffset, kRaxIsSmiTagged);
+ Generate_CheckStackOverflow(masm, kRaxIsSmiTagged);
// Push current index and limit, and receiver.
const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ Push(rax); // limit
- __ Push(Immediate(0)); // index
-
- // Get the receiver.
- __ movp(rbx, Operand(rbp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver;
- __ movp(rdi, Operand(rbp, kFunctionOffset));
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- Label call_to_object, use_global_proxy;
- __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
- Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
- __ j(not_equal, &push_receiver);
-
- // Do not transform the receiver for natives.
- __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
- Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
- __ j(not_equal, &push_receiver);
-
- // Compute the receiver in sloppy mode.
- __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
- __ CompareRoot(rbx, Heap::kNullValueRootIndex);
- __ j(equal, &use_global_proxy);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &use_global_proxy);
-
- // If given receiver is already a JavaScript object then there's no
- // reason for converting it.
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &push_receiver);
-
- // Convert the receiver to an object.
- __ bind(&call_to_object);
- __ movp(rax, rbx);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ movp(rbx, rax);
- __ jmp(&push_receiver, Label::kNear);
-
- __ bind(&use_global_proxy);
- __ movp(rbx,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalProxyOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ Push(rbx);
+ __ Push(rax); // limit
+ __ Push(Immediate(0)); // index
+ __ Push(Operand(rbp, kReceiverOffset)); // receiver
// Loop over the arguments array, pushing each value to the stack
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
- // Call the function.
- Label call_proxy;
- ParameterCount actual(rax);
+ // Call the callable.
+ // TODO(bmeurer): This should be a tail call according to ES6.
__ movp(rdi, Operand(rbp, kFunctionOffset));
- __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
- __ j(not_equal, &call_proxy);
- __ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
-
- frame_scope.GenerateLeaveFrame();
- __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
-
- // Call the function proxy.
- __ bind(&call_proxy);
- __ Push(rdi); // add function proxy as last argument
- __ incp(rax);
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
// Leave internal frame.
}
@@ -1382,9 +1163,10 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
__ Push(Operand(rbp, kFunctionOffset));
__ Push(Operand(rbp, kArgumentsOffset));
__ Push(Operand(rbp, kNewTargetOffset));
- __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
- Generate_CheckStackOverflow(masm, kFunctionOffset, kRaxIsSmiTagged);
+ Generate_CheckStackOverflow(masm, kRaxIsSmiTagged);
// Push current index and limit.
const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
@@ -1490,7 +1272,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax : number of arguments
// -- rdi : constructor function
@@ -1498,121 +1281,138 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -- rsp[(argc - n) * 8] : arg[n] (zero-based)
// -- rsp[(argc + 1) * 8] : receiver
// -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1);
-
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, rcx);
- __ cmpp(rdi, rcx);
- __ Assert(equal, kUnexpectedStringFunction);
- }
- // Load the first argument into rax and get rid of the rest
- // (including the receiver).
- StackArgumentsAccessor args(rsp, rax);
+ // 1. Load the first argument into rax and get rid of the rest (including the
+ // receiver).
Label no_arguments;
- __ testp(rax, rax);
- __ j(zero, &no_arguments);
- __ movp(rbx, args.GetArgumentOperand(1));
- __ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
- __ PushReturnAddressFrom(rcx);
- __ movp(rax, rbx);
-
- // Lookup the argument in the number to string cache.
- Label not_cached, argument_is_string;
- __ LookupNumberStringCache(rax, // Input.
- rbx, // Result.
- rcx, // Scratch 1.
- rdx, // Scratch 2.
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1);
- __ bind(&argument_is_string);
+ {
+ StackArgumentsAccessor args(rsp, rax);
+ __ testp(rax, rax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ movp(rbx, args.GetArgumentOperand(1));
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ __ movp(rax, rbx);
+ }
- // ----------- S t a t e -------------
- // -- rbx : argument converted to string
- // -- rdi : constructor function
- // -- rsp[0] : return address
- // -----------------------------------
+ // 2a. At least one argument, return rax if it's a string, otherwise
+ // dispatch to appropriate conversion.
+ Label to_string, symbol_descriptive_string;
+ {
+ __ JumpIfSmi(rax, &to_string, Label::kNear);
+ STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
+ __ j(above, &to_string, Label::kNear);
+ __ j(equal, &symbol_descriptive_string, Label::kNear);
+ __ Ret();
+ }
- // Allocate a JSValue and put the tagged pointer into rax.
- Label gc_required;
- __ Allocate(JSValue::kSize,
- rax, // Result.
- rcx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
-
- // Set the map.
- __ LoadGlobalFunctionInitialMap(rdi, rcx);
- if (FLAG_debug_code) {
- __ cmpb(FieldOperand(rcx, Map::kInstanceSizeOffset),
- Immediate(JSValue::kSize >> kPointerSizeLog2));
- __ Assert(equal, kUnexpectedStringWrapperInstanceSize);
- __ cmpb(FieldOperand(rcx, Map::kUnusedPropertyFieldsOffset), Immediate(0));
- __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
+ // 2b. No arguments, return the empty string (and pop the receiver).
+ __ bind(&no_arguments);
+ {
+ __ LoadRoot(rax, Heap::kempty_stringRootIndex);
+ __ ret(1 * kPointerSize);
}
- __ movp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
- // Set properties and elements.
- __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
- __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
- __ movp(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+ // 3a. Convert rax to a string.
+ __ bind(&to_string);
+ {
+ ToStringStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ }
- // Set the value.
- __ movp(FieldOperand(rax, JSValue::kValueOffset), rbx);
+ // 3b. Convert symbol in rax to a string.
+ __ bind(&symbol_descriptive_string);
+ {
+ __ PopReturnAddressTo(rcx);
+ __ Push(rax);
+ __ PushReturnAddressFrom(rcx);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ }
+}
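// The rewritten Generate_StringConstructor above implements the [[Call]]
// behaviour of the String constructor. A hedged sketch of the equivalent
// logic (illustrative pseudocode, not actual V8 helpers):
//
//   if (argc == 0)    return "";                           // 2b
//   x = args[0];                                           // 1
//   if (IsString(x))  return x;                            // 2a
//   if (IsSymbol(x))  return SymbolDescriptiveString(x);   // 3b
//   return ToString(x);                                    // 3a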
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- // We're done. Return.
- __ ret(0);
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : number of arguments
+ // -- rdi : constructor function
+ // -- rsp[0] : return address
+ // -- rsp[(argc - n) * 8] : arg[n] (zero-based)
+ // -- rsp[(argc + 1) * 8] : receiver
+ // -----------------------------------
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(rax, &convert_argument);
- Condition is_string = masm->IsObjectStringType(rax, rbx, rcx);
- __ j(NegateCondition(is_string), &convert_argument);
- __ movp(rbx, rax);
- __ IncrementCounter(counters->string_ctor_string_value(), 1);
- __ jmp(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into rbx.
- __ bind(&convert_argument);
- __ IncrementCounter(counters->string_ctor_conversions(), 1);
+ // 1. Load the first argument into rbx and get rid of the rest (including the
+ // receiver).
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rdi); // Preserve the function.
- __ Push(rax);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ Pop(rdi);
+ StackArgumentsAccessor args(rsp, rax);
+ Label no_arguments, done;
+ __ testp(rax, rax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ movp(rbx, args.GetArgumentOperand(1));
+ __ jmp(&done, Label::kNear);
+ __ bind(&no_arguments);
+ __ LoadRoot(rbx, Heap::kempty_stringRootIndex);
+ __ bind(&done);
+ __ PopReturnAddressTo(rcx);
+ __ leap(rsp, Operand(rsp, rax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(rcx);
}
- __ movp(rbx, rax);
- __ jmp(&argument_is_string);
- // Load the empty string into rbx, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ LoadRoot(rbx, Heap::kempty_stringRootIndex);
- __ PopReturnAddressTo(rcx);
- __ leap(rsp, Operand(rsp, kPointerSize));
- __ PushReturnAddressFrom(rcx);
- __ jmp(&argument_is_string);
+ // 2. Make sure rbx is a string.
+ {
+ Label convert, done_convert;
+ __ JumpIfSmi(rbx, &convert, Label::kNear);
+ __ CmpObjectType(rbx, FIRST_NONSTRING_TYPE, rdx);
+ __ j(below, &done_convert);
+ __ bind(&convert);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ToStringStub stub(masm->isolate());
+ __ Push(rdi);
+ __ Move(rax, rbx);
+ __ CallStub(&stub);
+ __ Move(rbx, rax);
+ __ Pop(rdi);
+ }
+ __ bind(&done_convert);
+ }
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1);
+ // 3. Allocate a JSValue wrapper for the string.
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(rbx);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ // ----------- S t a t e -------------
+ // -- rbx : the first argument
+ // -- rdi : constructor function
+ // -----------------------------------
+
+ Label allocate, done_allocate;
+ __ Allocate(JSValue::kSize, rax, rcx, no_reg, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Initialize the JSValue in rax.
+ __ LoadGlobalFunctionInitialMap(rdi, rcx);
+ __ movp(FieldOperand(rax, HeapObject::kMapOffset), rcx);
+ __ LoadRoot(rcx, Heap::kEmptyFixedArrayRootIndex);
+ __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), rcx);
+ __ movp(FieldOperand(rax, JSObject::kElementsOffset), rcx);
+ __ movp(FieldOperand(rax, JSValue::kValueOffset), rbx);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+ __ Ret();
+
+  // Fall back to the runtime to allocate in new space.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rbx);
+ __ Push(rdi);
+ __ Push(Smi::FromInt(JSValue::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ Pop(rdi);
+ __ Pop(rbx);
+ }
+ __ jmp(&done_allocate);
}
- __ ret(0);
}
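// Generate_StringConstructor_ConstructStub above is the matching
// [[Construct]] path: the only difference from the call path is that the
// converted string ends up wrapped in a JSValue. Roughly (illustrative
// pseudocode; Allocate/ToString stand in for the stubs used above):
//
//   s = (argc == 0) ? "" : ToString(args[0]);
//   wrapper = Allocate(JSValue::kSize);    // runtime fallback on failure
//   wrapper.map = initial map of the String function;
//   wrapper.value = s;
//   return wrapper;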
@@ -1783,6 +1583,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Call the entry point.
__ bind(&invoke);
+ __ movp(rax, rbx);
+ // rax : expected number of arguments
+  // rdi : function (passed through to callee)
__ call(rdx);
// Store offset of return address for deoptimizer.
@@ -1802,12 +1605,268 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ int3();
}
}
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdi : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+
+ Label convert, convert_global_proxy, convert_to_object, done_convert;
+ StackArgumentsAccessor args(rsp, rax);
+ __ AssertFunction(rdi);
+ // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
+ // slot is "classConstructor".
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+ SharedFunctionInfo::kStrictModeByteOffset);
+ __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ __ testb(FieldOperand(rdx, SharedFunctionInfo::kNativeByteOffset),
+ Immediate((1 << SharedFunctionInfo::kNativeBitWithinByte) |
+ (1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
+ __ j(not_zero, &done_convert);
+ {
+ __ movp(rcx, args.GetReceiverOperand());
+
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rcx : the receiver
+ // -- rdx : the shared function info.
+ // -- rdi : the function to call (checked to be a JSFunction)
+ // -- rsi : the function context.
+ // -----------------------------------
+
+ Label convert_receiver;
+ __ JumpIfSmi(rcx, &convert_to_object, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(rcx, FIRST_JS_RECEIVER_TYPE, rbx);
+ __ j(above_equal, &done_convert);
+ __ JumpIfRoot(rcx, Heap::kUndefinedValueRootIndex, &convert_global_proxy,
+ Label::kNear);
+ __ JumpIfNotRoot(rcx, Heap::kNullValueRootIndex, &convert_to_object,
+ Label::kNear);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(rcx);
+ }
+ __ jmp(&convert_receiver);
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Integer32ToSmi(rax, rax);
+ __ Push(rax);
+ __ Push(rdi);
+ __ movp(rax, rcx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ movp(rcx, rax);
+ __ Pop(rdi);
+ __ Pop(rax);
+ __ SmiToInteger32(rax, rax);
+ }
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ __ movp(args.GetReceiverOperand(), rcx);
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the shared function info.
+ // -- rdi : the function to call (checked to be a JSFunction)
+ // -- rsi : the function context.
+ // -----------------------------------
+
+ __ LoadSharedFunctionInfoSpecialField(
+ rbx, rdx, SharedFunctionInfo::kFormalParameterCountOffset);
+ __ movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
+ ParameterCount actual(rax);
+ ParameterCount expected(rbx);
+ __ InvokeCode(rdx, expected, actual, JUMP_FUNCTION, NullCallWrapper());
+}
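// The conversion block above mirrors ES6 9.2.1 [[Call]] (OrdinaryCallBindThis)
// for sloppy-mode, non-native functions. A hedged sketch (illustrative names):
//
//   if (!shared.native && !shared.strict) {
//     if (receiver == undefined || receiver == null)
//       receiver = global proxy of the function's context;
//     else if (!IsJSReceiver(receiver))
//       receiver = ToObject(receiver);    // needs a frame, see above
//   }
//   tail-call the function's code with expected = formal parameter count
//   and actual = rax;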
+
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdi : the target to call (can be any Object)
+ // -----------------------------------
+ StackArgumentsAccessor args(rsp, rax);
+
+ Label non_callable, non_function, non_smi;
+ __ JumpIfSmi(rdi, &non_callable);
+ __ bind(&non_smi);
+ __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+ __ j(equal, masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, &non_function);
+
+ // 1. Call to function proxy.
+ // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
+ __ movp(rdi, FieldOperand(rdi, JSFunctionProxy::kCallTrapOffset));
+ __ AssertNotSmi(rdi);
+ __ jmp(&non_smi);
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+  // not, we raise an exception).
+ __ bind(&non_function);
+ // Check if target has a [[Call]] internal method.
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsCallable));
+ __ j(zero, &non_callable, Label::kNear);
+ // Overwrite the original receiver with the (original) target.
+ __ movp(args.GetReceiverOperand(), rdi);
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, rdi);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
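// Dispatch summary for the new generic Call builtin above (pseudocode;
// predicate names are illustrative):
//
//   if (IsSmi(target))             throw TypeError;          // non-callable
//   if (IsJSFunction(target))      jump CallFunction;        // fast path
//   if (IsJSFunctionProxy(target)) target = [[CallTrap]], retry;
//   if (target map is callable)    receiver = target,
//                                  jump CallFunction(call delegate);
//   throw TypeError;                                         // non-callable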
+
+
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the original constructor (checked to be a JSFunction)
+ // -- rdi : the constructor to call (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(rdx);
+ __ AssertFunction(rdi);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // rbx to contain either an AllocationSite or undefined.
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
+}
+
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- rdi : the constructor to call (checked to be a JSFunctionProxy)
+ // -----------------------------------
+
+ // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
+ __ movp(rdi, FieldOperand(rdi, JSFunctionProxy::kConstructTrapOffset));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- rdi : the constructor to call (can be any Object)
+ // -----------------------------------
+ StackArgumentsAccessor args(rsp, rax);
+
+ // Check if target has a [[Construct]] internal method.
+ Label non_constructor;
+ __ JumpIfSmi(rdi, &non_constructor, Label::kNear);
+ __ movp(rcx, FieldOperand(rdi, HeapObject::kMapOffset));
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsConstructor));
+ __ j(zero, &non_constructor, Label::kNear);
+
+ // Dispatch based on instance type.
+ __ CmpInstanceType(rcx, JS_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET);
+ __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, masm->isolate()->builtins()->ConstructProxy(),
+ RelocInfo::CODE_TARGET);
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ movp(args.GetReceiverOperand(), rdi);
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, rdi);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(rdi);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
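// Generate_Construct above is the constructor analogue of Generate_Call
// (sketch with illustrative names):
//
//   if (IsSmi(target) || !map.is_constructor)  throw TypeError;
//   if (IsJSFunction(target))       jump ConstructFunction;  // construct stub
//   if (IsJSFunctionProxy(target))  jump ConstructProxy;     // construct trap
//   receiver = target; jump CallFunction(construct delegate);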
+
+
+// static
+void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rbx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- rdi : the target to call (can be any Object).
+  // -----------------------------------
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ Pop(rdx);
+
+ // Find the address of the last argument.
+ __ movp(rcx, rax);
+ __ addp(rcx, Immediate(1)); // Add one for receiver.
+ __ shlp(rcx, Immediate(kPointerSizeLog2));
+ __ negp(rcx);
+ __ addp(rcx, rbx);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ j(always, &loop_check);
+ __ bind(&loop_header);
+ __ Push(Operand(rbx, 0));
+ __ subp(rbx, Immediate(kPointerSize));
+ __ bind(&loop_check);
+ __ cmpp(rbx, rcx);
+ __ j(greater, &loop_header, Label::kNear);
+
+ // Call the target.
+ __ Push(rdx); // Re-push return address.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
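// The loop above pushes argc + 1 values (receiver included) starting at rbx
// and walking down in memory. The equivalent C-style arithmetic, roughly:
//
//   char* end = rbx - (rax + 1) * kPointerSize;   // +1 for the receiver
//   for (char* p = rbx; p > end; p -= kPointerSize) push(*p);
//   // then re-push the return address and tail-call builtins()->Call()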
+
+
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 1416fbd8b9..0942b2fb3c 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -14,6 +14,7 @@
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
+#include "src/x64/code-stubs-x64.h"
namespace v8 {
namespace internal {
@@ -595,47 +596,46 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
- // Stack layout:
- // rsp[0] : return address
- // rsp[8] : number of parameters (tagged)
- // rsp[16] : receiver displacement
- // rsp[24] : function
+ // rcx : number of parameters (tagged)
+ // rdx : parameters pointer
+ // rdi : function
+ // rsp[0] : return address
// Registers used over the whole function:
// rbx: the mapped parameter count (untagged)
// rax: the allocated object (tagged).
Factory* factory = isolate()->factory();
- StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ SmiToInteger64(rbx, args.GetArgumentOperand(2));
+ DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
+
+ __ SmiToInteger64(rbx, rcx);
// rbx = parameter count (untagged)
// Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ Label adaptor_frame, try_allocate, runtime;
+ __ movp(rax, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(r8, Operand(rax, StandardFrameConstants::kContextOffset));
+ __ Cmp(r8, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// No adaptor, parameter count = argument count.
- __ movp(rcx, rbx);
+ __ movp(r11, rbx);
__ jmp(&try_allocate, Label::kNear);
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
- __ SmiToInteger64(rcx,
- Operand(rdx,
- ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- __ movp(args.GetArgumentOperand(1), rdx);
+ __ SmiToInteger64(
+ r11, Operand(rax, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ leap(rdx, Operand(rax, r11, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
// rbx = parameter count (untagged)
- // rcx = argument count (untagged)
- // Compute the mapped parameter count = min(rbx, rcx) in rbx.
- __ cmpp(rbx, rcx);
+ // r11 = argument count (untagged)
+ // Compute the mapped parameter count = min(rbx, r11) in rbx.
+ __ cmpp(rbx, r11);
__ j(less_equal, &try_allocate, Label::kNear);
- __ movp(rbx, rcx);
+ __ movp(rbx, r11);
__ bind(&try_allocate);
@@ -651,66 +651,65 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ bind(&no_parameter_map);
// 2. Backing store.
- __ leap(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(r8, Operand(r8, r11, times_pointer_size, FixedArray::kHeaderSize));
// 3. Arguments object.
__ addp(r8, Immediate(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);
+ __ Allocate(r8, rax, r9, no_reg, &runtime, TAG_OBJECT);
// rax = address of new object(s) (tagged)
- // rcx = argument count (untagged)
- // Get the arguments map from the current native context into rdi.
+ // r11 = argument count (untagged)
+ // Get the arguments map from the current native context into r9.
Label has_mapped_parameters, instantiate;
- __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
+ __ movp(r9, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+ __ movp(r9, FieldOperand(r9, GlobalObject::kNativeContextOffset));
__ testp(rbx, rbx);
__ j(not_zero, &has_mapped_parameters, Label::kNear);
const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX;
- __ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
+ __ movp(r9, Operand(r9, Context::SlotOffset(kIndex)));
__ jmp(&instantiate, Label::kNear);
const int kAliasedIndex = Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX;
__ bind(&has_mapped_parameters);
- __ movp(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
+ __ movp(r9, Operand(r9, Context::SlotOffset(kAliasedIndex)));
__ bind(&instantiate);
// rax = address of new object (tagged)
// rbx = mapped parameter count (untagged)
- // rcx = argument count (untagged)
- // rdi = address of arguments map (tagged)
- __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
+ // r11 = argument count (untagged)
+ // r9 = address of arguments map (tagged)
+ __ movp(FieldOperand(rax, JSObject::kMapOffset), r9);
__ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
__ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
__ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ movp(rdx, args.GetArgumentOperand(0));
- __ AssertNotSmi(rdx);
+ __ AssertNotSmi(rdi);
__ movp(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- rdx);
+ Heap::kArgumentsCalleeIndex * kPointerSize),
+ rdi);
// Use the length (smi tagged) and set that as an in-object property too.
- // Note: rcx is tagged from here on.
+ // Note: r11 is tagged from here on.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ Integer32ToSmi(rcx, rcx);
+ __ Integer32ToSmi(r11, r11);
__ movp(FieldOperand(rax, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize),
- rcx);
+ Heap::kArgumentsLengthIndex * kPointerSize),
+ r11);
// Set up the elements pointer in the allocated arguments object.
- // If we allocated a parameter map, edi will point there, otherwise to the
+ // If we allocated a parameter map, rdi will point there, otherwise to the
// backing store.
__ leap(rdi, Operand(rax, Heap::kSloppyArgumentsObjectSize));
__ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
// rax = address of new object (tagged)
// rbx = mapped parameter count (untagged)
- // rcx = argument count (tagged)
+ // r11 = argument count (tagged)
// rdi = address of parameter map or backing store (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
@@ -740,48 +739,42 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Load tagged parameter count into r9.
__ Integer32ToSmi(r9, rbx);
__ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
- __ addp(r8, args.GetArgumentOperand(2));
+ __ addp(r8, rcx);
__ subp(r8, r9);
- __ Move(r11, factory->the_hole_value());
- __ movp(rdx, rdi);
+ __ movp(rcx, rdi);
__ leap(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
- // r9 = loop variable (tagged)
+ __ SmiToInteger64(r9, r9);
+ // r9 = loop variable (untagged)
// r8 = mapping index (tagged)
- // r11 = the hole value
- // rdx = address of parameter map (tagged)
+ // rcx = address of parameter map (tagged)
// rdi = address of backing store (tagged)
__ jmp(&parameters_test, Label::kNear);
__ bind(&parameters_loop);
- __ SmiSubConstant(r9, r9, Smi::FromInt(1));
- __ SmiToInteger64(kScratchRegister, r9);
- __ movp(FieldOperand(rdx, kScratchRegister,
- times_pointer_size,
- kParameterMapHeaderSize),
+ __ subp(r9, Immediate(1));
+ __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+ __ movp(FieldOperand(rcx, r9, times_pointer_size, kParameterMapHeaderSize),
r8);
- __ movp(FieldOperand(rdi, kScratchRegister,
- times_pointer_size,
- FixedArray::kHeaderSize),
- r11);
+ __ movp(FieldOperand(rdi, r9, times_pointer_size, FixedArray::kHeaderSize),
+ kScratchRegister);
__ SmiAddConstant(r8, r8, Smi::FromInt(1));
__ bind(&parameters_test);
- __ SmiTest(r9);
+ __ testp(r9, r9);
__ j(not_zero, &parameters_loop, Label::kNear);
__ bind(&skip_parameter_map);
- // rcx = argument count (tagged)
+ // r11 = argument count (tagged)
// rdi = address of backing store (tagged)
// Copy arguments header and remaining slots (if there are any).
__ Move(FieldOperand(rdi, FixedArray::kMapOffset),
factory->fixed_array_map());
- __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ movp(FieldOperand(rdi, FixedArray::kLengthOffset), r11);
Label arguments_loop, arguments_test;
__ movp(r8, rbx);
- __ movp(rdx, args.GetArgumentOperand(1));
- // Untag rcx for the loop below.
- __ SmiToInteger64(rcx, rcx);
+ // Untag r11 for the loop below.
+ __ SmiToInteger64(r11, r11);
__ leap(kScratchRegister, Operand(r8, times_pointer_size, 0));
__ subp(rdx, kScratchRegister);
__ jmp(&arguments_test, Label::kNear);
@@ -796,73 +789,56 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ addp(r8, Immediate(1));
__ bind(&arguments_test);
- __ cmpp(r8, rcx);
+ __ cmpp(r8, r11);
__ j(less, &arguments_loop, Label::kNear);
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
+ // Return.
+ __ ret(0);
// Do the runtime call to allocate the arguments object.
- // rcx = argument count (untagged)
+ // r11 = argument count (untagged)
__ bind(&runtime);
- __ Integer32ToSmi(rcx, rcx);
- __ movp(args.GetArgumentOperand(2), rcx); // Patch argument count.
+ __ Integer32ToSmi(r11, r11);
+ __ PopReturnAddressTo(rax);
+ __ Push(rdi); // Push function.
+ __ Push(rdx); // Push parameters pointer.
+ __ Push(r11); // Push parameter count.
+ __ PushReturnAddressFrom(rax);
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
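// For reference, GenerateNewSloppyFast above allocates three objects in a
// single new-space allocation (rough layout; see the V8 sloppy-arguments
// design for the exact slot meanings):
//
//   [ JSObject   : the arguments object, with length and callee in-object ]
//   [ FixedArray : parameter map, context-slot indices for the
//                  min(formal parameter count, argc) mapped parameters ]
//   [ FixedArray : backing store, holes for mapped slots and the actual
//                  values for the remaining arguments ]
//
// Any failure falls through to &runtime, which re-pushes function,
// parameters pointer and count and tail-calls kNewSloppyArguments.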
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
- // rsp[16] : receiver displacement
- // rsp[24] : function
+ // rcx : number of parameters (tagged)
+ // rdx : parameters pointer
+ // rdi : function
+ // rsp[0] : return address
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(not_equal, &runtime);
-
- // Patch the arguments.length and the parameters pointer.
- StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movp(args.GetArgumentOperand(2), rcx);
- __ SmiToInteger64(rcx, rcx);
- __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- __ movp(args.GetArgumentOperand(1), rdx);
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
-}
-
-
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : language mode
- // rsp[16] : index of rest parameter
- // rsp[24] : number of parameters
- // rsp[32] : receiver displacement
+ DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(rbx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &runtime);
// Patch the arguments.length and the parameters pointer.
- StackArgumentsAccessor args(rsp, 4, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ movp(args.GetArgumentOperand(1), rcx);
- __ SmiToInteger64(rcx, rcx);
- __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
+ StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger64(rax, rcx);
+ __ leap(rdx, Operand(rbx, rax, times_pointer_size,
StandardFrameConstants::kCallerSPOffset));
- __ movp(args.GetArgumentOperand(0), rdx);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
+ __ PopReturnAddressTo(rax);
+ __ Push(rdi); // Push function.
+ __ Push(rdx); // Push parameters pointer.
+ __ Push(rcx); // Push parameter count.
+ __ PushReturnAddressFrom(rax);
+ __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
@@ -928,46 +904,45 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
- // rsp[0] : return address
- // rsp[8] : number of parameters
- // rsp[16] : receiver displacement
- // rsp[24] : function
+ // rcx : number of parameters (tagged)
+ // rdx : parameters pointer
+ // rdi : function
+ // rsp[0] : return address
+
+ DCHECK(rdi.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(rcx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(rdx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
- __ movp(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
- __ movp(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
- __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ __ movp(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+ __ movp(rax, Operand(rbx, StandardFrameConstants::kContextOffset));
+ __ Cmp(rax, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adaptor_frame);
// Get the length from the frame.
- StackArgumentsAccessor args(rsp, 3, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rcx, args.GetArgumentOperand(2));
- __ SmiToInteger64(rcx, rcx);
+ __ SmiToInteger64(rax, rcx);
__ jmp(&try_allocate);
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
- __ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ movp(args.GetArgumentOperand(2), rcx);
- __ SmiToInteger64(rcx, rcx);
- __ leap(rdx, Operand(rdx, rcx, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
- __ movp(args.GetArgumentOperand(1), rdx);
+ __ movp(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiToInteger64(rax, rcx);
+ __ leap(rdx, Operand(rbx, rax, times_pointer_size,
+ StandardFrameConstants::kCallerSPOffset));
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ testp(rcx, rcx);
+ __ testp(rax, rax);
__ j(zero, &add_arguments_object, Label::kNear);
- __ leap(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
+ __ leap(rax, Operand(rax, times_pointer_size, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ addp(rcx, Immediate(Heap::kStrictArgumentsObjectSize));
+ __ addp(rax, Immediate(Heap::kStrictArgumentsObjectSize));
// Do the allocation of both objects in one go.
- __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
+ __ Allocate(rax, rax, rbx, no_reg, &runtime, TAG_OBJECT);
// Get the arguments map from the current native context.
__ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -982,7 +957,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ movp(rcx, args.GetArgumentOperand(2));
__ movp(FieldOperand(rax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
rcx);
@@ -992,18 +966,14 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ testp(rcx, rcx);
__ j(zero, &done);
- // Get the parameters pointer from the stack.
- __ movp(rdx, args.GetArgumentOperand(1));
-
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ leap(rdi, Operand(rax, Heap::kStrictArgumentsObjectSize));
__ movp(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
__ movp(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
-
-
__ movp(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+
// Untag the length for the loop below.
__ SmiToInteger64(rcx, rcx);
@@ -1017,12 +987,17 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ decp(rcx);
__ j(not_zero, &loop);
- // Return and remove the on-stack parameters.
+ // Return.
__ bind(&done);
- __ ret(3 * kPointerSize);
+ __ ret(0);
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
+ __ PopReturnAddressTo(rax);
+ __ Push(rdi); // Push function.
+ __ Push(rdx); // Push parameters pointer.
+ __ Push(rcx); // Push parameter count.
+ __ PushReturnAddressFrom(rax);
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
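// GenerateNewStrict is the simple variant: no aliasing, so no parameter
// map. Roughly (illustrative pseudocode):
//
//   len = adaptor frame present ? adaptor frame length : parameter count;
//   obj = Allocate(strict arguments object + FixedArray(len));
//   obj.length = len;
//   copy len values from the parameters pointer into the FixedArray;
//   // on allocation failure: push function/pointer/count and tail-call
//   // kNewStrictArguments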
@@ -1761,24 +1736,16 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ Push(rax);
// Figure out which native to call and setup the arguments.
- if (cc == equal && strict()) {
+ if (cc == equal) {
__ PushReturnAddressFrom(rcx);
- __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
+ 1);
} else {
- Builtins::JavaScript builtin;
- if (cc == equal) {
- builtin = Builtins::EQUALS;
- } else {
- builtin =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
- }
-
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
__ PushReturnAddressFrom(rcx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+ __ TailCallRuntime(
+ is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
+ 1);
}
__ bind(&miss);
@@ -1849,28 +1816,26 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(r11, HeapObject::kMapOffset),
Heap::kWeakCellMapRootIndex);
- __ j(not_equal, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
+ __ j(not_equal, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ CheckSmi(FieldOperand(r11, WeakCell::kValueOffset));
__ j(equal, &initialize);
__ jmp(&megamorphic);
- if (!FLAG_pretenuring_call_new) {
- __ bind(&check_allocation_site);
- // If we came here, we need to see if we are the array function.
-    // If we didn't have a matching function, and we didn't find the megamorphic
- // sentinel, then we have in the slot either some other function or an
- // AllocationSite.
- __ CompareRoot(FieldOperand(r11, 0), Heap::kAllocationSiteMapRootIndex);
- __ j(not_equal, &miss);
+ __ bind(&check_allocation_site);
+ // If we came here, we need to see if we are the array function.
+  // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite.
+ __ CompareRoot(FieldOperand(r11, 0), Heap::kAllocationSiteMapRootIndex);
+ __ j(not_equal, &miss);
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
- __ cmpp(rdi, r11);
- __ j(not_equal, &megamorphic);
- __ jmp(&done);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
+ __ cmpp(rdi, r11);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done);
__ bind(&miss);
@@ -1889,23 +1854,20 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
- if (!FLAG_pretenuring_call_new) {
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
- __ cmpp(rdi, r11);
- __ j(not_equal, &not_array_function);
-
- CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
- __ jmp(&done_no_smi_convert);
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r11);
+ __ cmpp(rdi, r11);
+ __ j(not_equal, &not_array_function);
- __ bind(&not_array_function);
- }
-
- CreateWeakCellStub create_stub(isolate);
+ CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub, is_super);
__ jmp(&done_no_smi_convert);
+ __ bind(&not_array_function);
+ CreateWeakCellStub weak_cell_stub(isolate);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
+ __ jmp(&done_no_smi_convert);
+
__ bind(&done);
__ Integer32ToSmi(rdx, rdx);
@@ -1928,36 +1890,10 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
}
-static void EmitSlowCase(Isolate* isolate,
- MacroAssembler* masm,
- StackArgumentsAccessor* args,
- int argc,
- Label* non_function) {
- // Check for function proxy.
- __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, non_function);
- __ PopReturnAddressTo(rcx);
- __ Push(rdi); // put proxy as additional argument under return address
- __ PushReturnAddressFrom(rcx);
- __ Set(rax, argc + 1);
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor =
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(non_function);
- __ movp(args->GetReceiverOperand(), rdi);
+static void EmitSlowCase(MacroAssembler* masm, StackArgumentsAccessor* args,
+ int argc) {
__ Set(rax, argc);
- __ Set(rbx, 0);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor =
- isolate->builtins()->ArgumentsAdaptorTrampoline();
- __ Jump(adaptor, RelocInfo::CODE_TARGET);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -1982,13 +1918,12 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
// rdi : the function to call
// wrap_and_call can only be true if we are compiling a monomorphic method.
- Isolate* isolate = masm->isolate();
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
StackArgumentsAccessor args(rsp, argc);
if (needs_checks) {
// Check that the function really is a JavaScript function.
- __ JumpIfSmi(rdi, &non_function);
+ __ JumpIfSmi(rdi, &slow);
// Goto slow case if we do not have a function.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
@@ -2023,7 +1958,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
if (needs_checks) {
// Slow-case: Non-function called.
__ bind(&slow);
- EmitSlowCase(isolate, masm, &args, argc, &non_function);
+ EmitSlowCase(masm, &args, argc);
}
if (call_as_method) {
@@ -2044,34 +1979,26 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// rcx : original constructor (for IsSuperConstructorCall)
// rdx : slot in feedback vector (Smi, for RecordCallTarget)
// rdi : constructor function
- Label slow, non_function_call;
- // Check that function is not a smi.
- __ JumpIfSmi(rdi, &non_function_call);
- // Check that function is a JSFunction.
+ Label non_function;
+ // Check that the constructor is not a smi.
+ __ JumpIfSmi(rdi, &non_function);
+ // Check that constructor is a JSFunction.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, r11);
- __ j(not_equal, &slow);
+ __ j(not_equal, &non_function);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, IsSuperConstructorCall());
__ SmiToInteger32(rdx, rdx);
- if (FLAG_pretenuring_call_new) {
- // Put the AllocationSite from the feedback vector into ebx.
- // By adding kPointerSize we encode that we know the AllocationSite
- // entry is at the feedback vector slot given by rdx + 1.
- __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- } else {
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into rbx, or undefined.
- __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
- __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
- __ j(equal, &feedback_register_initialized);
- __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ bind(&feedback_register_initialized);
- }
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into rbx, or undefined.
+ __ movp(rbx, FieldOperand(rbx, rdx, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(FieldOperand(rbx, 0), Heap::kAllocationSiteMapRootIndex);
+ __ j(equal, &feedback_register_initialized);
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(rbx);
}
@@ -2083,64 +2010,29 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ movp(rdx, rdi);
}
- // Jump to the function-specific construct stub.
- Register jmp_reg = rcx;
- __ movp(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
- __ movp(jmp_reg, FieldOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ leap(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
- __ jmp(jmp_reg);
-
- // rdi: called object
- // rax: number of arguments
- // r11: object map
- Label do_call;
- __ bind(&slow);
- __ CmpInstanceType(r11, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function_call);
- __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing rax).
- __ Set(rbx, 0);
- __ Jump(isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ movp(rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(rcx, FieldOperand(rcx, SharedFunctionInfo::kConstructStubOffset));
+ __ leap(rcx, FieldOperand(rcx, Code::kHeaderSize));
+ __ jmp(rcx);
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
- __ movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- __ movp(vector, FieldOperand(vector,
- SharedFunctionInfo::kFeedbackVectorOffset));
+ __ bind(&non_function);
+ __ movp(rdx, rdi);
+ __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// rdi - function
- // rdx - slot id (as integer)
+ // rdx - slot id
// rbx - vector
- Label miss;
- int argc = arg_count();
- ParameterCount actual(argc);
-
- __ SmiToInteger32(rdx, rdx);
-
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
- __ cmpp(rdi, rcx);
- __ j(not_equal, &miss);
+ // rcx - allocation site (loaded from vector[slot]).
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r8);
+ __ cmpp(rdi, r8);
+ __ j(not_equal, miss);
__ movp(rax, Immediate(arg_count()));
- __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
- FixedArray::kHeaderSize));
-  // Verify that rcx contains an AllocationSite
- Factory* factory = masm->isolate()->factory();
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
- factory->allocation_site_map());
- __ j(not_equal, &miss);
// Increment the call count for monomorphic function calls.
__ SmiAddConstant(FieldOperand(rbx, rdx, times_pointer_size,
@@ -2151,18 +2043,6 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ movp(rdx, rdi);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
-
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The slow case, we need this no matter what to complete a call after a miss.
- CallFunctionNoFeedback(masm,
- arg_count(),
- true,
- CallAsMethod());
-
- // Unreachable.
- __ int3();
}
@@ -2176,7 +2056,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
Label have_js_function;
int argc = arg_count();
StackArgumentsAccessor args(rsp, argc);
@@ -2231,7 +2111,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ InvokeFunction(rdi, actual, JUMP_FUNCTION, NullCallWrapper());
__ bind(&slow);
- EmitSlowCase(isolate, masm, &args, argc, &non_function);
+ EmitSlowCase(masm, &args, argc);
if (CallAsMethod()) {
__ bind(&wrap);
@@ -2239,11 +2119,21 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
- Label uninitialized, miss;
+ Label uninitialized, miss, not_allocation_site;
__ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
__ j(equal, &slow_start);
+ // Check if we have an allocation site.
+ __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
+ Heap::kAllocationSiteMapRootIndex);
+ __ j(not_equal, &not_allocation_site);
+
+ // We have an allocation site.
+ HandleArrayCase(masm, &miss);
+
+ __ bind(&not_allocation_site);
+
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
@@ -2312,7 +2202,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// the slow case
__ bind(&slow_start);
// Check that function is not a smi.
- __ JumpIfSmi(rdi, &non_function);
+ __ JumpIfSmi(rdi, &slow);
// Check that function is a JSFunction.
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &slow);
@@ -2333,10 +2223,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
__ Push(rdx);
// Call the entry.
- Runtime::FunctionId id = GetICState() == DEFAULT
- ? Runtime::kCallIC_Miss
- : Runtime::kCallIC_Customization_Miss;
- __ CallRuntime(id, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss, 3);
// Move result to edi and exit the internal frame.
__ movp(rdi, rax);
@@ -2684,220 +2571,108 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Implements "value instanceof function" operator.
- // Expected input state with no inline cache:
- // rsp[0] : return address
- // rsp[8] : function pointer
- // rsp[16] : value
- // Expected input state with an inline one-element cache:
- // rsp[0] : return address
- // rsp[8] : offset from return address to location of inline cache
- // rsp[16] : function pointer
- // rsp[24] : value
- // Returns a bitwise zero to indicate that the value
-  // is an instance of the function and anything else to
- // indicate that the value is not an instance.
-
- // Fixed register usage throughout the stub.
- Register object = rax; // Object (lhs).
- Register map = rbx; // Map of the object.
- Register function = rdx; // Function (rhs).
- Register prototype = rdi; // Prototype of the function.
- Register scratch = rcx;
-
- static const int kOffsetToMapCheckValue = 2;
- static const int kOffsetToResultValue = kPointerSize == kInt64Size ? 18 : 14;
- // The last 4 bytes of the instruction sequence
- // movp(rdi, FieldOperand(rax, HeapObject::kMapOffset))
- // Move(kScratchRegister, Factory::the_hole_value())
- // in front of the hole value address.
- static const unsigned int kWordBeforeMapCheckValue =
- kPointerSize == kInt64Size ? 0xBA49FF78 : 0xBA41FF78;
- // The last 4 bytes of the instruction sequence
- // __ j(not_equal, &cache_miss);
- // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
- // before the offset of the hole value in the root array.
- static const unsigned int kWordBeforeResultValue =
- kPointerSize == kInt64Size ? 0x458B4906 : 0x458B4106;
-
- int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
-
- DCHECK_EQ(object.code(), InstanceofStub::left().code());
- DCHECK_EQ(function.code(), InstanceofStub::right().code());
-
- // Get the object and function - they are always both needed.
- // Go slow case if the object is a smi.
- Label slow;
- StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
- ARGUMENTS_DONT_CONTAIN_RECEIVER);
- if (!HasArgsInRegisters()) {
- __ movp(object, args.GetArgumentOperand(0));
- __ movp(function, args.GetArgumentOperand(1));
- }
- __ JumpIfSmi(object, &slow);
-
- // Check that the left hand is a JS object. Leave its map in rax.
- __ CmpObjectType(object, FIRST_SPEC_OBJECT_TYPE, map);
- __ j(below, &slow);
- __ CmpInstanceType(map, LAST_SPEC_OBJECT_TYPE);
- __ j(above, &slow);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss, Label::kNear);
- __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss, Label::kNear);
- __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
- __ bind(&miss);
- }
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+ Register const object = rdx; // Object (lhs).
+ Register const function = rax; // Function (rhs).
+ Register const object_map = rcx; // Map of {object}.
+ Register const function_map = r8; // Map of {function}.
+ Register const function_prototype = rdi; // Prototype of {function}.
+
+ DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+ // Check if {object} is a smi.
+ Label object_is_smi;
+ __ JumpIfSmi(object, &object_is_smi, Label::kNear);
+
+ // Lookup the {function} and the {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Label fast_case, slow_case;
+ __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
+ __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &fast_case, Label::kNear);
+ __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &fast_case, Label::kNear);
+ __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(0);
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, &slow, true);
-
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ CmpObjectType(prototype, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
- __ j(below, &slow);
- __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
- __ j(above, &slow);
-
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
- __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
- } else {
- // The constants for the code patching are based on push instructions
- // at the call site.
- DCHECK(!HasArgsInRegisters());
- // Get return address and delta to inlined map check.
- __ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subp(kScratchRegister, args.GetArgumentOperand(2));
- if (FLAG_debug_code) {
- __ movl(scratch, Immediate(kWordBeforeMapCheckValue));
- __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), scratch);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
- }
- __ movp(kScratchRegister,
- Operand(kScratchRegister, kOffsetToMapCheckValue));
- __ movp(Operand(kScratchRegister, 0), map);
-
- __ movp(r8, map);
- // Scratch points at the cell payload. Calculate the start of the object.
- __ subp(kScratchRegister, Immediate(Cell::kValueOffset - 1));
- __ RecordWriteField(kScratchRegister, Cell::kValueOffset, r8, function,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- }
+ // If {object} is a smi we can safely return false if {function} is a JS
+ // function, otherwise we have to miss to the runtime and throw an exception.
+ __ bind(&object_is_smi);
+ __ JumpIfSmi(function, &slow_case);
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
+ __ j(not_equal, &slow_case);
+ __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+ __ ret(0);
- // Loop through the prototype chain looking for the function prototype.
- __ movp(scratch, FieldOperand(map, Map::kPrototypeOffset));
- Label loop, is_instance, is_not_instance;
- __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
+ // Fast-case: The {function} must be a valid JSFunction.
+ __ bind(&fast_case);
+ __ JumpIfSmi(function, &slow_case);
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
+ __ j(not_equal, &slow_case);
+
+ // Ensure that {function} has an instance prototype.
+ __ testb(FieldOperand(function_map, Map::kBitFieldOffset),
+ Immediate(1 << Map::kHasNonInstancePrototype));
+ __ j(not_zero, &slow_case);
+
+ // Ensure that {function} is not bound.
+ Register const shared_info = kScratchRegister;
+ __ movp(shared_info,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ TestBitSharedFunctionInfoSpecialField(
+ shared_info, SharedFunctionInfo::kCompilerHintsOffset,
+ SharedFunctionInfo::kBoundFunction);
+ __ j(not_zero, &slow_case);
+
+ // Get the "prototype" (or initial map) of the {function}.
+ __ movp(function_prototype,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ AssertNotSmi(function_prototype);
+
+ // Resolve the prototype if the {function} has an initial map. Afterwards the
+ // {function_prototype} will be either the JSReceiver prototype object or the
+ // hole value, which means that no instances of the {function} were created so
+ // far and hence we should return false.
+ Label function_prototype_valid;
+ Register const function_prototype_map = kScratchRegister;
+ __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
+ __ j(not_equal, &function_prototype_valid, Label::kNear);
+ __ movp(function_prototype,
+ FieldOperand(function_prototype, Map::kPrototypeOffset));
+ __ bind(&function_prototype_valid);
+ __ AssertNotSmi(function_prototype);
+
+ // Update the global instanceof cache with the current {object} map and
+ // {function}. The cached answer will be set when it is known below.
+ __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex);
+
+ // Loop through the prototype chain looking for the {function} prototype.
+ // Assume true, and change to false if not found.
+ Register const object_prototype = object_map;
+ Label done, loop;
+ __ LoadRoot(rax, Heap::kTrueValueRootIndex);
__ bind(&loop);
- __ cmpp(scratch, prototype);
- __ j(equal, &is_instance, Label::kNear);
- __ cmpp(scratch, kScratchRegister);
- // The code at is_not_instance assumes that kScratchRegister contains a
- // non-zero GCable value (the null object in this case).
- __ j(equal, &is_not_instance, Label::kNear);
- __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ xorl(rax, rax);
- // Store bitwise zero in the cache. This is a Smi in GC terms.
- STATIC_ASSERT(kSmiTag == 0);
- __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(rax, Heap::kTrueValueRootIndex);
- }
- } else {
- // Store offset of true in the root array at the inline check site.
- int true_offset = 0x100 +
- (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
- // Assert it is a 1-byte signed value.
- DCHECK(true_offset >= 0 && true_offset < 0x100);
- __ movl(rax, Immediate(true_offset));
- __ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subp(kScratchRegister, args.GetArgumentOperand(2));
- __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
- if (FLAG_debug_code) {
- __ movl(rax, Immediate(kWordBeforeResultValue));
- __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
- }
- if (!ReturnTrueFalseObject()) {
- __ Set(rax, 0);
- }
- }
- __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
- kPointerSize);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- // We have to store a non-zero value in the cache.
- __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ LoadRoot(rax, Heap::kFalseValueRootIndex);
- }
- } else {
- // Store offset of false in the root array at the inline check site.
- int false_offset = 0x100 +
- (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
- // Assert it is a 1-byte signed value.
- DCHECK(false_offset >= 0 && false_offset < 0x100);
- __ movl(rax, Immediate(false_offset));
- __ movq(kScratchRegister, StackOperandForReturnAddress(0));
- __ subp(kScratchRegister, args.GetArgumentOperand(2));
- __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
- if (FLAG_debug_code) {
- __ movl(rax, Immediate(kWordBeforeResultValue));
- __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
- }
- }
- __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
- kPointerSize);
+ __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
+ __ cmpp(object_prototype, function_prototype);
+ __ j(equal, &done, Label::kNear);
+ __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+ __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+ __ j(not_equal, &loop);
+ __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+ __ bind(&done);
+ __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(0);
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- // Tail call the builtin which returns 0 or 1.
- DCHECK(!HasArgsInRegisters());
- if (HasCallSiteInlineCheck()) {
- // Remove extra value from the stack.
- __ PopReturnAddressTo(rcx);
- __ Pop(rax);
- __ PushReturnAddressFrom(rcx);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- // Call the builtin and convert 0/1 to true/false.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ Push(object);
- __ Push(function);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- Label true_value, done;
- __ testq(rax, rax);
- __ j(zero, &true_value, Label::kNear);
- __ LoadRoot(rax, Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ LoadRoot(rax, Heap::kTrueValueRootIndex);
- __ bind(&done);
- __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
- kPointerSize);
- }
+ // Slow-case: Call the runtime function.
+ __ bind(&slow_case);
+ __ PopReturnAddressTo(kScratchRegister);
+ __ Push(object);
+ __ Push(function);
+ __ PushReturnAddressFrom(kScratchRegister);
+ __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
}
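
For reference, the rewritten fast path above reduces to a plain prototype-chain walk: rule out smis, consult the (map, function) cache, resolve the {function} prototype, then follow Map::kPrototypeOffset links until the prototype is found or null ends the chain. A minimal hand-written C++ sketch of that walk, with simplified stand-in types (nullptr plays the role of JS null) rather than V8's tagged pointers:

    struct Map;
    struct Obj { Map* map; };        // every heap object points at its map
    struct Map { Obj* prototype; };  // maps link to the object's prototype

    // Mirrors the loop emitted above: true if `prototype` occurs on
    // `object`'s prototype chain, false once the chain ends.
    bool HasInPrototypeChain(Obj* object, Obj* prototype) {
      Map* map = object->map;
      for (;;) {
        Obj* object_prototype = map->prototype;
        if (object_prototype == prototype) return true;  // found -> true
        if (object_prototype == nullptr) return false;   // hit null -> false
        map = object_prototype->map;                     // walk one link up
      }
    }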
@@ -3333,7 +3108,42 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ PopReturnAddressTo(rcx); // Pop return address.
__ Push(rax); // Push argument.
__ PushReturnAddressFrom(rcx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+ __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+ // The ToString stub takes one argument in rax.
+ Label is_number;
+ __ JumpIfSmi(rax, &is_number, Label::kNear);
+
+ Label not_string;
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
+ // rax: receiver
+ // rdi: receiver map
+ __ j(above_equal, &not_string, Label::kNear);
+ __ Ret();
+ __ bind(&not_string);
+
+ Label not_heap_number;
+ __ CompareRoot(rdi, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ CmpInstanceType(rdi, ODDBALL_TYPE);
+ __ j(not_equal, &not_oddball, Label::kNear);
+ __ movp(rax, FieldOperand(rax, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ PopReturnAddressTo(rcx); // Pop return address.
+ __ Push(rax); // Push argument.
+ __ PushReturnAddressFrom(rcx); // Push return address.
+ __ TailCallRuntime(Runtime::kToString, 1, 1);
}
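
The new ToStringStub dispatches on the receiver's type in a fixed order: smis and heap numbers reuse NumberToStringStub, strings are returned unchanged, oddballs return their cached string, and anything else falls through to the runtime. A hedged sketch of that ordering (the enum and result strings are illustrative, not V8 API):

    enum class Kind { kSmi, kString, kHeapNumber, kOddball, kOther };

    // Same check order as the stub: smi, string, heap number, oddball,
    // then the Runtime::kToString fallback.
    const char* ToStringPath(Kind kind) {
      if (kind == Kind::kSmi || kind == Kind::kHeapNumber)
        return "tail-call NumberToStringStub";
      if (kind == Kind::kString) return "return the receiver as-is";
      if (kind == Kind::kOddball) return "load the cached to_string field";
      return "tail-call Runtime::kToString";
    }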
@@ -3474,43 +3284,40 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // rsp[0] : return address
- // rsp[8] : right string
- // rsp[16] : left string
-
- StackArgumentsAccessor args(rsp, 2, ARGUMENTS_DONT_CONTAIN_RECEIVER);
- __ movp(rdx, args.GetArgumentOperand(0)); // left
- __ movp(rax, args.GetArgumentOperand(1)); // right
+ // ----------- S t a t e -------------
+ // -- rdx : left string
+ // -- rax : right string
+ // -- rsp[0] : return address
+ // -----------------------------------
+ __ AssertString(rdx);
+ __ AssertString(rax);
// Check for identity.
Label not_same;
__ cmpp(rdx, rax);
__ j(not_equal, &not_same, Label::kNear);
__ Move(rax, Smi::FromInt(EQUAL));
- Counters* counters = isolate()->counters();
- __ IncrementCounter(counters->string_compare_native(), 1);
- __ ret(2 * kPointerSize);
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
+ __ Ret();
__ bind(&not_same);
// Check that both are sequential one-byte strings.
+ Label runtime;
__ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx, &runtime);
// Inline comparison of one-byte strings.
- __ IncrementCounter(counters->string_compare_native(), 1);
- // Drop arguments from the stack
- __ PopReturnAddressTo(rcx);
- __ addp(rsp, Immediate(2 * kPointerSize));
- __ PushReturnAddressFrom(rcx);
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx, rdi,
r8);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
+ __ PopReturnAddressTo(rcx);
+ __ Push(rdx);
+ __ Push(rax);
+ __ PushReturnAddressFrom(rcx);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
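
The reworked StringCompareStub now takes both operands in registers, short-circuits on identity, compares flat one-byte strings inline, and otherwise pushes the operands and tail-calls Runtime::kStringCompare. A loose C++ analogue of the fast/slow split, simplified to C strings (the real stub only inlines sequential one-byte V8 strings):

    #include <cstring>

    // Returns -1/0/+1 like the smi result of the stub and runtime paths.
    int StringCompareSketch(const char* left, const char* right) {
      if (left == right) return 0;       // identity -> EQUAL, no comparison
      int r = std::strcmp(left, right);  // stands in for the inline compare
      return (r > 0) - (r < 0);          // non-inlinable inputs would go to
                                         // the runtime instead
    }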
@@ -3543,6 +3350,37 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
}
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::BOOLEAN, state());
+ Label miss;
+ Label::Distance const miss_distance =
+ masm->emit_debug_code() ? Label::kFar : Label::kNear;
+
+ __ JumpIfSmi(rdx, &miss, miss_distance);
+ __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ JumpIfSmi(rax, &miss, miss_distance);
+ __ movp(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(rcx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
+ __ JumpIfNotRoot(rbx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
+ if (op() != Token::EQ_STRICT && is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (!Token::IsEqualityOp(op())) {
+ __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
+ __ AssertSmi(rax);
+ __ movp(rdx, FieldOperand(rdx, Oddball::kToNumberOffset));
+ __ AssertSmi(rdx);
+ __ xchgp(rax, rdx);
+ }
+ __ subp(rax, rdx);
+ __ Ret();
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
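
The new BOOLEAN compare IC case relies on two facts visible above: the boolean oddballs are singletons, so equality is pointer identity, and each caches its ToNumber value as a smi, which the relational branch loads and subtracts. A stand-in sketch (the strong-mode path, which throws instead, is omitted):

    struct BooleanOddball { int to_number; };  // cached 0 (false) / 1 (true)

    int CompareBooleansSketch(const BooleanOddball* left,
                              const BooleanOddball* right,
                              bool is_equality_op) {
      if (is_equality_op) {
        // Singletons: identical pointers mean equal; zero signals "equal".
        return left == right ? 0 : 1;
      }
      // Relational ops compare the cached numeric values; the sign of the
      // difference encodes less / equal / greater for the caller.
      return left->to_number - right->to_number;
    }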
+
+
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
DCHECK(state() == CompareICState::SMI);
Label miss;
@@ -3844,15 +3682,24 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ j(either_smi, &miss, Label::kNear);
__ GetWeakValue(rdi, cell);
- __ movp(rcx, FieldOperand(rax, HeapObject::kMapOffset));
- __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
- __ cmpp(rcx, rdi);
+ __ cmpp(FieldOperand(rdx, HeapObject::kMapOffset), rdi);
__ j(not_equal, &miss, Label::kNear);
- __ cmpp(rbx, rdi);
+ __ cmpp(FieldOperand(rax, HeapObject::kMapOffset), rdi);
__ j(not_equal, &miss, Label::kNear);
- __ subp(rax, rdx);
- __ ret(0);
+ if (Token::IsEqualityOp(op())) {
+ __ subp(rax, rdx);
+ __ ret(0);
+ } else if (is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ __ PopReturnAddressTo(rcx);
+ __ Push(rdx);
+ __ Push(rax);
+ __ Push(Smi::FromInt(NegativeComparisonResult(GetCondition())));
+ __ PushReturnAddressFrom(rcx);
+ __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -4372,25 +4219,23 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
-static void HandleArrayCases(MacroAssembler* masm, Register receiver,
- Register key, Register vector, Register slot,
- Register feedback, Register receiver_map,
- Register scratch1, Register scratch2,
- Register scratch3, bool is_polymorphic,
- Label* miss) {
+static void HandleArrayCases(MacroAssembler* masm, Register feedback,
+ Register receiver_map, Register scratch1,
+ Register scratch2, Register scratch3,
+ bool is_polymorphic, Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
@@ -4492,8 +4337,8 @@ void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
__ bind(&try_array);
__ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &not_array);
- HandleArrayCases(masm, receiver, name, vector, slot, feedback, receiver_map,
- integer_slot, r11, r15, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, true,
+ &miss);
__ bind(&not_array);
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
@@ -4551,8 +4396,8 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- integer_slot, r11, r15, true, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, true,
+ &miss);
__ bind(&not_array);
// Is it generic?
@@ -4570,8 +4415,8 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
// at least one map/handler pair.
__ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
- HandleArrayCases(masm, receiver, key, vector, slot, feedback, receiver_map,
- integer_slot, r11, r15, false, &miss);
+ HandleArrayCases(masm, feedback, receiver_map, integer_slot, r11, r15, false,
+ &miss);
__ bind(&miss);
KeyedLoadIC::GenerateMiss(masm);
@@ -4583,14 +4428,14 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4607,11 +4452,50 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // rdx
+ Register key = VectorStoreICDescriptor::NameRegister(); // rcx
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // rbx
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // rdi
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(rax)); // rax
+ Register feedback = r8;
+ Register integer_slot = r9;
+ Register receiver_map = r11;
+ DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));
+
+ __ SmiToInteger32(integer_slot, slot);
+ __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
+ integer_slot, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &not_array);
+ HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, true,
+ &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ j(not_equal, &miss);
+
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
+ receiver, key, feedback, no_reg);
- // TODO(mvstanton): Implement.
__ bind(&miss);
StoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
}
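
VectorStoreICStub::GenerateImpl is no longer a bare miss: it now tests the feedback slot in the same order as the load ICs. A compact summary of the four-way dispatch, with an illustrative enum in place of the real feedback objects:

    enum class Feedback {
      kWeakCellWithMap,    // weak cell matching the receiver map
      kFixedArray,         // polymorphic map/handler array
      kMegamorphicSymbol,  // megamorphic sentinel
      kOther
    };

    // One branch per case the stub checks, in the same order.
    const char* VectorStoreICDispatch(Feedback feedback) {
      switch (feedback) {
        case Feedback::kWeakCellWithMap:   return "monomorphic handler";
        case Feedback::kFixedArray:        return "HandleArrayCases scan";
        case Feedback::kMegamorphicSymbol: return "stub cache probe";
        default:                           return "StoreIC::GenerateMiss";
      }
    }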
@@ -4625,29 +4509,125 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
+static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
+ Register receiver_map,
+ Register feedback, Register scratch,
+ Register scratch1,
+ Register scratch2, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next, next_loop, prepare_next;
+ Label transition_call;
+
+ Register cached_map = scratch;
+ Register counter = scratch1;
+ Register length = scratch2;
+
+ // Polymorphic, we have to loop from 0 to N - 1
+ __ movp(counter, Immediate(0));
+ __ movp(length, FieldOperand(feedback, FixedArray::kLengthOffset));
+ __ SmiToInteger32(length, length);
+
+ __ bind(&next_loop);
+ __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, &prepare_next);
+ __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &transition_call);
+ __ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
+ FixedArray::kHeaderSize + 2 * kPointerSize));
+ __ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
+ __ jmp(feedback);
+
+ __ bind(&transition_call);
+ DCHECK(receiver_map.is(VectorStoreTransitionDescriptor::MapRegister()));
+ __ movp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ // The weak cell may have been cleared.
+ __ JumpIfSmi(receiver_map, miss);
+ // Get the handler in value.
+ __ movp(feedback, FieldOperand(feedback, counter, times_pointer_size,
+ FixedArray::kHeaderSize + 2 * kPointerSize));
+ __ leap(feedback, FieldOperand(feedback, Code::kHeaderSize));
+ __ jmp(feedback);
+
+ __ bind(&prepare_next);
+ __ addl(counter, Immediate(3));
+ __ cmpl(counter, length);
+ __ j(less, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ jmp(miss);
+}
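
The counter in HandlePolymorphicKeyedStoreCase advances by 3 because each feedback entry is a (map weak cell, transition map or undefined, handler) triple. A simplified C++ rendering of the scan, with opaque pointers standing in for the tagged values:

    struct Entry {
      const void* map_weak_cell;  // cached receiver map (weakly held)
      const void* transition;     // transition map, or null for none
      const void* handler;        // code object to tail-call on a hit
    };

    // Walks the triples exactly like the loop above; a cleared weak cell
    // or an exhausted array is a miss.
    const void* FindKeyedStoreHandler(const Entry* entries, int count,
                                      const void* receiver_map) {
      for (int i = 0; i < count; ++i) {
        if (entries[i].map_weak_cell != receiver_map) continue;
        return entries[i].handler;  // transition, if any, is loaded first
      }
      return nullptr;  // exhausted the map/handler pairs -> miss
    }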
+
+
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
- Label miss;
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // rdx
+ Register key = VectorStoreICDescriptor::NameRegister(); // rcx
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // rbx
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // rdi
+ DCHECK(VectorStoreICDescriptor::ValueRegister().is(rax)); // rax
+ Register feedback = r8;
+ Register integer_slot = r9;
+ Register receiver_map = r11;
+ DCHECK(!AreAliased(feedback, integer_slot, vector, slot, receiver_map));
+
+ __ SmiToInteger32(integer_slot, slot);
+ __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Try to quickly handle the monomorphic case without knowing for sure
+ // if we have a weak cell in feedback. We do know it's safe to look
+ // at WeakCell::kValueOffset.
+ Label try_array, load_smi_map, compare_map;
+ Label not_array, miss;
+ HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector,
+ integer_slot, &compare_map, &load_smi_map, &try_array);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &not_array);
+ HandlePolymorphicKeyedStoreCase(masm, receiver_map, feedback, integer_slot,
+ r15, r14, &miss);
+
+ __ bind(&not_array);
+ Label try_poly_name;
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ j(not_equal, &try_poly_name);
+
+ Handle<Code> megamorphic_stub =
+ KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ cmpp(key, feedback);
+ __ j(not_equal, &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, feedback, receiver_map, integer_slot, r14, r15, false,
+ &miss);
- // TODO(mvstanton): Implement.
__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, rbx);
+ __ EmitLoadTypeFeedbackVector(rbx);
CallICStub stub(isolate(), state());
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, rbx);
- CallIC_ArrayStub stub(isolate(), state());
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index 09289cf312..1344400d48 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -180,7 +180,7 @@ class RecordWriteStub: public PlatformCodeStub {
break;
}
DCHECK(GetMode(stub) == mode);
- CpuFeatures::FlushICache(stub->instruction_start(), 7);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), 7);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 52ee1444d3..4f08c7e7a6 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/x64/codegen-x64.h"
+
#if V8_TARGET_ARCH_X64
#include "src/codegen.h"
@@ -56,7 +58,7 @@ UnaryMathFunction CreateExpFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
@@ -79,7 +81,7 @@ UnaryMathFunction CreateSqrtFunction() {
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
@@ -690,7 +692,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CpuFeatures::FlushICache(sequence, young_length);
+ Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc
index fe99ee9046..433c3efdfb 100644
--- a/deps/v8/src/x64/frames-x64.cc
+++ b/deps/v8/src/x64/frames-x64.cc
@@ -8,6 +8,7 @@
#include "src/frames.h"
#include "src/x64/assembler-x64-inl.h"
#include "src/x64/assembler-x64.h"
+#include "src/x64/frames-x64.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index a25d5f6f3c..a062df590f 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -31,6 +31,11 @@ const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return rdi; }
const Register VectorStoreICDescriptor::VectorRegister() { return rbx; }
+const Register VectorStoreTransitionDescriptor::SlotRegister() { return rdi; }
+const Register VectorStoreTransitionDescriptor::VectorRegister() { return rbx; }
+const Register VectorStoreTransitionDescriptor::MapRegister() { return r11; }
+
+
const Register StoreTransitionDescriptor::MapRegister() { return rbx; }
@@ -41,14 +46,23 @@ const Register StoreGlobalViaContextDescriptor::SlotRegister() { return rbx; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return rax; }
-const Register InstanceofDescriptor::left() { return rax; }
-const Register InstanceofDescriptor::right() { return rdx; }
+const Register InstanceOfDescriptor::LeftRegister() { return rdx; }
+const Register InstanceOfDescriptor::RightRegister() { return rax; }
+
+
+const Register StringCompareDescriptor::LeftRegister() { return rdx; }
+const Register StringCompareDescriptor::RightRegister() { return rax; }
const Register ArgumentsAccessReadDescriptor::index() { return rdx; }
const Register ArgumentsAccessReadDescriptor::parameter_count() { return rax; }
+const Register ArgumentsAccessNewDescriptor::function() { return rdi; }
+const Register ArgumentsAccessNewDescriptor::parameter_count() { return rcx; }
+const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return rdx; }
+
+
const Register ApiGetterDescriptor::function_address() { return r8; }
@@ -64,10 +78,10 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
-void StoreTransitionDescriptor::InitializePlatformSpecific(
+void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- MapRegister()};
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
+ SlotRegister(), VectorRegister(), MapRegister()};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -102,6 +116,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToStringDescriptor::ReceiverRegister() { return rax; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return rax; }
@@ -182,6 +200,15 @@ void CallConstructDescriptor::InitializePlatformSpecific(
}
+void CallTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments
+ // rdi : the target to call
+ Register registers[] = {rdi, rax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rcx, rbx, rax};
@@ -363,6 +390,18 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ rax, // argument count (including receiver)
+ rbx, // address of first argument
+ rdi // the target callable to be called
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index b936edc7de..dbdd146a1e 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -7,10 +7,10 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/x64/lithium-codegen-x64.h"
namespace v8 {
@@ -127,8 +127,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions need to replace the receiver with the global proxy
// when called as functions (without an explicit receiver object).
- if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
- !info()->is_native() && info()->scope()->has_this_declaration()) {
+ if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
StackArgumentsAccessor args(rsp, scope()->num_parameters());
__ movp(rcx, args.GetReceiverOperand());
@@ -186,16 +185,27 @@ bool LCodeGen::GeneratePrologue() {
SaveCallerDoubles();
}
}
+ return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+ Comment(";;; Prologue begin");
// Possibly allocate a local context.
- int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info_->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in rdi.
- DCHECK(!info()->scope()->is_script_scope());
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+ if (info()->scope()->is_script_scope()) {
+ __ Push(rdi);
+ __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ CallRuntime(Runtime::kNewScriptContext, 2);
+ deopt_mode = Safepoint::kLazyDeopt;
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -203,7 +213,8 @@ bool LCodeGen::GeneratePrologue() {
__ Push(rdi);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoLazyDeopt);
+ RecordSafepoint(deopt_mode);
+
// Context is returned in rax. It replaces the context passed to us.
// It's saved in the stack and kept live in rsi.
__ movp(rsi, rax);
@@ -236,11 +247,7 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; End allocate local context");
}
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
+ Comment(";;; Prologue end");
}
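
The split prologue now picks among three context-allocation paths, and only the script-scope path needs a lazy-deopt safepoint. A one-function summary of the choice (max_fast_slots stands in for FastNewContextStub::kMaximumSlots):

    const char* ContextAllocationPath(bool is_script_scope, int slots,
                                      int max_fast_slots) {
      if (is_script_scope) return "Runtime::kNewScriptContext + lazy deopt";
      if (slots <= max_fast_slots) return "FastNewContextStub, new space";
      return "Runtime::kNewFunctionContext";
    }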
@@ -743,7 +750,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- DCHECK(info()->IsOptimizing() || info()->IsStub());
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
@@ -993,11 +999,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCompare: {
- StringCompareStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -2093,7 +2094,14 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
}
-template<class InstrType>
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
+ int true_block = instr->TrueDestination(chunk_);
+ __ j(cc, chunk_->GetAssemblyLabel(true_block));
+}
+
+
+template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
int false_block = instr->FalseDestination(chunk_);
__ j(cc, chunk_->GetAssemblyLabel(false_block));
@@ -2420,41 +2428,6 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
}
-Condition LCodeGen::EmitIsObject(Register input,
- Label* is_not_object,
- Label* is_object) {
- DCHECK(!input.is(kScratchRegister));
-
- __ JumpIfSmi(input, is_not_object);
-
- __ CompareRoot(input, Heap::kNullValueRootIndex);
- __ j(equal, is_object);
-
- __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
- __ j(not_zero, is_not_object);
-
- __ movzxbl(kScratchRegister,
- FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
- __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ j(below, is_not_object);
- __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
- return below_equal;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
-
- Condition true_cond = EmitIsObject(
- reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
-
- EmitBranch(instr, true_cond);
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -2513,16 +2486,14 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- Token::Value op = instr->op();
-
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ DCHECK(ToRegister(instr->left()).is(rdx));
+ DCHECK(ToRegister(instr->right()).is(rax));
- Condition condition = TokenToCondition(op, false);
+ Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
__ testp(rax, rax);
- EmitBranch(instr, condition);
+ EmitBranch(instr, TokenToCondition(instr->op(), false));
}
@@ -2671,128 +2642,40 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
- InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
- __ Push(ToRegister(instr->left()));
- __ Push(ToRegister(instr->right()));
+ DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+ DCHECK(ToRegister(instr->result()).is(rax));
+ InstanceOfStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- Label true_value, done;
- __ testp(rax, rax);
- __ j(zero, &true_value, Label::kNear);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
- __ bind(&done);
}
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
- }
- LInstruction* instr() override { return instr_; }
- Label* map_check() { return &map_check_; }
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
- DCHECK(ToRegister(instr->context()).is(rsi));
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
-
- // A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result, Label::kNear);
-
- // This is the inlined call site instanceof cache. The two occurrences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- // Use a temp register to avoid memory operands with variable lengths.
- Register map = ToRegister(instr->temp());
- __ movp(map, FieldOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
- __ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
- __ cmpp(map, Operand(kScratchRegister, 0));
- __ j(not_equal, &cache_miss, Label::kNear);
- // Patched to load either true or false.
- __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
-#ifdef DEBUG
- // Check that the code size between patch label and patch sites is invariant.
- Label end_of_patched_code;
- __ bind(&end_of_patched_code);
- DCHECK(true);
-#endif
- __ jmp(&done, Label::kNear);
-
- // The inlined call site cache did not match. Check for null and string
- // before calling the deferred code.
- __ bind(&cache_miss); // Null is not an instance of anything.
- __ CompareRoot(object, Heap::kNullValueRootIndex);
- __ j(equal, &false_result, Label::kNear);
-
- // String values are not instances of anything.
- __ JumpIfNotString(object, kScratchRegister, deferred->entry());
-
- __ bind(&false_result);
- __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
-
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+ LHasInPrototypeChainAndBranch* instr) {
+ Register const object = ToRegister(instr->object());
+ Register const object_map = kScratchRegister;
+ Register const object_prototype = object_map;
+ Register const prototype = ToRegister(instr->prototype());
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- {
- PushSafepointRegistersScope scope(this);
- InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
- InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
- InstanceofStub stub(isolate(), flags);
-
- __ Push(ToRegister(instr->value()));
- __ Push(instr->function());
-
- static const int kAdditionalDelta = kPointerSize == kInt64Size ? 10 : 16;
- int delta =
- masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
- DCHECK(delta >= 0);
- __ PushImm32(delta);
-
- // We are pushing three values on the stack but recording a
- // safepoint with two arguments because stub is going to
- // remove the third argument from the stack before jumping
- // to instanceof builtin on the slow path.
- CallCodeGeneric(stub.GetCode(),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS,
- 2);
- DCHECK(delta == masm_->SizeOfCodeGeneratedSince(map_check));
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
- // Move result to a register that survives the end of the
- // PushSafepointRegisterScope.
- __ movp(kScratchRegister, rax);
+ // The {object} must be a spec object. It's sufficient to know that {object}
+ // is not a smi, since all other non-spec objects have {null} prototypes and
+ // will be ruled out below.
+ if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+ Condition is_smi = __ CheckSmi(object);
+ EmitFalseBranch(instr, is_smi);
}
- __ testp(kScratchRegister, kScratchRegister);
- Label load_false;
- Label done;
- __ j(not_zero, &load_false, Label::kNear);
- __ LoadRoot(rax, Heap::kTrueValueRootIndex);
- __ jmp(&done, Label::kNear);
- __ bind(&load_false);
- __ LoadRoot(rax, Heap::kFalseValueRootIndex);
- __ bind(&done);
+
+ // Loop through the {object}'s prototype chain looking for the {prototype}.
+ __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
+ Label loop;
+ __ bind(&loop);
+ __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
+ __ cmpp(object_prototype, prototype);
+ EmitTrueBranch(instr, equal);
+ __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+ EmitFalseBranch(instr, equal);
+ __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+ __ jmp(&loop);
}
@@ -3521,11 +3404,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
- // Set rax to arguments count if adaption is not needed. Assumes that rax
- // is available to write to at this point.
- if (dont_adapt_arguments) {
- __ Set(rax, arity);
- }
+ // Always initialize rax to the number of actual arguments.
+ __ Set(rax, arity);
// Invoke function.
if (function.is_identical_to(info()->closure())) {
@@ -3588,9 +3468,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(rdi));
DCHECK(ToRegister(instr->result()).is(rax));
- if (instr->hydrogen()->pass_argument_count()) {
- __ Set(rax, instr->arity());
- }
+ __ Set(rax, instr->arity());
// Change context.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
@@ -5580,7 +5458,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// rbx = regexp literal.
// rax = regexp literal clone.
int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
__ Move(rcx, instr->hydrogen()->literals());
__ movp(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -5623,26 +5501,6 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
}
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(rsi));
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->kind());
- __ Move(rbx, instr->hydrogen()->shared_info());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else {
- __ Push(rsi);
- __ Push(instr->hydrogen()->shared_info());
- __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
- Heap::kFalseValueRootIndex);
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->value()).is(rbx));
@@ -5727,32 +5585,33 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
final_branch_condition = not_zero;
} else if (String::Equals(type_name, factory->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label, false_distance);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label, true_distance);
- __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+ // Check for callable and not undetectable objects => true.
+ __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ movzxbl(input, FieldOperand(input, Map::kBitFieldOffset));
+ __ andb(input,
+ Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ __ cmpb(input, Immediate(1 << Map::kIsCallable));
final_branch_condition = equal;
} else if (String::Equals(type_name, factory->object_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, true_label, true_distance);
- __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, input);
__ j(below, false_label, false_distance);
- __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, false_label, false_distance);
- // Check for undetectable objects => false.
+ // Check for callable or undetectable objects => false.
__ testb(FieldOperand(input, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsUndetectable));
+ Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
final_branch_condition = zero;
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
} else if (String::Equals(type_name, factory->type##_string())) { \
__ JumpIfSmi(input, false_label, false_distance); \
- __ movp(input, FieldOperand(input, HeapObject::kMapOffset)); \
- __ CompareRoot(input, Heap::k##Type##MapRootIndex); \
+ __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset), \
+ Heap::k##Type##MapRootIndex); \
final_branch_condition = equal;
SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
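
The typeof "function" test above no longer compares instance types; it masks the map's bit field down to the callable and undetectable bits and requires exactly "callable". A self-contained sketch of that mask-and-compare (the bit arguments are stand-ins for 1 << Map::kIsCallable and 1 << Map::kIsUndetectable):

    #include <cstdint>

    bool TypeofIsFunction(uint8_t bit_field, uint8_t callable_bit,
                          uint8_t undetectable_bit) {
      // Keep only the two bits of interest, then require callable alone.
      return (bit_field & (callable_bit | undetectable_bit)) == callable_bit;
    }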
@@ -5793,7 +5652,7 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index b08eff1952..e05b310dec 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -99,8 +99,6 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register object,
@@ -255,7 +253,9 @@ class LCodeGen: public LCodeGenBase {
// EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
- template<class InstrType>
+ template <class InstrType>
+ void EmitTrueBranch(InstrType instr, Condition cc);
+ template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition cc);
void EmitNumberUntagD(LNumberUntagD* instr, Register input,
XMMRegister result, NumberUntagDMode mode);
@@ -265,13 +265,6 @@ class LCodeGen: public LCodeGenBase {
// true and false label should be made, to optimize fallthrough.
Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Label* is_not_object,
- Label* is_object);
-
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 965b63c4cf..9df3a7dabf 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/x64/lithium-x64.h"
+
#include <sstream>
#if V8_TARGET_ARCH_X64
@@ -181,13 +183,6 @@ void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
value()->PrintTo(stream);
@@ -945,24 +940,16 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall()) {
+ if (instr->IsCall() || instr->IsPrologue()) {
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- LInstruction* instruction_needing_environment = NULL;
if (hydrogen_val->HasObservableSideEffects()) {
HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- instruction_needing_environment = instr;
sim->ReplayEnvironment(current_block_->last_environment());
hydrogen_value_for_lazy_bailout = sim;
}
LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
}
}
@@ -972,6 +959,11 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
}
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+ return new (zone()) LPrologue();
+}
+
+
LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
return new(zone()) LDebugBreak();
}
@@ -1016,21 +1008,21 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left = UseFixed(instr->left(), rax);
- LOperand* right = UseFixed(instr->right(), rdx);
+ LOperand* left =
+ UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+ LOperand* right =
+ UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
LOperand* context = UseFixed(instr->context(), rsi);
- LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
+ LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
}
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(UseFixed(instr->context(), rsi),
- UseFixed(instr->left(), rax),
- FixedTemp(rdi));
- return MarkAsCall(DefineFixed(result, rax), instr);
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+ HHasInPrototypeChainAndBranch* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* prototype = UseRegister(instr->prototype());
+ return new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
}
@@ -1734,12 +1726,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
}
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- DCHECK(instr->value()->representation().IsTagged());
- return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
@@ -2542,13 +2528,6 @@ LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), rsi);
- LFunctionLiteral* result = new(zone()) LFunctionLiteral(context);
- return MarkAsCall(DefineFixed(result, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 2ba248642c..6129516515 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -79,19 +79,17 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
+ V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -130,6 +128,7 @@ class LCodeGen;
V(OsrEntry) \
V(Parameter) \
V(Power) \
+ V(Prologue) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -242,8 +241,6 @@ class LInstruction : public ZoneObject {
return IsCall();
}
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
// Interface to the register allocator and iterators.
bool IsMarkedAsCall() const { return IsCall(); }
@@ -396,6 +393,12 @@ class LGoto final : public LTemplateInstruction<0, 0, 0> {
};
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
public:
LLazyBailout() : gap_instructions_size_(0) { }
@@ -985,21 +988,6 @@ class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsObjectAndBranch final : public LControlInstruction<1, 0> {
- public:
- explicit LIsObjectAndBranch(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1176,33 +1164,19 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
};
-class LInstanceOfKnownGlobal final : public LTemplateInstruction<1, 2, 1> {
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
public:
- LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
+ LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+ inputs_[0] = object;
+ inputs_[1] = prototype;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+ LOperand* object() const { return inputs_[0]; }
+ LOperand* prototype() const { return inputs_[1]; }
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) override {
- lazy_deopt_env_ = env;
- }
-
- private:
- LEnvironment* lazy_deopt_env_;
+ DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+ "has-in-prototype-chain-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
};
@@ -2560,19 +2534,6 @@ class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFunctionLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 4e651274ba..ea837dca4b 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -8,7 +8,6 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
#include "src/heap/heap.h"
#include "src/x64/assembler-x64.h"
@@ -698,8 +697,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
@@ -708,25 +706,25 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
// arguments match the expected number of arguments. Fake a
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
- GetBuiltinEntry(rdx, id);
+ GetBuiltinEntry(rdx, native_context_index);
InvokeCode(rdx, expected, expected, flag, call_wrapper);
}
void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
+ int native_context_index) {
// Load the builtins object into target register.
movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- movp(target, FieldOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+ movp(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
+ movp(target, ContextOperand(target, native_context_index));
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+void MacroAssembler::GetBuiltinEntry(Register target,
+ int native_context_index) {
DCHECK(!target.is(rdi));
// Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(rdi, id);
+ GetBuiltinFunction(rdi, native_context_index);
movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
@@ -2248,90 +2246,6 @@ void MacroAssembler::Test(const Operand& src, Smi* source) {
// ----------------------------------------------------------------------------
-void MacroAssembler::LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- SmiToInteger32(
- mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- shrl(mask, Immediate(1));
- subp(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label is_smi;
- Label load_result_from_cache;
- JumpIfSmi(object, &is_smi);
- CheckMap(object,
- isolate()->factory()->heap_number_map(),
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- xorp(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- andp(scratch, mask);
- // Each entry in string cache consists of two pointer sized fields,
- // but times_twice_pointer_size (multiplication by 16) scale factor
- // is not supported by addrmode on x64 platform.
- // So we have to premultiply entry index before lookup.
- shlp(scratch, Immediate(kPointerSizeLog2 + 1));
-
- Register index = scratch;
- Register probe = mask;
- movp(probe,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- JumpIfSmi(probe, not_found);
- movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
- ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
- j(parity_even, not_found); // Bail out if NaN is involved.
- j(not_equal, not_found); // The cache did not contain this value.
- jmp(&load_result_from_cache);
-
- bind(&is_smi);
- SmiToInteger32(scratch, object);
- andp(scratch, mask);
- // Each entry in string cache consists of two pointer sized fields,
- // but times_twice_pointer_size (multiplication by 16) scale factor
- // is not supported by addrmode on x64 platform.
- // So we have to premultiply entry index before lookup.
- shlp(scratch, Immediate(kPointerSizeLog2 + 1));
-
- // Check if the entry is the smi we are looking for.
- cmpp(object,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize));
- j(not_equal, not_found);
-
- // Get the result from the cache.
- bind(&load_result_from_cache);
- movp(result,
- FieldOperand(number_string_cache,
- index,
- times_1,
- FixedArray::kHeaderSize + kPointerSize));
- IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
-}
-
-
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
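
The removed LookupNumberStringCache emitted this hash inline. A compilable sketch of the same arithmetic, assuming only what the comments above state (two pointer-sized fields per cache entry; everything else is a stand-in): smis hash to their own value, doubles to the xor of their upper and lower 32-bit words, and the entry index is premultiplied because x64 addressing lacks a times-16 scale.

    #include <cstdint>
    #include <cstring>

    // Entry index into the number string cache for a heap-number value.
    uint32_t NumberStringCacheIndex(double value, uint32_t cache_length) {
      uint32_t mask = cache_length / 2 - 1;   // two fields (number, string) per entry
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t hash = static_cast<uint32_t>(bits >> 32) ^ static_cast<uint32_t>(bits);
      return hash & mask;                     // byte offset = index * 2 * kPointerSize
    }

    // For smis the hash is simply the untagged integer value itself.
    uint32_t NumberStringCacheIndexForSmi(int32_t value, uint32_t cache_length) {
      return static_cast<uint32_t>(value) & (cache_length / 2 - 1);
    }
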
@@ -3395,6 +3309,18 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertFunction(Register object) {
+ if (emit_debug_code()) {
+ testb(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAFunction);
+ Push(object);
+ CmpObjectType(object, JS_FUNCTION_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotAFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
Label done_checking;
@@ -3447,44 +3373,17 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
Label done, loop;
movp(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
bind(&loop);
- JumpIfSmi(result, &done);
+ JumpIfSmi(result, &done, Label::kNear);
CmpObjectType(result, MAP_TYPE, temp);
- j(not_equal, &done);
+ j(not_equal, &done, Label::kNear);
movp(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
jmp(&loop);
bind(&done);
}
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Label* miss,
- bool miss_on_bound_function) {
- Label non_instance;
- if (miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- testl(function, Immediate(kSmiTagMask));
- j(zero, miss);
-
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss);
-
- movp(kScratchRegister,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
- // field).
- TestBitSharedFunctionInfoSpecialField(kScratchRegister,
- SharedFunctionInfo::kCompilerHintsOffset,
- SharedFunctionInfo::kBoundFunction);
- j(not_zero, miss);
-
- // Make sure that the function has an instance prototype.
- testb(FieldOperand(result, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance, Label::kNear);
- }
-
+void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
+ Label* miss) {
// Get the prototype or initial map from the function.
movp(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -3503,15 +3402,6 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Get the prototype from the initial map.
movp(result, FieldOperand(result, Map::kPrototypeOffset));
- if (miss_on_bound_function) {
- jmp(&done, Label::kNear);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- GetMapConstructor(result, result, kScratchRegister);
- }
-
// All done.
bind(&done);
}
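
With the bound-function and non-instance-prototype paths gone, what remains is a two-way load. Roughly, with assumed simplified types (not V8's object model):

    struct Map { void* prototype; };           // Map::kPrototypeOffset
    struct Function {
      bool has_initial_map;                    // slot holds a Map, not the prototype
      Map* initial_map;                        // valid when has_initial_map
      void* direct_prototype;                  // valid otherwise
    };

    // kPrototypeOrInitialMapOffset holds either the prototype itself or the
    // initial map; in the latter case the prototype is read from the map.
    void* TryGetFunctionPrototype(const Function& f) {
      return f.has_initial_map ? f.initial_map->prototype : f.direct_prototype;
    }
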
@@ -3657,10 +3547,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Label invoke;
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
+ Set(rax, actual.immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
- Set(rax, actual.immediate());
if (expected.immediate() ==
SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
// Don't worry about adapting arguments for built-ins that
@@ -3678,10 +3568,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
+ Set(rax, actual.immediate());
cmpp(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
DCHECK(expected.reg().is(rbx));
- Set(rax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
@@ -3689,6 +3579,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
j(equal, &invoke, Label::kNear);
DCHECK(actual.reg().is(rax));
DCHECK(expected.reg().is(rbx));
+ } else {
+ Move(rax, actual.reg());
}
}
@@ -3741,6 +3633,13 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
+void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+ movp(vector, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ movp(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ movp(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// Out-of-line constant pool not implemented on x64.
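
EmitLoadTypeFeedbackVector above is three dependent loads; a compilable model with stand-in types (the real offsets are the named constants in the code):

    struct TypeFeedbackVector;                          // opaque stand-in
    struct SharedFunctionInfo { TypeFeedbackVector* feedback_vector; };
    struct JSFunction { SharedFunctionInfo* shared; };
    struct JavaScriptFrame { JSFunction* function; };   // kFunctionOffset slot

    TypeFeedbackVector* LoadTypeFeedbackVector(const JavaScriptFrame& frame) {
      return frame.function->shared->feedback_vector;   // frame -> shared -> vector
    }
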
@@ -4580,6 +4479,12 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+ movp(dst, GlobalObjectOperand());
+ movp(dst, FieldOperand(dst, GlobalObject::kGlobalProxyOffset));
+}
+
+
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
@@ -4772,7 +4677,7 @@ CodePatcher::CodePatcher(byte* address, int size)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CpuFeatures::FlushICache(address_, size_);
+ Assembler::FlushICacheWithoutIsolate(address_, size_);
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 7852d39c03..1fca0e3594 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -133,6 +133,10 @@ class MacroAssembler: public Assembler {
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
+ void LoadRoot(const Operand& destination, Heap::RootListIndex index) {
+ LoadRoot(kScratchRegister, index);
+ movp(destination, kScratchRegister);
+ }
void StoreRoot(Register source, Heap::RootListIndex index);
// Load a root value where the index (or part of it) is variable.
// The variable_offset register is added to the fixed_offset value
@@ -144,6 +148,21 @@ class MacroAssembler: public Assembler {
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
+ Label::Distance if_equal_distance = Label::kNear) {
+ CompareRoot(with, index);
+ j(equal, if_equal, if_equal_distance);
+ }
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, Heap::RootListIndex index,
+ Label* if_not_equal,
+ Label::Distance if_not_equal_distance = Label::kNear) {
+ CompareRoot(with, index);
+ j(not_equal, if_not_equal, if_not_equal_distance);
+ }
+
// These functions do not arrange the registers in any particular order so
// they are not useful for calls that can cause a GC. The caller can
// exclude up to 3 registers that do not need to be saved and restored.
@@ -375,17 +394,15 @@ class MacroAssembler: public Assembler {
InvokeFlag flag,
const CallWrapper& call_wrapper);
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+ // Invoke specified builtin JavaScript function.
+ void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void GetBuiltinFunction(Register target, int native_context_index);
// Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ void GetBuiltinEntry(Register target, int native_context_index);
// ---------------------------------------------------------------------------
@@ -742,17 +759,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String macros.
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- void LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found);
-
// If object is a string, its map is loaded into object_map.
void JumpIfNotString(Register object,
Register object_map,
@@ -1099,6 +1105,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+ void AssertFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
@@ -1240,10 +1249,7 @@ class MacroAssembler: public Assembler {
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other register may be
// clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Label* miss,
- bool miss_on_bound_function = false);
+ void TryGetFunctionPrototype(Register function, Register result, Label* miss);
// Picks out an array index from the hash field.
// Register use:
@@ -1254,6 +1260,9 @@ class MacroAssembler: public Assembler {
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst);
+
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
@@ -1417,6 +1426,9 @@ class MacroAssembler: public Assembler {
return SafepointRegisterStackIndex(reg.code());
}
+ // Load the type feedback vector from a JavaScript frame.
+ void EmitLoadTypeFeedbackVector(Register vector);
+
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
diff --git a/deps/v8/src/x64/simulator-x64.cc b/deps/v8/src/x64/simulator-x64.cc
index f7f2fb4bb4..701842eab3 100644
--- a/deps/v8/src/x64/simulator-x64.cc
+++ b/deps/v8/src/x64/simulator-x64.cc
@@ -1,3 +1,5 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+
+#include "src/x64/simulator-x64.h"
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
index 0e3de87e96..4543047080 100644
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ b/deps/v8/src/x87/assembler-x87-inl.h
@@ -134,7 +134,7 @@ void RelocInfo::set_target_object(Object* target,
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Memory::Object_at(pc_) = target;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
host() != NULL &&
@@ -200,7 +200,7 @@ void RelocInfo::set_target_cell(Cell* cell,
Address address = cell->address() + Cell::kValueOffset;
Memory::Address_at(pc_) = address;
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
// TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -281,7 +281,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(this);
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
visitor->VisitCodeTarget(this);
} else if (mode == RelocInfo::CELL) {
@@ -306,7 +306,7 @@ void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitEmbeddedPointer(heap, this);
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ Assembler::FlushICacheWithoutIsolate(pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::CELL) {
@@ -460,7 +460,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
int32_t* p = reinterpret_cast<int32_t*>(pc);
*p = target - (pc + sizeof(int32_t));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(p, sizeof(int32_t));
+ Assembler::FlushICacheWithoutIsolate(p, sizeof(int32_t));
}
}
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index 1770477ae7..323d2434f6 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -34,6 +34,8 @@
// significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
+#include "src/x87/assembler-x87.h"
+
#if V8_TARGET_ARCH_X87
#include "src/base/bits.h"
@@ -1456,12 +1458,12 @@ void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
}
-void Assembler::j(Condition cc, Handle<Code> code) {
+void Assembler::j(Condition cc, Handle<Code> code, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
// 0000 1111 1000 tttn #32-bit disp
EMIT(0x0F);
EMIT(0x80 | cc);
- emit(code, RelocInfo::CODE_TARGET);
+ emit(code, rmode);
}
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index 6d631785d6..1f454bcd90 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -842,7 +842,8 @@ class Assembler : public AssemblerBase {
Label* L,
Label::Distance distance = Label::kFar);
void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
- void j(Condition cc, Handle<Code> code);
+ void j(Condition cc, Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
// Floating-point operations
void fld(int i);
diff --git a/deps/v8/src/x87/builtins-x87.cc b/deps/v8/src/x87/builtins-x87.cc
index 9acafd2ff8..bb9829be34 100644
--- a/deps/v8/src/x87/builtins-x87.cc
+++ b/deps/v8/src/x87/builtins-x87.cc
@@ -24,13 +24,20 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// -- eax : number of arguments excluding receiver
// -- edi : called function (only guaranteed when
// extra_args requires it)
- // -- esi : context
// -- esp[0] : return address
// -- esp[4] : last argument
// -- ...
// -- esp[4 * argc] : first argument (argc == eax)
// -- esp[4 * (argc +1)] : receiver
// -----------------------------------
+ __ AssertFunction(edi);
+
+ // Make sure we operate in the context of the called function (for example
+ // ConstructStubs implemented in C++ will be run in the context of the caller
+ // instead of the callee, due to the way that [[Construct]] is defined for
+ // ordinary functions).
+ // TODO(bmeurer): Can we make this more robust?
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Insert extra arguments.
int num_extra_args = 0;
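
The fix is a single load, but the invariant matters: from this point the stub runs in the callee's context rather than the caller's. A one-line model with assumed types:

    struct Context;
    struct JSFunction { Context* context; };   // JSFunction::kContextOffset

    // Mirrors: mov(esi, FieldOperand(edi, JSFunction::kContextOffset)).
    Context* EnterCalleeContext(const JSFunction* callee) { return callee->context; }
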
@@ -100,8 +107,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool create_memento) {
+ bool is_api_function) {
// ----------- S t a t e -------------
// -- eax: number of arguments
// -- edi: constructor function
@@ -109,9 +115,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// -- edx: original constructor
// -----------------------------------
- // Should never create mementos for api functions.
- DCHECK(!is_api_function || !create_memento);
-
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
@@ -192,9 +195,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// eax: initial map
__ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
__ shl(edi, kPointerSizeLog2);
- if (create_memento) {
- __ add(edi, Immediate(AllocationMemento::kSize));
- }
__ Allocate(edi, ebx, edi, no_reg, &rt_call, NO_ALLOCATION_FLAGS);
@@ -203,7 +203,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
- // edi: start of next object (including memento if create_memento)
+ // edi: start of next object
__ mov(Operand(ebx, JSObject::kMapOffset), eax);
__ mov(ecx, factory->empty_fixed_array());
__ mov(Operand(ebx, JSObject::kPropertiesOffset), ecx);
@@ -211,7 +211,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Set extra fields in the newly allocated object.
// eax: initial map
// ebx: JSObject
- // edi: start of next object (including memento if create_memento)
+ // edi: start of next object
// esi: slack tracking counter (non-API function case)
__ mov(edx, factory->undefined_value());
__ lea(ecx, Operand(ebx, JSObject::kHeaderSize));
@@ -244,22 +244,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ bind(&no_inobject_slack_tracking);
}
- if (create_memento) {
- __ lea(esi, Operand(edi, -AllocationMemento::kSize));
- __ InitializeFieldsWithFiller(ecx, esi, edx);
-
- // Fill in memento fields if necessary.
- // esi: points to the allocated but uninitialized memento.
- __ mov(Operand(esi, AllocationMemento::kMapOffset),
- factory->allocation_memento_map());
- // Get the cell or undefined.
- __ mov(edx, Operand(esp, 3 * kPointerSize));
- __ AssertUndefinedOrAllocationSite(edx);
- __ mov(Operand(esi, AllocationMemento::kAllocationSiteOffset),
- edx);
- } else {
- __ InitializeFieldsWithFiller(ecx, edi, edx);
- }
+ __ InitializeFieldsWithFiller(ecx, edi, edx);
// Add the object tag to make the JSObject real, so that we can continue
// and jump into the continuation code at any time from now on.
@@ -275,12 +260,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// edx: original constructor
__ bind(&rt_call);
int offset = kPointerSize;
- if (create_memento) {
- // Get the cell or allocation site.
- __ mov(edi, Operand(esp, kPointerSize * 3));
- __ push(edi); // argument 1: allocation site
- offset += kPointerSize;
- }
// Must restore esi (context) and edi (constructor) before calling
// runtime.
@@ -288,35 +267,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ mov(edi, Operand(esp, offset));
__ push(edi); // argument 2/1: constructor function
__ push(edx); // argument 3/2: original constructor
- if (create_memento) {
- __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 3);
- } else {
- __ CallRuntime(Runtime::kNewObject, 2);
- }
+ __ CallRuntime(Runtime::kNewObject, 2);
__ mov(ebx, eax); // store result in ebx
- // Runtime_NewObjectWithAllocationSite increments allocation count.
- // Skip the increment.
- Label count_incremented;
- if (create_memento) {
- __ jmp(&count_incremented);
- }
-
// New object allocated.
// ebx: newly allocated object
__ bind(&allocated);
- if (create_memento) {
- __ mov(ecx, Operand(esp, 3 * kPointerSize));
- __ cmp(ecx, masm->isolate()->factory()->undefined_value());
- __ j(equal, &count_incremented);
- // ecx is an AllocationSite. We are creating a memento from it, so we
- // need to increment the memento create count.
- __ add(FieldOperand(ecx, AllocationSite::kPretenureCreateCountOffset),
- Immediate(Smi::FromInt(1)));
- __ bind(&count_incremented);
- }
-
// Restore the parameters.
__ pop(edx); // new.target
__ pop(edi); // Constructor function.
@@ -405,12 +362,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+ Generate_JSConstructStubHelper(masm, false);
}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false);
+ Generate_JSConstructStubHelper(masm, true);
}
@@ -492,7 +449,6 @@ enum IsTagged { kEaxIsSmiTagged, kEaxIsUntaggedInt };
// Clobbers ecx, edx, edi; preserves all other registers.
static void Generate_CheckStackOverflow(MacroAssembler* masm,
- const int calleeOffset,
IsTagged eax_is_tagged) {
// eax : the number of items to be pushed to the stack
//
@@ -517,12 +473,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm,
__ j(greater, &okay); // Signed comparison.
// Out of stack space.
- __ push(Operand(ebp, calleeOffset)); // push this
- if (eax_is_tagged == kEaxIsUntaggedInt) {
- __ SmiTag(eax);
- }
- __ push(eax);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&okay);
}
@@ -538,15 +489,16 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ // Set up the context (we need to use the caller context from the isolate).
+ ExternalReference context_address(Isolate::kContextAddress,
+ masm->isolate());
+ __ mov(esi, Operand::StaticVariable(context_address));
+
// Load the previous frame pointer (ebx) to access C arguments
__ mov(ebx, Operand(ebp, 0));
- // Get the function from the frame and setup the context.
- __ mov(ecx, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
- __ mov(esi, FieldOperand(ecx, JSFunction::kContextOffset));
-
// Push the function and the receiver onto the stack.
- __ push(ecx);
+ __ push(Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
__ push(Operand(ebx, EntryFrameConstants::kReceiverArgOffset));
// Load the number of arguments and setup pointer to the arguments.
@@ -554,17 +506,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(ebx, Operand(ebx, EntryFrameConstants::kArgvOffset));
// Check if we have enough stack space to push all arguments.
- // The function is the first thing that was pushed above after entering
- // the internal frame.
- const int kFunctionOffset =
- InternalFrameConstants::kCodeOffset - kPointerSize;
// Expects argument count in eax. Clobbers ecx, edx, edi.
- Generate_CheckStackOverflow(masm, kFunctionOffset, kEaxIsUntaggedInt);
+ Generate_CheckStackOverflow(masm, kEaxIsUntaggedInt);
// Copy arguments to the stack in a loop.
Label loop, entry;
__ Move(ecx, Immediate(0));
- __ jmp(&entry);
+ __ jmp(&entry, Label::kNear);
__ bind(&loop);
__ mov(edx, Operand(ebx, ecx, times_4, 0)); // push parameter from argv
__ push(Operand(edx, 0)); // dereference handle
@@ -573,21 +521,18 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ cmp(ecx, eax);
__ j(not_equal, &loop);
- // Get the function from the stack and call it.
- // kPointerSize for the receiver.
- __ mov(edi, Operand(esp, eax, times_4, kPointerSize));
+ // Load the previous frame pointer (ebx) to access C arguments
+ __ mov(ebx, Operand(ebp, 0));
+
+ // Get the new.target and function from the frame.
+ __ mov(edx, Operand(ebx, EntryFrameConstants::kNewTargetArgOffset));
+ __ mov(edi, Operand(ebx, EntryFrameConstants::kFunctionArgOffset));
// Invoke the code.
- if (is_construct) {
- // No type feedback cell is available
- __ mov(ebx, masm->isolate()->factory()->undefined_value());
- CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
- __ CallStub(&stub);
- } else {
- ParameterCount actual(eax);
- __ InvokeFunction(edi, actual, CALL_FUNCTION,
- NullCallWrapper());
- }
+ Handle<Code> builtin = is_construct
+ ? masm->isolate()->builtins()->Construct()
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
// Exit the internal frame. Notice that this also removes the empty
// context and the function left on the stack by the code
@@ -660,7 +605,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
ExternalReference::address_of_real_stack_limit(masm->isolate());
__ cmp(ecx, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ bind(&ok);
// If ok, push undefined as the initial value for all register file entries.
@@ -725,8 +670,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ add(kInterpreterDispatchTableRegister,
Immediate(FixedArray::kHeaderSize - kHeapObjectTag));
- // TODO(rmcilroy) Push our context as a stack located parameter of the
- // bytecode handler.
+ // Push context as a stack located parameter to the bytecode handler.
+ DCHECK_EQ(-1, kInterpreterContextSpillSlot);
+ __ push(esi);
// Dispatch to the first bytecode handler for the function.
__ movzx_b(esi, Operand(kInterpreterBytecodeArrayRegister,
@@ -751,9 +697,14 @@ void Builtins::Generate_InterpreterExitTrampoline(MacroAssembler* masm) {
// Leave the frame (also dropping the register file).
__ leave();
- // Return droping receiver + arguments.
- // TODO(rmcilroy): Get number of arguments from BytecodeArray.
- __ Ret(1 * kPointerSize, ecx);
+
+ // Drop receiver + arguments and return.
+ __ mov(ebx, FieldOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kParameterSizeOffset));
+ __ pop(ecx);
+ __ add(esp, ebx);
+ __ push(ecx);
+ __ ret(0);
}
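
The new epilogue pops the return address, drops the receiver and arguments by the parameter size recorded in the BytecodeArray, re-pushes the return address, and returns. A small sketch of the stack arithmetic, assuming (as the "Drop receiver + arguments" comment implies) that the recorded parameter size already covers the receiver slot:

    #include <cassert>

    // Total bytes removed from the stack by the exit sequence: the return
    // address plus the BytecodeArray's parameter size.
    int BytesPoppedOnExit(int parameter_size_bytes, int kPointerSize = 4) {
      return kPointerSize /* return address */ + parameter_size_bytes;
    }

    int main() {
      assert(BytesPoppedOnExit(/*receiver + 2 args on ia32*/ 12) == 16);
      return 0;
    }
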
@@ -950,161 +901,50 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
}
+// static
void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
- Factory* factory = masm->isolate()->factory();
-
+ // Stack Layout:
+ // esp[0] : Return address
+ // esp[4] : Argument n
+ // esp[8] : Argument n-1
+ // ...
+ // esp[4 * n] : Argument 1
+ // esp[4 * (n + 1)] : Receiver (callable to call)
+ //
+ // eax contains the number of arguments, n, not counting the receiver.
+ //
// 1. Make sure we have at least one argument.
- { Label done;
+ {
+ Label done;
__ test(eax, eax);
- __ j(not_zero, &done);
- __ pop(ebx);
- __ push(Immediate(factory->undefined_value()));
- __ push(ebx);
+ __ j(not_zero, &done, Label::kNear);
+ __ PopReturnAddressTo(ebx);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
+ __ PushReturnAddressFrom(ebx);
__ inc(eax);
__ bind(&done);
}
- // 2. Get the function to call (passed as receiver) from the stack, check
- // if it is a function.
- Label slow, non_function;
- // 1 ~ return address.
- __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ JumpIfSmi(edi, &non_function);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
-
-
- // 3a. Patch the first argument if necessary when calling a function.
- Label shift_arguments;
- __ Move(edx, Immediate(0)); // indicate regular JS_FUNCTION
- { Label convert_to_object, use_global_proxy, patch_receiver;
- // Change context eagerly in case we need the global receiver.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Do not transform the receiver for strict mode functions.
- __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &shift_arguments);
-
- // Do not transform the receiver for natives (shared already in ebx).
- __ test_b(FieldOperand(ebx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &shift_arguments);
-
- // Compute the receiver in sloppy mode.
- __ mov(ebx, Operand(esp, eax, times_4, 0)); // First argument.
-
- // Call ToObject on the receiver if it is not an object, or use the
- // global object if it is null or undefined.
- __ JumpIfSmi(ebx, &convert_to_object);
- __ cmp(ebx, factory->null_value());
- __ j(equal, &use_global_proxy);
- __ cmp(ebx, factory->undefined_value());
- __ j(equal, &use_global_proxy);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &shift_arguments);
-
- __ bind(&convert_to_object);
-
- { // In order to preserve argument count.
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ SmiTag(eax);
- __ push(eax);
-
- __ mov(eax, ebx);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(ebx, eax);
- __ Move(edx, Immediate(0)); // restore
-
- __ pop(eax);
- __ SmiUntag(eax);
- }
-
- // Restore the function to edi.
- __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
- __ jmp(&patch_receiver);
-
- __ bind(&use_global_proxy);
- __ mov(ebx,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset));
-
- __ bind(&patch_receiver);
- __ mov(Operand(esp, eax, times_4, 0), ebx);
-
- __ jmp(&shift_arguments);
- }
-
- // 3b. Check for function proxy.
- __ bind(&slow);
- __ Move(edx, Immediate(1)); // indicate function proxy
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(equal, &shift_arguments);
- __ bind(&non_function);
- __ Move(edx, Immediate(2)); // indicate non-function
+ // 2. Get the callable to call (passed as receiver) from the stack.
+ __ mov(edi, Operand(esp, eax, times_pointer_size, kPointerSize));
- // 3c. Patch the first argument when calling a non-function. The
- // CALL_NON_FUNCTION builtin expects the non-function callee as
- // receiver, so overwrite the first argument which will ultimately
- // become the receiver.
- __ mov(Operand(esp, eax, times_4, 0), edi);
-
- // 4. Shift arguments and return address one slot down on the stack
+ // 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
// the original first argument the new receiver.
- __ bind(&shift_arguments);
- { Label loop;
+ {
+ Label loop;
__ mov(ecx, eax);
__ bind(&loop);
- __ mov(ebx, Operand(esp, ecx, times_4, 0));
- __ mov(Operand(esp, ecx, times_4, kPointerSize), ebx);
+ __ mov(ebx, Operand(esp, ecx, times_pointer_size, 0));
+ __ mov(Operand(esp, ecx, times_pointer_size, kPointerSize), ebx);
__ dec(ecx);
__ j(not_sign, &loop); // While non-negative (to copy return address).
- __ pop(ebx); // Discard copy of return address.
+ __ pop(ebx); // Discard copy of return address.
__ dec(eax); // One fewer argument (first argument is new receiver).
}
- // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
- // or a function proxy via CALL_FUNCTION_PROXY.
- { Label function, non_proxy;
- __ test(edx, edx);
- __ j(zero, &function);
- __ Move(ebx, Immediate(0));
- __ cmp(edx, Immediate(1));
- __ j(not_equal, &non_proxy);
-
- __ pop(edx); // return address
- __ push(edi); // re-add proxy object as additional argument
- __ push(edx);
- __ inc(eax);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-
- __ bind(&non_proxy);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- __ jmp(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
- __ bind(&function);
- }
-
- // 5b. Get the code to call from the function and check that the number of
- // expected arguments matches what we're providing. If so, jump
- // (tail-call) to the code in register edx without checking arguments.
- __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ebx,
- FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(edx, FieldOperand(edi, JSFunction::kCodeEntryOffset));
- __ SmiUntag(ebx);
- __ cmp(eax, ebx);
- __ j(not_equal,
- masm->isolate()->builtins()->ArgumentsAdaptorTrampoline());
-
- ParameterCount expected(0);
- __ InvokeCode(edx, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
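
Step 3 above slides the return address and every argument one slot down, so the callable that was passed as receiver disappears and the original first argument becomes the new receiver. A compilable model on a plain vector "stack" (index 0 plays the role of esp):

    #include <cassert>
    #include <vector>

    // stack[0] = return address, stack[1] = argument n, ...,
    // stack[argc] = argument 1, stack[argc + 1] = receiver (the callable).
    void ShiftArguments(std::vector<int>& stack, int& argc) {
      for (int i = argc; i >= 0; --i)   // walks down to the return address
        stack[i + 1] = stack[i];        // overwrites the receiver at i == argc
      stack.erase(stack.begin());       // pop the duplicated return address
      --argc;                           // argument 1 is the new receiver
    }

    int main() {
      std::vector<int> s = {/*ret*/ 900, /*arg2*/ 22, /*arg1*/ 11, /*callable*/ 7};
      int argc = 2;
      ShiftArguments(s, argc);
      assert(argc == 1);
      assert((s == std::vector<int>{900, 22, 11}));  // arg1 now in the receiver slot
      return 0;
    }
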
@@ -1183,100 +1023,32 @@ static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
__ mov(edi, FieldOperand(edi, SharedFunctionInfo::kFeedbackVectorOffset));
__ push(edi);
- __ push(Operand(ebp, kFunctionOffset)); // push this
+ __ push(Operand(ebp, kFunctionOffset)); // push this
__ push(Operand(ebp, kArgumentsOffset)); // push arguments
if (targetIsArgument) {
- __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_APPLY_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
} else {
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::APPLY_PREPARE_BUILTIN_INDEX, CALL_FUNCTION);
}
- Generate_CheckStackOverflow(masm, kFunctionOffset, kEaxIsSmiTagged);
+ Generate_CheckStackOverflow(masm, kEaxIsSmiTagged);
// Push current index and limit.
const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
- __ push(eax); // limit
- __ push(Immediate(0)); // index
-
- // Get the receiver.
- __ mov(ebx, Operand(ebp, kReceiverOffset));
-
- // Check that the function is a JS function (otherwise it must be a proxy).
- Label push_receiver, use_global_proxy;
- __ mov(edi, Operand(ebp, kFunctionOffset));
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &push_receiver);
-
- // Change context eagerly to get the right global object if necessary.
- __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
-
- // Compute the receiver.
- // Do not transform the receiver for strict mode functions.
- Label call_to_object;
- __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kStrictModeByteOffset),
- 1 << SharedFunctionInfo::kStrictModeBitWithinByte);
- __ j(not_equal, &push_receiver);
-
- Factory* factory = masm->isolate()->factory();
-
- // Do not transform the receiver for natives (shared already in ecx).
- __ test_b(FieldOperand(ecx, SharedFunctionInfo::kNativeByteOffset),
- 1 << SharedFunctionInfo::kNativeBitWithinByte);
- __ j(not_equal, &push_receiver);
-
- // Compute the receiver in sloppy mode.
- // Call ToObject on the receiver if it is not an object, or use the
- // global object if it is null or undefined.
- __ JumpIfSmi(ebx, &call_to_object);
- __ cmp(ebx, factory->null_value());
- __ j(equal, &use_global_proxy);
- __ cmp(ebx, factory->undefined_value());
- __ j(equal, &use_global_proxy);
- STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
- __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &push_receiver);
-
- __ bind(&call_to_object);
- __ mov(eax, ebx);
- ToObjectStub stub(masm->isolate());
- __ CallStub(&stub);
- __ mov(ebx, eax);
- __ jmp(&push_receiver);
-
- __ bind(&use_global_proxy);
- __ mov(ebx,
- Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset));
-
- // Push the receiver.
- __ bind(&push_receiver);
- __ push(ebx);
+ __ Push(eax); // limit
+ __ Push(Immediate(0)); // index
+ __ Push(Operand(ebp, kReceiverOffset)); // receiver
// Loop over the arguments array, pushing each value to the stack
Generate_PushAppliedArguments(masm, kVectorOffset, kArgumentsOffset,
kIndexOffset, kLimitOffset);
- // Call the function.
- Label call_proxy;
- ParameterCount actual(eax);
+ // Call the callable.
+ // TODO(bmeurer): This should be a tail call according to ES6.
__ mov(edi, Operand(ebp, kFunctionOffset));
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &call_proxy);
- __ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
-
- frame_scope.GenerateLeaveFrame();
- __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
-
- // Call the function proxy.
- __ bind(&call_proxy);
- __ push(edi); // add function proxy as last argument
- __ inc(eax);
- __ Move(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- __ call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
+ __ Call(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
// Leave internal frame.
}
@@ -1326,9 +1098,10 @@ static void Generate_ConstructHelper(MacroAssembler* masm) {
__ push(Operand(ebp, kFunctionOffset));
__ push(Operand(ebp, kArgumentsOffset));
__ push(Operand(ebp, kNewTargetOffset));
- __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+ __ InvokeBuiltin(Context::REFLECT_CONSTRUCT_PREPARE_BUILTIN_INDEX,
+ CALL_FUNCTION);
- Generate_CheckStackOverflow(masm, kFunctionOffset, kEaxIsSmiTagged);
+ Generate_CheckStackOverflow(masm, kEaxIsSmiTagged);
// Push current index and limit.
const int kLimitOffset = kVectorOffset - 1 * kPointerSize;
@@ -1432,7 +1205,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
}
-void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+// static
+void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : number of arguments
// -- edi : constructor function
@@ -1440,120 +1214,137 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -- esp[(argc - n) * 4] : arg[n] (zero-based)
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->string_ctor_calls(), 1);
- if (FLAG_debug_code) {
- __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ Assert(equal, kUnexpectedStringFunction);
+ // 1. Load the first argument into eax and get rid of the rest (including the
+ // receiver).
+ Label no_arguments;
+ {
+ __ test(eax, eax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
+ __ mov(eax, ebx);
}
- // Load the first argument into eax and get rid of the rest
- // (including the receiver).
- Label no_arguments;
- __ test(eax, eax);
- __ j(zero, &no_arguments);
- __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
- __ pop(ecx);
- __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
- __ push(ecx);
- __ mov(eax, ebx);
-
- // Lookup the argument in the number to string cache.
- Label not_cached, argument_is_string;
- __ LookupNumberStringCache(eax, // Input.
- ebx, // Result.
- ecx, // Scratch 1.
- edx, // Scratch 2.
- &not_cached);
- __ IncrementCounter(counters->string_ctor_cached_number(), 1);
- __ bind(&argument_is_string);
- // ----------- S t a t e -------------
- // -- ebx : argument converted to string
- // -- edi : constructor function
- // -- esp[0] : return address
- // -----------------------------------
+ // 2a. At least one argument, return eax if it's a string, otherwise
+ // dispatch to appropriate conversion.
+ Label to_string, symbol_descriptive_string;
+ {
+ __ JumpIfSmi(eax, &to_string, Label::kNear);
+ STATIC_ASSERT(FIRST_NONSTRING_TYPE == SYMBOL_TYPE);
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx);
+ __ j(above, &to_string, Label::kNear);
+ __ j(equal, &symbol_descriptive_string, Label::kNear);
+ __ Ret();
+ }
- // Allocate a JSValue and put the tagged pointer into eax.
- Label gc_required;
- __ Allocate(JSValue::kSize,
- eax, // Result.
- ecx, // New allocation top (we ignore it).
- no_reg,
- &gc_required,
- TAG_OBJECT);
-
- // Set the map.
- __ LoadGlobalFunctionInitialMap(edi, ecx);
- if (FLAG_debug_code) {
- __ cmpb(FieldOperand(ecx, Map::kInstanceSizeOffset),
- JSValue::kSize >> kPointerSizeLog2);
- __ Assert(equal, kUnexpectedStringWrapperInstanceSize);
- __ cmpb(FieldOperand(ecx, Map::kUnusedPropertyFieldsOffset), 0);
- __ Assert(equal, kUnexpectedUnusedPropertiesOfStringWrapper);
+ // 2b. No arguments, return the empty string (and pop the receiver).
+ __ bind(&no_arguments);
+ {
+ __ LoadRoot(eax, Heap::kempty_stringRootIndex);
+ __ ret(1 * kPointerSize);
}
- __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
- // Set properties and elements.
- Factory* factory = masm->isolate()->factory();
- __ Move(ecx, Immediate(factory->empty_fixed_array()));
- __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
- __ mov(FieldOperand(eax, JSObject::kElementsOffset), ecx);
+ // 3a. Convert eax to a string.
+ __ bind(&to_string);
+ {
+ ToStringStub stub(masm->isolate());
+ __ TailCallStub(&stub);
+ }
- // Set the value.
- __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+ // 3b. Convert symbol in eax to a string.
+ __ bind(&symbol_descriptive_string);
+ {
+ __ PopReturnAddressTo(ecx);
+ __ Push(eax);
+ __ PushReturnAddressFrom(ecx);
+ __ TailCallRuntime(Runtime::kSymbolDescriptiveString, 1, 1);
+ }
+}
- // Ensure the object is fully initialized.
- STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
- // We're done. Return.
- __ ret(0);
+// static
+void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : number of arguments
+ // -- edi : constructor function
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
- // The argument was not found in the number to string cache. Check
- // if it's a string already before calling the conversion builtin.
- Label convert_argument;
- __ bind(&not_cached);
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfSmi(eax, &convert_argument);
- Condition is_string = masm->IsObjectStringType(eax, ebx, ecx);
- __ j(NegateCondition(is_string), &convert_argument);
- __ mov(ebx, eax);
- __ IncrementCounter(counters->string_ctor_string_value(), 1);
- __ jmp(&argument_is_string);
-
- // Invoke the conversion builtin and put the result into ebx.
- __ bind(&convert_argument);
- __ IncrementCounter(counters->string_ctor_conversions(), 1);
+ // 1. Load the first argument into ebx and get rid of the rest (including the
+ // receiver).
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edi); // Preserve the function.
- __ push(eax);
- __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
- __ pop(edi);
+ Label no_arguments, done;
+ __ test(eax, eax);
+ __ j(zero, &no_arguments, Label::kNear);
+ __ mov(ebx, Operand(esp, eax, times_pointer_size, 0));
+ __ jmp(&done, Label::kNear);
+ __ bind(&no_arguments);
+ __ LoadRoot(ebx, Heap::kempty_stringRootIndex);
+ __ bind(&done);
+ __ PopReturnAddressTo(ecx);
+ __ lea(esp, Operand(esp, eax, times_pointer_size, kPointerSize));
+ __ PushReturnAddressFrom(ecx);
}
- __ mov(ebx, eax);
- __ jmp(&argument_is_string);
- // Load the empty string into ebx, remove the receiver from the
- // stack, and jump back to the case where the argument is a string.
- __ bind(&no_arguments);
- __ Move(ebx, Immediate(factory->empty_string()));
- __ pop(ecx);
- __ lea(esp, Operand(esp, kPointerSize));
- __ push(ecx);
- __ jmp(&argument_is_string);
+ // 2. Make sure ebx is a string.
+ {
+ Label convert, done_convert;
+ __ JumpIfSmi(ebx, &convert, Label::kNear);
+ __ CmpObjectType(ebx, FIRST_NONSTRING_TYPE, edx);
+ __ j(below, &done_convert);
+ __ bind(&convert);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ ToStringStub stub(masm->isolate());
+ __ Push(edi);
+ __ Move(eax, ebx);
+ __ CallStub(&stub);
+ __ Move(ebx, eax);
+ __ Pop(edi);
+ }
+ __ bind(&done_convert);
+ }
- // At this point the argument is already a string. Call runtime to
- // create a string wrapper.
- __ bind(&gc_required);
- __ IncrementCounter(counters->string_ctor_gc_required(), 1);
+ // 3. Allocate a JSValue wrapper for the string.
{
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(ebx);
- __ CallRuntime(Runtime::kNewStringWrapper, 1);
+ // ----------- S t a t e -------------
+ // -- ebx : the first argument
+ // -- edi : constructor function
+ // -----------------------------------
+
+ Label allocate, done_allocate;
+ __ Allocate(JSValue::kSize, eax, ecx, no_reg, &allocate, TAG_OBJECT);
+ __ bind(&done_allocate);
+
+ // Initialize the JSValue in eax.
+ __ LoadGlobalFunctionInitialMap(edi, ecx);
+ __ mov(FieldOperand(eax, HeapObject::kMapOffset), ecx);
+ __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+ masm->isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+ masm->isolate()->factory()->empty_fixed_array());
+ __ mov(FieldOperand(eax, JSValue::kValueOffset), ebx);
+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
+ __ Ret();
+
+ // Fallback to the runtime to allocate in new space.
+ __ bind(&allocate);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(ebx);
+ __ Push(edi);
+ __ Push(Smi::FromInt(JSValue::kSize));
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+ __ Pop(edi);
+ __ Pop(ebx);
+ }
+ __ jmp(&done_allocate);
}
- __ ret(0);
}
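
The rewritten String constructor is a four-way dispatch: strings pass through, symbols get their descriptive string, everything else (including no argument at all) goes through ToString. A compilable model with an assumed simplified value type, not V8's:

    #include <string>

    enum class Kind { kSmi, kString, kSymbol, kOther };
    struct Value { Kind kind; std::string payload; };

    // Stand-ins for Runtime::kSymbolDescriptiveString and ToStringStub.
    std::string SymbolDescriptiveString(const Value& v) { return "Symbol(" + v.payload + ")"; }
    std::string ToString(const Value& v) { return v.payload; }

    std::string StringConstructor(const Value* arg) {
      if (arg == nullptr) return "";                        // 2b: no arguments
      if (arg->kind == Kind::kString) return arg->payload;  // 2a: pass through
      if (arg->kind == Kind::kSymbol) return SymbolDescriptiveString(*arg);
      return ToString(*arg);                                // 3a: Smis and the rest
    }
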
@@ -1618,6 +1409,258 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
}
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edi : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+
+ Label convert, convert_global_proxy, convert_to_object, done_convert;
+ __ AssertFunction(edi);
+ // TODO(bmeurer): Throw a TypeError if function's [[FunctionKind]] internal
+ // slot is "classConstructor".
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
+ SharedFunctionInfo::kStrictModeByteOffset);
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ __ test_b(FieldOperand(edx, SharedFunctionInfo::kNativeByteOffset),
+ (1 << SharedFunctionInfo::kNativeBitWithinByte) |
+ (1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+ __ j(not_zero, &done_convert);
+ {
+ __ mov(ecx, Operand(esp, eax, times_pointer_size, kPointerSize));
+
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- ecx : the receiver
+ // -- edx : the shared function info.
+ // -- edi : the function to call (checked to be a JSFunction)
+ // -- esi : the function context.
+ // -----------------------------------
+
+ Label convert_receiver;
+ __ JumpIfSmi(ecx, &convert_to_object, Label::kNear);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(ecx, FIRST_JS_RECEIVER_TYPE, ebx);
+ __ j(above_equal, &done_convert);
+ __ JumpIfRoot(ecx, Heap::kUndefinedValueRootIndex, &convert_global_proxy,
+ Label::kNear);
+ __ JumpIfNotRoot(ecx, Heap::kNullValueRootIndex, &convert_to_object,
+ Label::kNear);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(ecx);
+ }
+ __ jmp(&convert_receiver);
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(eax);
+ __ Push(eax);
+ __ Push(edi);
+ __ mov(eax, ecx);
+ ToObjectStub stub(masm->isolate());
+ __ CallStub(&stub);
+ __ mov(ecx, eax);
+ __ Pop(edi);
+ __ Pop(eax);
+ __ SmiUntag(eax);
+ }
+ __ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), ecx);
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the shared function info.
+ // -- edi : the function to call (checked to be a JSFunction)
+ // -- esi : the function context.
+ // -----------------------------------
+
+ __ mov(ebx,
+ FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(ebx);
+ ParameterCount actual(eax);
+ ParameterCount expected(ebx);
+ __ InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset), expected,
+ actual, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
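+
+The receiver conversion in Generate_CallFunction follows ES6 9.2.1: only sloppy-mode, non-native functions convert their receiver; undefined and null become the global proxy, other primitives go through ToObject, and JS receivers are left untouched. A compilable model with assumed simplified types:
+
+    enum class Kind { kUndefined, kNull, kPrimitive, kJSReceiver };
+    struct Value { Kind kind; };
+
+    Value GlobalProxy() { return {Kind::kJSReceiver}; }     // LoadGlobalProxy
+    Value ToObject(Value) { return {Kind::kJSReceiver}; }   // ToObjectStub
+
+    // Only sloppy-mode, non-native functions convert their receiver.
+    Value ConvertReceiver(Value receiver, bool is_strict, bool is_native) {
+      if (is_strict || is_native) return receiver;
+      if (receiver.kind == Kind::kJSReceiver) return receiver;
+      if (receiver.kind == Kind::kUndefined || receiver.kind == Kind::kNull)
+        return GlobalProxy();                               // patch to global proxy
+      return ToObject(receiver);                            // numbers, strings, ...
+    }
+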
+// static
+void Builtins::Generate_Call(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edi : the target to call (can be any Object).
+ // -----------------------------------
+
+ Label non_callable, non_function, non_smi;
+ __ JumpIfSmi(edi, &non_callable);
+ __ bind(&non_smi);
+ __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+ __ j(equal, masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(not_equal, &non_function);
+
+ // 1. Call to function proxy.
+ // TODO(neis): This doesn't match the ES6 spec for [[Call]] on proxies.
+ __ mov(edi, FieldOperand(edi, JSFunctionProxy::kCallTrapOffset));
+ __ AssertNotSmi(edi);
+ __ jmp(&non_smi);
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not we raise an exception).
+ __ bind(&non_function);
+ // Check if target has a [[Call]] internal method.
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsCallable);
+ __ j(zero, &non_callable, Label::kNear);
+ // Overwrite the original receiver with the (original) target.
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, edi);
+ __ Jump(masm->isolate()->builtins()->CallFunction(), RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(edi);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
+
+
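+
+Generate_Call is the single entry point for calling an arbitrary object: JSFunctions tail-call CallFunction, function proxies re-dispatch on their call trap, other callables go through the call_as_function_delegate, and everything else throws. A compilable model with assumed types:
+
+    #include <stdexcept>
+
+    enum class Type { kSmi, kJSFunction, kJSFunctionProxy, kOtherCallable, kNotCallable };
+    struct Target { Type type; const Target* call_trap = nullptr; };
+
+    const char* Call(Target t) {
+      for (;;) {
+        switch (t.type) {
+          case Type::kJSFunction:      return "CallFunction builtin";
+          case Type::kJSFunctionProxy: t = *t.call_trap; break;  // re-enter dispatch
+          case Type::kOtherCallable:   return "call_as_function_delegate";
+          default:                     // Smis and non-callables
+            throw std::runtime_error("TypeError: called non-callable");
+        }
+      }
+    }
+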
+// static
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the original constructor (checked to be a JSFunction)
+ // -- edi : the constructor to call (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(edx);
+ __ AssertFunction(edi);
+
+ // The calling convention for function-specific ConstructStubs requires
+ // ebx to contain either an AllocationSite or undefined.
+ __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
+}
+
+
+// static
+void Builtins::Generate_ConstructProxy(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- edi : the constructor to call (checked to be a JSFunctionProxy)
+ // -----------------------------------
+
+ // TODO(neis): This doesn't match the ES6 spec for [[Construct]] on proxies.
+ __ mov(edi, FieldOperand(edi, JSFunctionProxy::kConstructTrapOffset));
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edx : the original constructor (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -- edi : the constructor to call (can be any Object)
+ // -----------------------------------
+
+ // Check if target has a [[Construct]] internal method.
+ Label non_constructor;
+ __ JumpIfSmi(edi, &non_constructor, Label::kNear);
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ test_b(FieldOperand(ecx, Map::kBitFieldOffset), 1 << Map::kIsConstructor);
+ __ j(zero, &non_constructor, Label::kNear);
+
+ // Dispatch based on instance type.
+ __ CmpInstanceType(ecx, JS_FUNCTION_TYPE);
+ __ j(equal, masm->isolate()->builtins()->ConstructFunction(),
+ RelocInfo::CODE_TARGET);
+ __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
+ __ j(equal, masm->isolate()->builtins()->ConstructProxy(),
+ RelocInfo::CODE_TARGET);
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ mov(Operand(esp, eax, times_pointer_size, kPointerSize), edi);
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadGlobalFunction(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, edi);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(edi);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable, 1);
+ }
+}
+
+
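+
+Generate_Construct mirrors the Call dispatch for [[Construct]]: the IsConstructor map bit gates everything, then functions and proxies get their own stubs while other exotic constructors go through the constructor delegate. A compilable sketch with assumed types:
+
+    enum class Kind { kSmi, kFunction, kFunctionProxy, kOtherConstructor };
+    struct Ctor { Kind kind; bool is_constructor_bit; };
+
+    const char* Construct(const Ctor& c) {
+      if (c.kind == Kind::kSmi || !c.is_constructor_bit)
+        return "Runtime::kThrowCalledNonCallable";        // non_constructor path
+      switch (c.kind) {
+        case Kind::kFunction:      return "ConstructFunction builtin";
+        case Kind::kFunctionProxy: return "ConstructProxy builtin";
+        default:                   return "call_as_constructor_delegate";
+      }
+    }
+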
+// static
+void Builtins::Generate_PushArgsAndCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- eax : the number of arguments (not including the receiver)
+ // -- ebx : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- edi : the target to call (can be any Object).
+
+ // Pop return address to allow tail-call after pushing arguments.
+ __ Pop(edx);
+
+ // Find the address of the last argument.
+ __ mov(ecx, eax);
+ __ add(ecx, Immediate(1)); // Add one for receiver.
+ __ shl(ecx, kPointerSizeLog2);
+ __ neg(ecx);
+ __ add(ecx, ebx);
+
+ // Push the arguments.
+ Label loop_header, loop_check;
+ __ jmp(&loop_check);
+ __ bind(&loop_header);
+ __ Push(Operand(ebx, 0));
+ __ sub(ebx, Immediate(kPointerSize));
+ __ bind(&loop_check);
+ __ cmp(ebx, ecx);
+ __ j(greater, &loop_header, Label::kNear);
+
+ // Call the target.
+ __ Push(edx); // Re-push return address.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+
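+
+The loop boundary in Generate_PushArgsAndCall is computed as first-argument address minus (argc + 1) slots, receiver included; the push loop then walks ebx down one pointer at a time while it stays above that boundary. The arithmetic, verified in a tiny sketch:
+
+    #include <cassert>
+
+    // Mirrors: ecx = ebx - (eax + 1) * kPointerSize; the loop then pushes
+    // slots while ebx > ecx, i.e. argc arguments plus the receiver.
+    int LastArgumentBoundary(int first_arg_addr, int argc, int kPointerSize = 4) {
+      return first_arg_addr - (argc + 1) * kPointerSize;
+    }
+
+    int main() {
+      assert(LastArgumentBoundary(/*ebx*/ 1000, /*argc*/ 2) == 988);
+      return 0;
+    }
+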
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax : actual number of arguments
@@ -1644,16 +1687,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Copy receiver and all expected arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
- __ lea(eax, Operand(ebp, eax, times_4, offset));
- __ mov(edi, -1); // account for receiver
+ __ lea(edi, Operand(ebp, eax, times_4, offset));
+ __ mov(eax, -1); // account for receiver
Label copy;
__ bind(&copy);
- __ inc(edi);
- __ push(Operand(eax, 0));
- __ sub(eax, Immediate(kPointerSize));
- __ cmp(edi, ebx);
+ __ inc(eax);
+ __ push(Operand(edi, 0));
+ __ sub(edi, Immediate(kPointerSize));
+ __ cmp(eax, ebx);
__ j(less, &copy);
+ // eax now contains the expected number of arguments.
__ jmp(&invoke);
}
@@ -1682,6 +1726,9 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&no_strong_error);
EnterArgumentsAdaptorFrame(masm);
+ // Remember expected arguments in ecx.
+ __ mov(ecx, ebx);
+
// Copy receiver and all actual arguments.
const int offset = StandardFrameConstants::kCallerSPOffset;
__ lea(edi, Operand(ebp, eax, times_4, offset));
@@ -1706,12 +1753,17 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ push(Immediate(masm->isolate()->factory()->undefined_value()));
__ cmp(eax, ebx);
__ j(less, &fill);
+
+ // Restore expected arguments.
+ __ mov(eax, ecx);
}
// Call the entry point.
__ bind(&invoke);
// Restore function pointer.
__ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ // eax : expected number of arguments
+ // edi : function (passed through to callee)
__ call(edx);
// Store offset of return address for deoptimizer.
@@ -1731,7 +1783,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
{
FrameScope frame(masm, StackFrame::MANUAL);
EnterArgumentsAdaptorFrame(masm);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ __ CallRuntime(Runtime::kThrowStackOverflow, 0);
__ int3();
}
}
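Taken together, the adaptor guarantees that the callee always observes exactly the expected argument count: extra actuals are ignored and missing ones are padded with undefined, with eax carrying the expected count into the call. A hedged one-function C++ model, with std::string standing in for tagged values:

#include <cstddef>
#include <string>
#include <vector>

std::vector<std::string> AdaptArguments(std::vector<std::string> actual,
                                        std::size_t expected) {
  // Too many actuals: the callee still sees only `expected` of them.
  // Too few: the &fill loop pads with undefined until eax == ebx.
  actual.resize(expected, "undefined");
  return actual;  // eax leaves the adaptor holding `expected`
}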
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index bba43276fa..0d59b18068 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -15,6 +15,7 @@
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/runtime/runtime.h"
+#include "src/x87/code-stubs-x87.h"
#include "src/x87/frames-x87.h"
namespace v8 {
@@ -492,72 +493,78 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
+ // ecx : number of parameters (tagged)
+ // edx : parameters pointer
+ // edi : function
// esp[0] : return address
- // esp[4] : number of parameters
- // esp[8] : receiver displacement
- // esp[12] : function
+
+ DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &runtime, Label::kNear);
// Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
+ __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx,
+ Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
__ bind(&runtime);
+ __ pop(eax); // Pop return address.
+ __ push(edi); // Push function.
+ __ push(edx); // Push parameters pointer.
+ __ push(ecx); // Push parameter count.
+ __ push(eax); // Push return address.
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
+ // ecx : number of parameters (tagged)
+ // edx : parameters pointer
+ // edi : function
// esp[0] : return address
- // esp[4] : number of parameters (tagged)
- // esp[8] : receiver displacement
- // esp[12] : function
- // ebx = parameter count (tagged)
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
// Check if the calling frame is an arguments adaptor frame.
- // TODO(rossberg): Factor out some of the bits that are shared with the other
- // Generate* functions.
- Label runtime;
- Label adaptor_frame, try_allocate;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ Label adaptor_frame, try_allocate, runtime;
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &adaptor_frame, Label::kNear);
// No adaptor, parameter count = argument count.
- __ mov(ecx, ebx);
+ __ mov(ebx, ecx);
+ __ push(ecx);
__ jmp(&try_allocate, Label::kNear);
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
+ __ mov(ebx, ecx);
+ __ push(ecx);
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ lea(edx, Operand(edx, ecx, times_2,
StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
// ebx = parameter count (tagged)
// ecx = argument count (smi-tagged)
- // esp[4] = parameter count (tagged)
- // esp[8] = address of receiver argument
// Compute the mapped parameter count = min(ebx, ecx) in ebx.
__ cmp(ebx, ecx);
__ j(less_equal, &try_allocate, Label::kNear);
__ mov(ebx, ecx);
+ // Save mapped parameter count and function.
__ bind(&try_allocate);
-
- // Save mapped parameter count.
+ __ push(edi);
__ push(ebx);
// Compute the sizes of backing store, parameter map, and arguments object.
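A small sketch of the count computation above, assuming plain size_t counts; the stub additionally keeps the function and this mapped count live on the stack across the allocation:

#include <algorithm>
#include <cstddef>

std::size_t MappedParameterCount(std::size_t formals, std::size_t actuals) {
  return std::min(formals, actuals);  // cmp ebx, ecx / mov ebx, ecx
}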
@@ -577,13 +584,13 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
__ add(ebx, Immediate(Heap::kSloppyArgumentsObjectSize));
// Do the allocation of all three objects in one go.
- __ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
+ __ Allocate(ebx, eax, edi, no_reg, &runtime, TAG_OBJECT);
// eax = address of new object(s) (tagged)
// ecx = argument count (smi-tagged)
// esp[0] = mapped parameter count (tagged)
+ // esp[4] = function
// esp[8] = parameter count (tagged)
- // esp[12] = address of receiver argument
// Get the arguments map from the current native context into edi.
Label has_mapped_parameters, instantiate;
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -606,8 +613,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// ecx = argument count (smi-tagged)
// edi = address of arguments map (tagged)
// esp[0] = mapped parameter count (tagged)
+ // esp[4] = function
// esp[8] = parameter count (tagged)
- // esp[12] = address of receiver argument
// Copy the JS object part.
__ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
__ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
@@ -617,11 +624,11 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Set up the callee in-object property.
STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ mov(edx, Operand(esp, 4 * kPointerSize));
- __ AssertNotSmi(edx);
+ __ mov(edi, Operand(esp, 1 * kPointerSize));
+ __ AssertNotSmi(edi);
__ mov(FieldOperand(eax, JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize),
- edx);
+ Heap::kArgumentsCalleeIndex * kPointerSize),
+ edi);
// Use the length (smi tagged) and set that as an in-object property too.
__ AssertSmi(ecx);
@@ -639,11 +646,13 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// eax = address of new object (tagged)
// ebx = mapped parameter count (tagged)
// ecx = argument count (tagged)
+ // edx = address of receiver argument
// edi = address of parameter map or backing store (tagged)
// esp[0] = mapped parameter count (tagged)
+ // esp[4] = function
// esp[8] = parameter count (tagged)
- // esp[12] = address of receiver argument
- // Free a register.
+ // Free two registers.
+ __ push(edx);
__ push(eax);
// Initialize parameter map. If there are no mapped arguments, we're done.
@@ -669,9 +678,9 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// We loop from right to left.
Label parameters_loop, parameters_test;
__ push(ecx);
- __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
__ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
- __ add(ebx, Operand(esp, 4 * kPointerSize));
+ __ add(ebx, Operand(esp, 5 * kPointerSize));
__ sub(ebx, eax);
__ mov(ecx, isolate()->factory()->the_hole_value());
__ mov(edx, edi);
@@ -683,9 +692,10 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// edi = address of backing store (tagged)
// esp[0] = argument count (tagged)
// esp[4] = address of new object (tagged)
- // esp[8] = mapped parameter count (tagged)
- // esp[16] = parameter count (tagged)
- // esp[20] = address of receiver argument
+ // esp[8] = address of receiver argument
+ // esp[12] = mapped parameter count (tagged)
+ // esp[16] = function
+ // esp[20] = parameter count (tagged)
__ jmp(&parameters_test, Label::kNear);
__ bind(&parameters_loop);
@@ -703,17 +713,18 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// ecx = argument count (tagged)
// edi = address of backing store (tagged)
// esp[0] = address of new object (tagged)
- // esp[4] = mapped parameter count (tagged)
- // esp[12] = parameter count (tagged)
- // esp[16] = address of receiver argument
+ // esp[4] = address of receiver argument
+ // esp[8] = mapped parameter count (tagged)
+ // esp[12] = function
+ // esp[16] = parameter count (tagged)
// Copy arguments header and remaining slots (if there are any).
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(isolate()->factory()->fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
Label arguments_loop, arguments_test;
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 4 * kPointerSize));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize));
+ __ mov(edx, Operand(esp, 1 * kPointerSize));
__ sub(edx, ebx); // Is there a smarter way to do negative scaling?
__ sub(edx, ebx);
__ jmp(&arguments_test, Label::kNear);
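The double sub above works around the fact that x86 addressing modes can only scale an index by +1, 2, 4 or 8, never negatively; since ebx holds a Smi (value << 1), subtracting it twice subtracts value * kPointerSize. A tiny C++ equivalent:

#include <cstdint>

std::intptr_t NegativeScale(std::intptr_t base, std::intptr_t smi_count) {
  // A Smi holds value << 1, so two subtractions remove
  // value * 4 == value * kPointerSize on ia32.
  base -= smi_count;  // __ sub(edx, ebx)
  base -= smi_count;  // __ sub(edx, ebx)
  return base;
}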
@@ -730,57 +741,60 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// Restore.
__ pop(eax); // Address of arguments object.
- __ pop(ebx); // Parameter count.
+ __ Drop(4);
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
+ // Return.
+ __ ret(0);
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ pop(eax); // Remove saved parameter count.
- __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
+ __ pop(eax); // Remove saved mapped parameter count.
+ __ pop(edi); // Pop saved function.
+ __ pop(eax); // Remove saved parameter count.
+ __ pop(eax); // Pop return address.
+ __ push(edi); // Push function.
+ __ push(edx); // Push parameters pointer.
+ __ push(ecx); // Push parameter count.
+ __ push(eax); // Push return address.
__ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
}
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // ecx : number of parameters (tagged)
+ // edx : parameters pointer
+ // edi : function
// esp[0] : return address
- // esp[4] : number of parameters
- // esp[8] : receiver displacement
- // esp[12] : function
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor_frame, try_allocate, runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor_frame, Label::kNear);
+ DCHECK(edi.is(ArgumentsAccessNewDescriptor::function()));
+ DCHECK(ecx.is(ArgumentsAccessNewDescriptor::parameter_count()));
+ DCHECK(edx.is(ArgumentsAccessNewDescriptor::parameter_pointer()));
- // Get the length from the frame.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
- __ jmp(&try_allocate, Label::kNear);
+ // Check if the calling frame is an arguments adaptor frame.
+ Label try_allocate, runtime;
+ __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(eax, Operand(ebx, StandardFrameConstants::kContextOffset));
+ __ cmp(eax, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ j(not_equal, &try_allocate, Label::kNear);
// Patch the arguments.length and the parameters pointer.
- __ bind(&adaptor_frame);
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ mov(Operand(esp, 2 * kPointerSize), edx);
+ __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lea(edx,
+ Operand(ebx, ecx, times_2, StandardFrameConstants::kCallerSPOffset));
// Try the new space allocation. Start out with computing the size of
// the arguments object and the elements array.
Label add_arguments_object;
__ bind(&try_allocate);
- __ test(ecx, ecx);
+ __ mov(eax, ecx);
+ __ test(eax, eax);
__ j(zero, &add_arguments_object, Label::kNear);
- __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
+ __ lea(eax, Operand(eax, times_2, FixedArray::kHeaderSize));
__ bind(&add_arguments_object);
- __ add(ecx, Immediate(Heap::kStrictArgumentsObjectSize));
+ __ add(eax, Immediate(Heap::kStrictArgumentsObjectSize));
// Do the allocation of both objects in one go.
- __ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
+ __ Allocate(eax, eax, ebx, no_reg, &runtime, TAG_OBJECT);
// Get the arguments map from the current native context.
__ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
@@ -796,7 +810,6 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
__ AssertSmi(ecx);
__ mov(FieldOperand(eax, JSObject::kHeaderSize +
Heap::kArgumentsLengthIndex * kPointerSize),
@@ -807,17 +820,14 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ test(ecx, ecx);
__ j(zero, &done, Label::kNear);
- // Get the parameters pointer from the stack.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
-
// Set up the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
__ lea(edi, Operand(eax, Heap::kStrictArgumentsObjectSize));
__ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(isolate()->factory()->fixed_array_map()));
-
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+
// Untag the length for the loop below.
__ SmiUntag(ecx);
@@ -831,42 +841,21 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ dec(ecx);
__ j(not_zero, &loop);
- // Return and remove the on-stack parameters.
+ // Return.
__ bind(&done);
- __ ret(3 * kPointerSize);
+ __ ret(0);
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
+ __ pop(eax); // Pop return address.
+ __ push(edi); // Push function.
+ __ push(edx); // Push parameters pointer.
+ __ push(ecx); // Push parameter count.
+ __ push(eax); // Push return address.
__ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
}
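The allocation-size arithmetic above leans on the Smi tag: the tagged count is already value << 1, so scaling it by 2 yields value * 4 bytes on ia32. A sketch with the heap constants passed in as assumed parameters:

#include <cstddef>

std::size_t StrictArgumentsSize(std::size_t smi_count,          // count << 1
                                std::size_t fixed_array_header,     // assumed
                                std::size_t strict_args_object) {   // assumed
  std::size_t size = 0;
  if (smi_count != 0)                            // test eax, eax / j(zero, ...)
    size = smi_count * 2 + fixed_array_header;   // lea eax, [eax*2 + header]
  return size + strict_args_object;              // add eax, kStrictArgumentsObjectSize
}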
-void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
- // esp[0] : return address
- // esp[4] : language mode
- // esp[8] : index of rest parameter
- // esp[12] : number of parameters
- // esp[16] : receiver displacement
-
- // Check if the calling frame is an arguments adaptor frame.
- Label runtime;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(not_equal, &runtime);
-
- // Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 3 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2,
- StandardFrameConstants::kCallerSPOffset));
- __ mov(Operand(esp, 4 * kPointerSize), edx);
-
- __ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewRestParam, 4, 1);
-}
-
-
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@@ -1599,25 +1588,21 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
__ push(eax);
// Figure out which native to call and setup the arguments.
- if (cc == equal && strict()) {
+ if (cc == equal) {
__ push(ecx);
- __ TailCallRuntime(Runtime::kStrictEquals, 2, 1);
+ __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals, 2,
+ 1);
} else {
- Builtins::JavaScript builtin;
- if (cc == equal) {
- builtin = Builtins::EQUALS;
- } else {
- builtin =
- is_strong(strength()) ? Builtins::COMPARE_STRONG : Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- }
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
// Restore return address on the stack.
__ push(ecx);
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+ __ TailCallRuntime(
+ is_strong(strength()) ? Runtime::kCompare_Strong : Runtime::kCompare, 3,
+ 1);
}
__ bind(&miss);
@@ -1695,27 +1680,25 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
__ j(equal, &done, Label::kFar);
__ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
Heap::kWeakCellMapRootIndex);
- __ j(not_equal, FLAG_pretenuring_call_new ? &miss : &check_allocation_site);
+ __ j(not_equal, &check_allocation_site);
// If the weak cell is cleared, we have a new chance to become monomorphic.
__ JumpIfSmi(FieldOperand(ecx, WeakCell::kValueOffset), &initialize);
__ jmp(&megamorphic);
- if (!FLAG_pretenuring_call_new) {
- __ bind(&check_allocation_site);
- // If we came here, we need to see if we are the array function.
- // If we didn't have a matching function, and we didn't find the megamorph
- // sentinel, then we have in the slot either some other function or an
- // AllocationSite.
- __ CompareRoot(FieldOperand(ecx, 0), Heap::kAllocationSiteMapRootIndex);
- __ j(not_equal, &miss);
+ __ bind(&check_allocation_site);
+ // If we came here, we need to see if we are the array function.
+ // If we didn't have a matching function, and we didn't find the megamorphic
+ // sentinel, then we have in the slot either some other function or an
+ // AllocationSite.
+ __ CompareRoot(FieldOperand(ecx, 0), Heap::kAllocationSiteMapRootIndex);
+ __ j(not_equal, &miss);
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ j(not_equal, &megamorphic);
- __ jmp(&done, Label::kFar);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &megamorphic);
+ __ jmp(&done, Label::kFar);
__ bind(&miss);
@@ -1734,24 +1717,21 @@ static void GenerateRecordCallTarget(MacroAssembler* masm, bool is_super) {
// An uninitialized cache is patched with the function or sentinel to
// indicate the ElementsKind if function is the Array constructor.
__ bind(&initialize);
- if (!FLAG_pretenuring_call_new) {
- // Make sure the function is the Array() function
- __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
- __ cmp(edi, ecx);
- __ j(not_equal, &not_array_function);
-
- // The target function is the Array constructor,
- // Create an AllocationSite if we don't already have it, store it in the
- // slot.
- CreateAllocationSiteStub create_stub(isolate);
- CallStubInRecordCallTarget(masm, &create_stub, is_super);
- __ jmp(&done);
-
- __ bind(&not_array_function);
- }
+ // Make sure the function is the Array() function
+ __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+ __ cmp(edi, ecx);
+ __ j(not_equal, &not_array_function);
- CreateWeakCellStub create_stub(isolate);
+ // The target function is the Array constructor; create an AllocationSite
+ // if we don't already have one and store it in the slot.
+ CreateAllocationSiteStub create_stub(isolate);
CallStubInRecordCallTarget(masm, &create_stub, is_super);
+ __ jmp(&done);
+
+ __ bind(&not_array_function);
+ CreateWeakCellStub weak_cell_stub(isolate);
+ CallStubInRecordCallTarget(masm, &weak_cell_stub, is_super);
__ bind(&done);
}
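The code above implements a small state machine over the feedback slot. A hedged C++ model (states and transitions simplified; the real slot stores WeakCells and AllocationSites, not enum values):

enum class Slot { Uninitialized, Monomorphic, AllocationSite, Megamorphic };

Slot RecordCallTarget(Slot slot, bool same_as_cached, bool is_array_constructor) {
  switch (slot) {
    case Slot::Uninitialized:
      // First call: the Array() function gets an AllocationSite, every other
      // target a WeakCell (modeled here as Monomorphic).
      return is_array_constructor ? Slot::AllocationSite : Slot::Monomorphic;
    case Slot::Monomorphic:
    case Slot::AllocationSite:
      // A different target degrades the slot to the megamorphic sentinel.
      return same_as_cached ? slot : Slot::Megamorphic;
    case Slot::Megamorphic:
      return slot;
  }
  return slot;  // unreachable; keeps compilers happy
}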
@@ -1770,33 +1750,9 @@ static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
}
-static void EmitSlowCase(Isolate* isolate,
- MacroAssembler* masm,
- int argc,
- Label* non_function) {
- // Check for function proxy.
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, non_function);
- __ pop(ecx);
- __ push(edi); // put proxy as additional argument under return address
- __ push(ecx);
- __ Move(eax, Immediate(argc + 1));
- __ Move(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
- {
- Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
- }
-
- // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
- // of the original receiver from the call site).
- __ bind(non_function);
- __ mov(Operand(esp, (argc + 1) * kPointerSize), edi);
- __ Move(eax, Immediate(argc));
- __ Move(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
+static void EmitSlowCase(Isolate* isolate, MacroAssembler* masm, int argc) {
+ __ Set(eax, argc);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -1817,11 +1773,11 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
int argc, bool needs_checks,
bool call_as_method) {
// edi : the function to call
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
if (needs_checks) {
// Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &non_function);
+ __ JumpIfSmi(edi, &slow);
// Goto slow case if we do not have a function.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
@@ -1856,8 +1812,7 @@ static void CallFunctionNoFeedback(MacroAssembler* masm,
if (needs_checks) {
// Slow-case: Non-function called.
__ bind(&slow);
- // (non_function is bound in EmitSlowCase)
- EmitSlowCase(masm->isolate(), masm, argc, &non_function);
+ EmitSlowCase(masm->isolate(), masm, argc);
}
if (call_as_method) {
@@ -1878,39 +1833,31 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// ecx : original constructor (for IsSuperConstructorCall)
// edx : slot in feedback vector (Smi, for RecordCallTarget)
// edi : constructor function
- Label slow, non_function_call;
if (IsSuperConstructorCall()) {
__ push(ecx);
}
+ Label non_function;
// Check that function is not a smi.
- __ JumpIfSmi(edi, &non_function_call);
+ __ JumpIfSmi(edi, &non_function);
// Check that function is a JSFunction.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
- __ j(not_equal, &slow);
+ __ j(not_equal, &non_function);
if (RecordCallTarget()) {
GenerateRecordCallTarget(masm, IsSuperConstructorCall());
- if (FLAG_pretenuring_call_new) {
- // Put the AllocationSite from the feedback vector into ebx.
- // By adding kPointerSize we encode that we know the AllocationSite
- // entry is at the feedback vector slot given by edx + 1.
- __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- } else {
- Label feedback_register_initialized;
- // Put the AllocationSite from the feedback vector into ebx, or undefined.
- __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Handle<Map> allocation_site_map =
- isolate()->factory()->allocation_site_map();
- __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
- __ j(equal, &feedback_register_initialized);
- __ mov(ebx, isolate()->factory()->undefined_value());
- __ bind(&feedback_register_initialized);
- }
+ Label feedback_register_initialized;
+ // Put the AllocationSite from the feedback vector into ebx, or undefined.
+ __ mov(ebx, FieldOperand(ebx, edx, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ Handle<Map> allocation_site_map =
+ isolate()->factory()->allocation_site_map();
+ __ cmp(FieldOperand(ebx, 0), Immediate(allocation_site_map));
+ __ j(equal, &feedback_register_initialized);
+ __ mov(ebx, isolate()->factory()->undefined_value());
+ __ bind(&feedback_register_initialized);
__ AssertUndefinedOrAllocationSite(ebx);
}
@@ -1922,69 +1869,33 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ mov(edx, edi);
}
- // Jump to the function-specific construct stub.
- Register jmp_reg = ecx;
- __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
- __ mov(jmp_reg, FieldOperand(jmp_reg,
- SharedFunctionInfo::kConstructStubOffset));
- __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize));
- __ jmp(jmp_reg);
-
- // edi: called object
- // eax: number of arguments
- // ecx: object map
- // esp[0]: original receiver (for IsSuperConstructorCall)
- Label do_call;
- __ bind(&slow);
- __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
- __ j(not_equal, &non_function_call);
- __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- if (IsSuperConstructorCall()) {
- __ Drop(1);
- }
- // Set expected number of arguments to zero (not changing eax).
- __ Move(ebx, Immediate(0));
- Handle<Code> arguments_adaptor =
- isolate()->builtins()->ArgumentsAdaptorTrampoline();
- __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
-}
-
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kConstructStubOffset));
+ __ lea(ecx, FieldOperand(ecx, Code::kHeaderSize));
+ __ jmp(ecx);
-static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
- __ mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
- __ mov(vector, FieldOperand(vector,
- SharedFunctionInfo::kFeedbackVectorOffset));
+ __ bind(&non_function);
+ if (IsSuperConstructorCall()) __ Drop(1);
+ __ mov(edx, edi);
+ __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// edi - function
// edx - slot id
// ebx - vector
- Label miss;
- int argc = arg_count();
- ParameterCount actual(argc);
-
__ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
__ cmp(edi, ecx);
- __ j(not_equal, &miss);
+ __ j(not_equal, miss);
__ mov(eax, arg_count());
+ // Reload ecx.
__ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize));
- // Verify that ecx contains an AllocationSite
- Factory* factory = masm->isolate()->factory();
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- factory->allocation_site_map());
- __ j(not_equal, &miss);
-
// Increment the call count for monomorphic function calls.
__ add(FieldOperand(ebx, edx, times_half_pointer_size,
FixedArray::kHeaderSize + kPointerSize),
@@ -1995,17 +1906,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
- __ bind(&miss);
- GenerateMiss(masm);
-
- // The slow case, we need this no matter what to complete a call after a miss.
- CallFunctionNoFeedback(masm,
- arg_count(),
- true,
- CallAsMethod());
-
// Unreachable.
- __ int3();
}
@@ -2019,7 +1920,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
const int generic_offset =
FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
Label extra_checks_or_miss, slow_start;
- Label slow, non_function, wrap, cont;
+ Label slow, wrap, cont;
Label have_js_function;
int argc = arg_count();
ParameterCount actual(argc);
@@ -2072,7 +1973,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ InvokeFunction(edi, actual, JUMP_FUNCTION, NullCallWrapper());
__ bind(&slow);
- EmitSlowCase(isolate, masm, argc, &non_function);
+ EmitSlowCase(isolate, masm, argc);
if (CallAsMethod()) {
__ bind(&wrap);
@@ -2080,11 +1981,21 @@ void CallICStub::Generate(MacroAssembler* masm) {
}
__ bind(&extra_checks_or_miss);
- Label uninitialized, miss;
+ Label uninitialized, miss, not_allocation_site;
__ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
__ j(equal, &slow_start);
+ // Check if we have an allocation site.
+ __ CompareRoot(FieldOperand(ecx, HeapObject::kMapOffset),
+ Heap::kAllocationSiteMapRootIndex);
+ __ j(not_equal, &not_allocation_site);
+
+ // We have an allocation site.
+ HandleArrayCase(masm, &miss);
+
+ __ bind(&not_allocation_site);
+
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
@@ -2153,7 +2064,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
__ bind(&slow_start);
// Check that the function really is a JavaScript function.
- __ JumpIfSmi(edi, &non_function);
+ __ JumpIfSmi(edi, &slow);
// Goto slow case if we do not have a function.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
@@ -2168,16 +2079,13 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameScope scope(masm, StackFrame::INTERNAL);
- // Push the receiver and the function and feedback info.
+ // Push the function and feedback info.
__ push(edi);
__ push(ebx);
__ push(edx);
// Call the entry.
- Runtime::FunctionId id = GetICState() == DEFAULT
- ? Runtime::kCallIC_Miss
- : Runtime::kCallIC_Customization_Miss;
- __ CallRuntime(id, 3);
+ __ CallRuntime(Runtime::kCallIC_Miss, 3);
// Move result to edi and exit the internal frame.
__ mov(edi, eax);
@@ -2430,233 +2338,108 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
}
-// Generate stub code for instanceof.
-// This code can patch a call site inlined cache of the instance of check,
-// which looks like this.
-//
-// 81 ff XX XX XX XX cmp edi, <the hole, patched to a map>
-// 75 0a jne <some near label>
-// b8 XX XX XX XX mov eax, <the hole, patched to either true or false>
-//
-// If call site patching is requested the stack will have the delta from the
-// return address to the cmp instruction just below the return address. This
-// also means that call site patching can only take place with arguments in
-// registers. TOS looks like this when call site patching is requested
-//
-// esp[0] : return address
-// esp[4] : delta from return address to cmp instruction
-//
-void InstanceofStub::Generate(MacroAssembler* masm) {
- // Call site inlining and patching implies arguments in registers.
- DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-
- // Fixed register usage throughout the stub.
- Register object = eax; // Object (lhs).
- Register map = ebx; // Map of the object.
- Register function = edx; // Function (rhs).
- Register prototype = edi; // Prototype of the function.
- Register scratch = ecx;
-
- // Constants describing the call site code to patch.
- static const int kDeltaToCmpImmediate = 2;
- static const int kDeltaToMov = 8;
- static const int kDeltaToMovImmediate = 9;
- static const int8_t kCmpEdiOperandByte1 = bit_cast<int8_t, uint8_t>(0x3b);
- static const int8_t kCmpEdiOperandByte2 = bit_cast<int8_t, uint8_t>(0x3d);
- static const int8_t kMovEaxImmediateByte = bit_cast<int8_t, uint8_t>(0xb8);
-
- DCHECK_EQ(object.code(), InstanceofStub::left().code());
- DCHECK_EQ(function.code(), InstanceofStub::right().code());
-
- // Get the object and function - they are always both needed.
- Label slow, not_js_object;
- if (!HasArgsInRegisters()) {
- __ mov(object, Operand(esp, 2 * kPointerSize));
- __ mov(function, Operand(esp, 1 * kPointerSize));
- }
+void InstanceOfStub::Generate(MacroAssembler* masm) {
+ Register const object = edx; // Object (lhs).
+ Register const function = eax; // Function (rhs).
+ Register const object_map = ecx; // Map of {object}.
+ Register const function_map = ebx; // Map of {function}.
+ Register const function_prototype = function_map; // Prototype of {function}.
+ Register const scratch = edi;
+
+ DCHECK(object.is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(function.is(InstanceOfDescriptor::RightRegister()));
+
+ // Check if {object} is a smi.
+ Label object_is_smi;
+ __ JumpIfSmi(object, &object_is_smi, Label::kNear);
+
+ // Lookup the {function} and the {object} map in the global instanceof cache.
+ // Note: This is safe because we clear the global instanceof cache whenever
+ // we change the prototype of any object.
+ Label fast_case, slow_case;
+ __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
+ __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ j(not_equal, &fast_case, Label::kNear);
+ __ CompareRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
+ __ j(not_equal, &fast_case, Label::kNear);
+ __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(0);
- // Check that the left hand is a JS object.
- __ JumpIfSmi(object, &not_js_object);
- __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
-
- // If there is a call site cache don't look in the global cache, but do the
- // real lookup and update the call site cache.
- if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
- // Look up the function and the map in the instanceof cache.
- Label miss;
- __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- __ j(not_equal, &miss, Label::kNear);
- __ CompareRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
- __ j(not_equal, &miss, Label::kNear);
- __ LoadRoot(eax, Heap::kInstanceofCacheAnswerRootIndex);
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
- __ bind(&miss);
- }
+ // If {object} is a smi we can safely return false if {function} is a JS
+ // function, otherwise we have to miss to the runtime and throw an exception.
+ __ bind(&object_is_smi);
+ __ JumpIfSmi(function, &slow_case);
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
+ __ j(not_equal, &slow_case);
+ __ LoadRoot(eax, Heap::kFalseValueRootIndex);
+ __ ret(0);
- // Get the prototype of the function.
- __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+ // Fast-case: The {function} must be a valid JSFunction.
+ __ bind(&fast_case);
+ __ JumpIfSmi(function, &slow_case);
+ __ CmpObjectType(function, JS_FUNCTION_TYPE, function_map);
+ __ j(not_equal, &slow_case);
- // Check that the function prototype is a JS object.
- __ JumpIfSmi(prototype, &slow);
- __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+ // Ensure that {function} has an instance prototype.
+ __ test_b(FieldOperand(function_map, Map::kBitFieldOffset),
+ static_cast<uint8_t>(1 << Map::kHasNonInstancePrototype));
+ __ j(not_zero, &slow_case);
- // Update the global instanceof or call site inlined cache with the current
- // map and function. The cached answer will be set when it is known below.
- if (!HasCallSiteInlineCheck()) {
- __ StoreRoot(map, scratch, Heap::kInstanceofCacheMapRootIndex);
- __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
- } else {
- // The constants for the code patching are based on no push instructions
- // at the call site.
- DCHECK(HasArgsInRegisters());
- // Get return address and delta to inlined map check.
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp1);
- __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCmp2);
- }
- __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
- __ mov(Operand(scratch, 0), map);
- __ push(map);
- // Scratch points at the cell payload. Calculate the start of the object.
- __ sub(scratch, Immediate(Cell::kValueOffset - 1));
- __ RecordWriteField(scratch, Cell::kValueOffset, map, function,
- kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ pop(map);
- }
+ // Ensure that {function} is not bound.
+ Register const shared_info = scratch;
+ __ mov(shared_info,
+ FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ BooleanBitTest(shared_info, SharedFunctionInfo::kCompilerHintsOffset,
+ SharedFunctionInfo::kBoundFunction);
+ __ j(not_zero, &slow_case);
- // Loop through the prototype chain of the object looking for the function
- // prototype.
- __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
- Label loop, is_instance, is_not_instance;
+ // Get the "prototype" (or initial map) of the {function}.
+ __ mov(function_prototype,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ AssertNotSmi(function_prototype);
+
+ // Resolve the prototype if the {function} has an initial map. Afterwards the
+ // {function_prototype} will be either the JSReceiver prototype object or the
+ // hole value, which means that no instances of the {function} were created so
+ // far and hence we should return false.
+ Label function_prototype_valid;
+ Register const function_prototype_map = scratch;
+ __ CmpObjectType(function_prototype, MAP_TYPE, function_prototype_map);
+ __ j(not_equal, &function_prototype_valid, Label::kNear);
+ __ mov(function_prototype,
+ FieldOperand(function_prototype, Map::kPrototypeOffset));
+ __ bind(&function_prototype_valid);
+ __ AssertNotSmi(function_prototype);
+
+ // Update the global instanceof cache with the current {object} map and
+ // {function}. The cached answer will be set when it is known below.
+ __ StoreRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
+ __ StoreRoot(object_map, scratch, Heap::kInstanceofCacheMapRootIndex);
+
+ // Loop through the prototype chain looking for the {function} prototype.
+ // Assume true, and change to false if not found.
+ Register const object_prototype = object_map;
+ Label done, loop;
+ __ mov(eax, isolate()->factory()->true_value());
__ bind(&loop);
- __ cmp(scratch, prototype);
- __ j(equal, &is_instance, Label::kNear);
- Factory* factory = isolate()->factory();
- __ cmp(scratch, Immediate(factory->null_value()));
- __ j(equal, &is_not_instance, Label::kNear);
- __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
- __ jmp(&loop);
-
- __ bind(&is_instance);
- if (!HasCallSiteInlineCheck()) {
- __ mov(eax, Immediate(0));
- __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ mov(eax, factory->true_value());
- }
- } else {
- // Get return address and delta to inlined map check.
- __ mov(eax, factory->true_value());
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
- }
- __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
- if (!ReturnTrueFalseObject()) {
- __ Move(eax, Immediate(0));
- }
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&is_not_instance);
- if (!HasCallSiteInlineCheck()) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
- if (ReturnTrueFalseObject()) {
- __ mov(eax, factory->false_value());
- }
- } else {
- // Get return address and delta to inlined map check.
- __ mov(eax, factory->false_value());
- __ mov(scratch, Operand(esp, 0 * kPointerSize));
- __ sub(scratch, Operand(esp, 1 * kPointerSize));
- if (FLAG_debug_code) {
- __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
- __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
- }
- __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
- if (!ReturnTrueFalseObject()) {
- __ Move(eax, Immediate(Smi::FromInt(1)));
- }
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- Label object_not_null, object_not_null_or_smi;
- __ bind(&not_js_object);
- // Before null, smi and string value checks, check that the rhs is a function
- // as for a non-function rhs an exception needs to be thrown.
- __ JumpIfSmi(function, &slow, Label::kNear);
- __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
- __ j(not_equal, &slow, Label::kNear);
-
- // Null is not instance of anything.
- __ cmp(object, factory->null_value());
- __ j(not_equal, &object_not_null, Label::kNear);
- if (ReturnTrueFalseObject()) {
- __ mov(eax, factory->false_value());
- } else {
- __ Move(eax, Immediate(Smi::FromInt(1)));
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&object_not_null);
- // Smi values is not instance of anything.
- __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
- if (ReturnTrueFalseObject()) {
- __ mov(eax, factory->false_value());
- } else {
- __ Move(eax, Immediate(Smi::FromInt(1)));
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
-
- __ bind(&object_not_null_or_smi);
- // String values is not instance of anything.
- Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
- __ j(NegateCondition(is_string), &slow, Label::kNear);
- if (ReturnTrueFalseObject()) {
- __ mov(eax, factory->false_value());
- } else {
- __ Move(eax, Immediate(Smi::FromInt(1)));
- }
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
+ __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object_prototype, function_prototype);
+ __ j(equal, &done, Label::kNear);
+ __ cmp(object_prototype, isolate()->factory()->null_value());
+ __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+ __ j(not_equal, &loop);
+ __ mov(eax, isolate()->factory()->false_value());
+ __ bind(&done);
+ __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+ __ ret(0);
- // Slow-case: Go through the JavaScript implementation.
- __ bind(&slow);
- if (!ReturnTrueFalseObject()) {
- // Tail call the builtin which returns 0 or 1.
- if (HasArgsInRegisters()) {
- // Push arguments below return address.
- __ pop(scratch);
- __ push(object);
- __ push(function);
- __ push(scratch);
- }
- __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
- } else {
- // Call the builtin and convert 0/1 to true/false.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(object);
- __ push(function);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- }
- Label true_value, done;
- __ test(eax, eax);
- __ j(zero, &true_value, Label::kNear);
- __ mov(eax, factory->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(eax, factory->true_value());
- __ bind(&done);
- __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
- }
+ // Slow-case: Call the runtime function.
+ __ bind(&slow_case);
+ __ pop(scratch); // Pop return address.
+ __ push(object); // Push {object}.
+ __ push(function); // Push {function}.
+ __ push(scratch); // Push return address.
+ __ TailCallRuntime(Runtime::kInstanceOf, 2, 1);
}
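The fast path above is an ordinary prototype-chain walk with a one-entry global cache in front of it. A minimal C++ model of the walk, with Obj standing in for the real heap layout:

struct Obj { const Obj* prototype; };  // assumed minimal heap layout

bool InstanceOf(const Obj* object, const Obj* function_prototype) {
  // Assume true; flip to false when the chain ends at null.
  for (const Obj* p = object->prototype; ; p = p->prototype) {
    if (p == function_prototype) return true;
    if (p == nullptr) return false;
  }
}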
@@ -3103,7 +2886,42 @@ void ToNumberStub::Generate(MacroAssembler* masm) {
__ pop(ecx); // Pop return address.
__ push(eax); // Push argument.
__ push(ecx); // Push return address.
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+ __ TailCallRuntime(Runtime::kToNumber, 1, 1);
+}
+
+
+void ToStringStub::Generate(MacroAssembler* masm) {
+ // The ToString stub takes one argument in eax.
+ Label is_number;
+ __ JumpIfSmi(eax, &is_number, Label::kNear);
+
+ Label not_string;
+ __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
+ // eax: receiver
+ // edi: receiver map
+ __ j(above_equal, &not_string, Label::kNear);
+ __ Ret();
+ __ bind(&not_string);
+
+ Label not_heap_number;
+ __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+ __ j(not_equal, &not_heap_number, Label::kNear);
+ __ bind(&is_number);
+ NumberToStringStub stub(isolate());
+ __ TailCallStub(&stub);
+ __ bind(&not_heap_number);
+
+ Label not_oddball;
+ __ CmpInstanceType(edi, ODDBALL_TYPE);
+ __ j(not_equal, &not_oddball, Label::kNear);
+ __ mov(eax, FieldOperand(eax, Oddball::kToStringOffset));
+ __ Ret();
+ __ bind(&not_oddball);
+
+ __ pop(ecx); // Pop return address.
+ __ push(eax); // Push argument.
+ __ push(ecx); // Push return address.
+ __ TailCallRuntime(Runtime::kToString, 1, 1);
}
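A compact C++ model of the dispatch order in ToStringStub::Generate; the Type enum and returned strings are illustrative only:

#include <string>

enum class Type { Smi, HeapNumber, String, Oddball, Other };

std::string ToStringDispatch(Type type, const std::string& value) {
  switch (type) {
    case Type::Smi:
    case Type::HeapNumber: return "NumberToStringStub(" + value + ")";
    case Type::String:     return value;                 // already a string
    case Type::Oddball:    return "Oddball::to_string";  // kToStringOffset
    default:               return "Runtime::kToString(" + value + ")";
  }
}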
@@ -3233,41 +3051,39 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
void StringCompareStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- // Stack frame on entry.
- // esp[0]: return address
- // esp[4]: right string
- // esp[8]: left string
-
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
- __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
+ // ----------- S t a t e -------------
+ // -- edx : left string
+ // -- eax : right string
+ // -- esp[0] : return address
+ // -----------------------------------
+ __ AssertString(edx);
+ __ AssertString(eax);
Label not_same;
__ cmp(edx, eax);
__ j(not_equal, &not_same, Label::kNear);
- STATIC_ASSERT(EQUAL == 0);
- STATIC_ASSERT(kSmiTag == 0);
__ Move(eax, Immediate(Smi::FromInt(EQUAL)));
__ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
- __ ret(2 * kPointerSize);
+ __ Ret();
__ bind(&not_same);
// Check that both objects are sequential one-byte strings.
+ Label runtime;
__ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx, &runtime);
// Compare flat one-byte strings.
- // Drop arguments from the stack.
- __ pop(ecx);
- __ add(esp, Immediate(2 * kPointerSize));
- __ push(ecx);
+ __ IncrementCounter(isolate()->counters()->string_compare_native(), 1);
StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
edi);
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ Push(eax);
+ __ PushReturnAddressFrom(ecx);
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}
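A sketch of the fast path in plain C++, with std::string standing in for sequential one-byte strings (anything else would bail to Runtime::kStringCompare):

#include <string>

int StringCompareFast(const std::string& left, const std::string& right) {
  if (&left == &right) return 0;  // cmp edx, eax -> Smi::FromInt(EQUAL)
  // The stub bails to the runtime unless both strings are sequential
  // one-byte; std::string is always "flat" in this model.
  int r = left.compare(right);
  return (r > 0) - (r < 0);       // -1 (less), 0 (equal), 1 (greater)
}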
@@ -3300,6 +3116,37 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
}
+void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
+ DCHECK_EQ(CompareICState::BOOLEAN, state());
+ Label miss;
+ Label::Distance const miss_distance =
+ masm->emit_debug_code() ? Label::kFar : Label::kNear;
+
+ __ JumpIfSmi(edx, &miss, miss_distance);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ JumpIfSmi(eax, &miss, miss_distance);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(ecx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
+ __ JumpIfNotRoot(ebx, Heap::kBooleanMapRootIndex, &miss, miss_distance);
+ if (op() != Token::EQ_STRICT && is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ if (!Token::IsEqualityOp(op())) {
+ __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
+ __ AssertSmi(eax);
+ __ mov(edx, FieldOperand(edx, Oddball::kToNumberOffset));
+ __ AssertSmi(edx);
+ __ xchg(eax, edx);
+ }
+ __ sub(eax, edx);
+ __ Ret();
+ }
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
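For booleans the stub can compare the two singleton pointers directly for equality, and for relational operators it subtracts the cached Smi values (0 and 1), with the xchg putting the operands into left - right order. A hedged C++ model:

int CompareBooleans(bool left, bool right, bool is_equality_op) {
  if (is_equality_op)
    return left == right ? 0 : 1;  // sub eax, edx on the two singletons
  // Relational: compare the cached Smi values; after the xchg the stub
  // computes left - right, negative meaning "less".
  return static_cast<int>(left) - static_cast<int>(right);
}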
void CompareICStub::GenerateSmis(MacroAssembler* masm) {
DCHECK(state() == CompareICState::SMI);
Label miss;
@@ -3589,15 +3436,24 @@ void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
__ JumpIfSmi(ecx, &miss, Label::kNear);
__ GetWeakValue(edi, cell);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ecx, edi);
+ __ cmp(edi, FieldOperand(eax, HeapObject::kMapOffset));
__ j(not_equal, &miss, Label::kNear);
- __ cmp(ebx, edi);
+ __ cmp(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ j(not_equal, &miss, Label::kNear);
- __ sub(eax, edx);
- __ ret(0);
+ if (Token::IsEqualityOp(op())) {
+ __ sub(eax, edx);
+ __ ret(0);
+ } else if (is_strong(strength())) {
+ __ TailCallRuntime(Runtime::kThrowStrongModeImplicitConversion, 0, 1);
+ } else {
+ __ PopReturnAddressTo(ecx);
+ __ Push(edx);
+ __ Push(eax);
+ __ Push(Immediate(Smi::FromInt(NegativeComparisonResult(GetCondition()))));
+ __ PushReturnAddressFrom(ecx);
+ __ TailCallRuntime(Runtime::kCompare, 3, 1);
+ }
__ bind(&miss);
GenerateMiss(masm);
@@ -4127,14 +3983,14 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
LoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, LoadWithVectorDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
KeyedLoadICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4358,14 +4214,14 @@ void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, VectorStoreICDescriptor::VectorRegister());
+ __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister());
VectorKeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
@@ -4381,11 +4237,180 @@ void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
+// The value is on the stack already.
+static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register receiver,
+ Register key, Register vector,
+ Register slot, Register feedback,
+ Label* miss) {
+ // feedback initially contains the feedback array
+ Label next, next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label start_polymorphic;
+ ExternalReference virtual_register =
+ ExternalReference::vector_store_virtual_register(masm->isolate());
+
+ __ push(receiver);
+ __ push(vector);
+
+ Register receiver_map = receiver;
+ Register cached_map = vector;
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &load_smi_map);
+ __ mov(receiver_map, FieldOperand(receiver, 0));
+ __ bind(&compare_map);
+ __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+
+ // A named keyed store might have a two-element array; all other cases can
+ // count on an array with at least two {map, handler} pairs, so they can go
+ // right into polymorphic array handling.
+ __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, &start_polymorphic);
+
+ // Found; now call the handler.
+ Register handler = feedback;
+ DCHECK(handler.is(VectorStoreICDescriptor::ValueRegister()));
+ __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+ __ pop(vector);
+ __ pop(receiver);
+ __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ mov(Operand::StaticVariable(virtual_register), handler);
+ __ pop(handler); // Pop "value".
+ __ jmp(Operand::StaticVariable(virtual_register));
+
+ // Polymorphic; we have to loop from 2 to N.
+
+ // TODO(mvstanton): I think there is a bug here, we are assuming the
+ // array has more than one map/handler pair, but we call this function in the
+ // keyed store with a string key case, where it might be just an array of two
+ // elements.
+
+ __ bind(&start_polymorphic);
+ __ push(key);
+ Register counter = key;
+ __ mov(counter, Immediate(Smi::FromInt(2)));
+ __ bind(&next_loop);
+ __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, &prepare_next);
+ __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ pop(key);
+ __ pop(vector);
+ __ pop(receiver);
+ __ mov(Operand::StaticVariable(virtual_register), handler);
+ __ pop(handler); // Pop "value".
+ __ jmp(Operand::StaticVariable(virtual_register));
+
+ __ bind(&prepare_next);
+ __ add(counter, Immediate(Smi::FromInt(2)));
+ __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+ __ j(less, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ pop(key);
+ __ pop(vector);
+ __ pop(receiver);
+ __ jmp(miss);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
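A C++ model of the pair scan above, with function pointers standing in for the code handlers and the virtual-register indirection elided:

#include <utility>
#include <vector>

using Map = const void*;
using Handler = void (*)();

bool PolymorphicStoreDispatch(Map receiver_map,
                              const std::vector<std::pair<Map, Handler>>& feedback) {
  for (const auto& entry : feedback) {   // counter advances by 2 per entry
    if (entry.first != receiver_map) continue;
    entry.second();                      // jmp through the virtual register
    return true;
  }
  return false;                          // fall through to miss
}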
+
+
+static void HandleMonomorphicStoreCase(MacroAssembler* masm, Register receiver,
+ Register key, Register vector,
+ Register slot, Register weak_cell,
+ Label* miss) {
+ // The store ic value is on the stack.
+ DCHECK(weak_cell.is(VectorStoreICDescriptor::ValueRegister()));
+ ExternalReference virtual_register =
+ ExternalReference::vector_store_virtual_register(masm->isolate());
+
+ // feedback initially contains the feedback array
+ Label compare_smi_map;
+
+ // Move the weak map into the weak_cell register.
+ Register ic_map = weak_cell;
+ __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &compare_smi_map);
+ __ cmp(ic_map, FieldOperand(receiver, 0));
+ __ j(not_equal, miss);
+ __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
+ // Put the store IC value back in its register.
+ __ mov(Operand::StaticVariable(virtual_register), weak_cell);
+ __ pop(weak_cell); // Pop "value".
+ // Jump to the handler.
+ __ jmp(Operand::StaticVariable(virtual_register));
+
+ // In microbenchmarks, it made sense to unroll this code so that the call to
+ // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+ __ bind(&compare_smi_map);
+ __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, miss);
+ __ mov(weak_cell, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ lea(weak_cell, FieldOperand(weak_cell, Code::kHeaderSize));
+ __ mov(Operand::StaticVariable(virtual_register), weak_cell);
+ __ pop(weak_cell); // Pop "value".
+ // Jump to the handler.
+ __ jmp(Operand::StaticVariable(virtual_register));
+}
+
+
void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // edx
+ Register key = VectorStoreICDescriptor::NameRegister(); // ecx
+ Register value = VectorStoreICDescriptor::ValueRegister(); // eax
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // ebx
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // edi
Label miss;
- // TODO(mvstanton): Implement.
+ __ push(value);
+
+ Register scratch = value;
+ __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay;
+ __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
+ __ j(not_equal, &try_array);
+ HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &not_array);
+ HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
+ __ j(not_equal, &miss);
+
+ __ pop(value);
+ __ push(slot);
+ __ push(vector);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::STORE_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, code_flags,
+ receiver, key, slot, no_reg);
+ __ pop(vector);
+ __ pop(slot);
+ Label no_pop_miss;
+ __ jmp(&no_pop_miss);
+
__ bind(&miss);
+ __ pop(value);
+ __ bind(&no_pop_miss);
StoreIC::GenerateMiss(masm);
}
@@ -4400,29 +4425,161 @@ void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
}
+static void HandlePolymorphicKeyedStoreCase(MacroAssembler* masm,
+ Register receiver, Register key,
+ Register vector, Register slot,
+ Register feedback, Label* miss) {
+ // On entry, feedback holds the polymorphic map/handler array.
+ Label next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label transition_call;
+ Label pop_and_miss;
+ ExternalReference virtual_register =
+ ExternalReference::vector_store_virtual_register(masm->isolate());
+
+ __ push(receiver);
+ __ push(vector);
+
+ Register receiver_map = receiver;
+ Register cached_map = vector;
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &load_smi_map);
+ __ mov(receiver_map, FieldOperand(receiver, 0));
+ __ bind(&compare_map);
+
+ // Polymorphic: loop over the feedback entries from 0 to N - 1.
+ __ push(key);
+ // On the stack we have:
+ // key (esp)
+ // vector
+ // receiver
+ // value
+ Register counter = key;
+ __ mov(counter, Immediate(Smi::FromInt(0)));
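+ // Each feedback entry is a (map, transition map or undefined, handler)
+ // triple, hence the stride of 3 when advancing the counter below.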
+ __ bind(&next_loop);
+ __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, &prepare_next);
+ __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ CompareRoot(cached_map, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &transition_call);
+ __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize + 2 * kPointerSize));
+ __ pop(key);
+ __ pop(vector);
+ __ pop(receiver);
+ __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
+ __ mov(Operand::StaticVariable(virtual_register), feedback);
+ __ pop(feedback); // Pop "value".
+ __ jmp(Operand::StaticVariable(virtual_register));
+
+ __ bind(&transition_call);
+ // Transition case: load the transition map and shuffle the registers and
+ // stack into the transition handler's calling convention.
+ __ mov(receiver, FieldOperand(cached_map, WeakCell::kValueOffset));
+ // The weak cell may have been cleared.
+ __ JumpIfSmi(receiver, &pop_and_miss);
+ // Swap slot onto the stack; the slot register now holds the return address.
+ __ xchg(slot, Operand(esp, 4 * kPointerSize));
+ // Load the handler into the feedback (value) register.
+ __ mov(feedback, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize + 2 * kPointerSize));
+ __ lea(feedback, FieldOperand(feedback, Code::kHeaderSize));
+ // Pop key into place.
+ __ pop(key);
+ // Put the return address back on top of the stack; vector goes in slot.
+ __ xchg(slot, Operand(esp, 0));
+ // Put the map on the stack; the receiver register holds the receiver again.
+ __ xchg(receiver, Operand(esp, 1 * kPointerSize));
+ // Put the vector on the stack; the slot register now holds the value.
+ __ xchg(slot, Operand(esp, 2 * kPointerSize));
+ // Finally feedback (the value register) holds value and slot the handler.
+ __ xchg(feedback, slot);
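+ // At the jump: edx = receiver, ecx = key, eax = value; the return address
+ // is back on top of the stack with the transition map, vector and slot
+ // below it, as VectorStoreTransitionDescriptor expects on ia32.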
+ __ jmp(slot);
+
+ __ bind(&prepare_next);
+ __ add(counter, Immediate(Smi::FromInt(3)));
+ __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+ __ j(less, &next_loop);
+
+ // We exhausted our array of (map, transition, handler) triples.
+ __ bind(&pop_and_miss);
+ __ pop(key);
+ __ pop(vector);
+ __ pop(receiver);
+ __ jmp(miss);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
+
+
void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorStoreICDescriptor::ReceiverRegister(); // edx
+ Register key = VectorStoreICDescriptor::NameRegister(); // ecx
+ Register value = VectorStoreICDescriptor::ValueRegister(); // eax
+ Register vector = VectorStoreICDescriptor::VectorRegister(); // ebx
+ Register slot = VectorStoreICDescriptor::SlotRegister(); // edi
Label miss;
- // TODO(mvstanton): Implement.
+ __ push(value);
+
+ Register scratch = value;
+ __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array;
+ __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
+ __ j(not_equal, &try_array);
+ HandleMonomorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &not_array);
+ HandlePolymorphicKeyedStoreCase(masm, receiver, key, vector, slot, scratch,
+ &miss);
+
+ __ bind(&not_array);
+ Label try_poly_name;
+ __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
+ __ j(not_equal, &try_poly_name);
+
+ __ pop(value);
+
+ Handle<Code> megamorphic_stub =
+ KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
+ __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ cmp(key, scratch);
+ __ j(not_equal, &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ HandlePolymorphicStoreCase(masm, receiver, key, vector, slot, scratch, &miss);
+
__ bind(&miss);
+ __ pop(value);
KeyedStoreIC::GenerateMiss(masm);
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, ebx);
+ __ EmitLoadTypeFeedbackVector(ebx);
CallICStub stub(isolate(), state());
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
}
-void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
- EmitLoadTypeFeedbackVector(masm, ebx);
- CallIC_ArrayStub stub(isolate(), state());
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
-}
-
-
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
diff --git a/deps/v8/src/x87/code-stubs-x87.h b/deps/v8/src/x87/code-stubs-x87.h
index eb8654dc3e..25fc4d7718 100644
--- a/deps/v8/src/x87/code-stubs-x87.h
+++ b/deps/v8/src/x87/code-stubs-x87.h
@@ -185,7 +185,7 @@ class RecordWriteStub: public PlatformCodeStub {
break;
}
DCHECK(GetMode(stub) == mode);
- CpuFeatures::FlushICache(stub->instruction_start(), 7);
+ Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(), 7);
}
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
diff --git a/deps/v8/src/x87/codegen-x87.cc b/deps/v8/src/x87/codegen-x87.cc
index ba8bef34e4..5df3f1f026 100644
--- a/deps/v8/src/x87/codegen-x87.cc
+++ b/deps/v8/src/x87/codegen-x87.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/x87/codegen-x87.h"
+
#if V8_TARGET_ARCH_X87
#include "src/codegen.h"
@@ -180,7 +182,7 @@ MemMoveFunction CreateMemMoveFunction() {
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(desc));
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICacheWithoutIsolate(buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
// TODO(jkummerow): It would be nice to register this code creation event
// with the PROFILE / GDBJIT system.
@@ -615,7 +617,7 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
if (age == kNoAgeCodeAge) {
isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
- CpuFeatures::FlushICache(sequence, young_length);
+ Assembler::FlushICache(isolate, sequence, young_length);
} else {
Code* stub = GetCodeAgeStub(isolate, age, parity);
CodePatcher patcher(sequence, young_length);
diff --git a/deps/v8/src/x87/frames-x87.cc b/deps/v8/src/x87/frames-x87.cc
index 6b4db97880..80e30a5afe 100644
--- a/deps/v8/src/x87/frames-x87.cc
+++ b/deps/v8/src/x87/frames-x87.cc
@@ -8,6 +8,7 @@
#include "src/frames.h"
#include "src/x87/assembler-x87-inl.h"
#include "src/x87/assembler-x87.h"
+#include "src/x87/frames-x87.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/x87/frames-x87.h b/deps/v8/src/x87/frames-x87.h
index c9e75e83ea..89e6ebda8c 100644
--- a/deps/v8/src/x87/frames-x87.h
+++ b/deps/v8/src/x87/frames-x87.h
@@ -39,6 +39,7 @@ class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -6 * kPointerSize;
+ static const int kNewTargetArgOffset = +2 * kPointerSize;
static const int kFunctionArgOffset = +3 * kPointerSize;
static const int kReceiverArgOffset = +4 * kPointerSize;
static const int kArgcOffset = +5 * kPointerSize;
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index 204e0bf619..3696235165 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -30,11 +30,22 @@ const Register VectorStoreICTrampolineDescriptor::SlotRegister() { return edi; }
const Register VectorStoreICDescriptor::VectorRegister() { return ebx; }
-const Register StoreTransitionDescriptor::MapRegister() {
- return FLAG_vector_stores ? no_reg : ebx;
+const Register VectorStoreTransitionDescriptor::SlotRegister() {
+ return no_reg;
}
+const Register VectorStoreTransitionDescriptor::VectorRegister() {
+ return no_reg;
+}
+
+
+const Register VectorStoreTransitionDescriptor::MapRegister() { return no_reg; }
+
+
+const Register StoreTransitionDescriptor::MapRegister() { return ebx; }
+
+
const Register LoadGlobalViaContextDescriptor::SlotRegister() { return ebx; }
@@ -42,14 +53,23 @@ const Register StoreGlobalViaContextDescriptor::SlotRegister() { return ebx; }
const Register StoreGlobalViaContextDescriptor::ValueRegister() { return eax; }
-const Register InstanceofDescriptor::left() { return eax; }
-const Register InstanceofDescriptor::right() { return edx; }
+const Register InstanceOfDescriptor::LeftRegister() { return edx; }
+const Register InstanceOfDescriptor::RightRegister() { return eax; }
+
+
+const Register StringCompareDescriptor::LeftRegister() { return edx; }
+const Register StringCompareDescriptor::RightRegister() { return eax; }
const Register ArgumentsAccessReadDescriptor::index() { return edx; }
const Register ArgumentsAccessReadDescriptor::parameter_count() { return eax; }
+const Register ArgumentsAccessNewDescriptor::function() { return edi; }
+const Register ArgumentsAccessNewDescriptor::parameter_count() { return ecx; }
+const Register ArgumentsAccessNewDescriptor::parameter_pointer() { return edx; }
+
+
const Register ApiGetterDescriptor::function_address() { return edx; }
@@ -65,17 +85,11 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
-void StoreTransitionDescriptor::InitializePlatformSpecific(
+void VectorStoreTransitionDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
- Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister(),
- MapRegister()};
-
- // When FLAG_vector_stores is true, we want to pass the map register on the
- // stack instead of in a register.
- DCHECK(FLAG_vector_stores || !MapRegister().is(no_reg));
-
- int register_count = FLAG_vector_stores ? 3 : 4;
- data->InitializePlatformSpecific(register_count, registers);
+ Register registers[] = {ReceiverRegister(), NameRegister(), ValueRegister()};
+ // The other three parameters are on the stack in ia32.
+ data->InitializePlatformSpecific(arraysize(registers), registers);
}
@@ -102,6 +116,10 @@ void ToNumberDescriptor::InitializePlatformSpecific(
// static
+const Register ToStringDescriptor::ReceiverRegister() { return eax; }
+
+
+// static
const Register ToObjectDescriptor::ReceiverRegister() { return eax; }
@@ -189,6 +207,15 @@ void CallConstructDescriptor::InitializePlatformSpecific(
}
+void CallTrampolineDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // edi : the target to call
+ Register registers[] = {edi, eax};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+
void RegExpConstructResultDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {ecx, ebx, eax};
@@ -371,6 +398,18 @@ void MathRoundVariantCallFromOptimizedCodeDescriptor::
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
+
+
+void PushArgsAndCallDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ Register registers[] = {
+ eax, // argument count (including receiver)
+ ebx, // address of first argument
+ edi // the target callable to call
+ };
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/x87/lithium-codegen-x87.cc b/deps/v8/src/x87/lithium-codegen-x87.cc
index 0c852640e8..921259e964 100644
--- a/deps/v8/src/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/x87/lithium-codegen-x87.cc
@@ -8,11 +8,11 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
-#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/x87/frames-x87.h"
#include "src/x87/lithium-codegen-x87.h"
@@ -77,7 +77,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
}
}
@@ -109,8 +109,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
- !info()->is_native() && info()->scope()->has_this_declaration()) {
+ if (info()->MustReplaceUndefinedReceiverWithGlobalProxy()) {
Label ok;
// +1 for return address.
int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
@@ -217,15 +216,30 @@ bool LCodeGen::GeneratePrologue() {
}
}
+ // Initialize FPU state.
+ __ fninit();
+
+ return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+ Comment(";;; Prologue begin");
+
// Possibly allocate a local context.
- int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
- if (heap_slots > 0) {
+ if (info_->num_heap_slots() > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in edi.
- DCHECK(!info()->scope()->is_script_scope());
- if (heap_slots <= FastNewContextStub::kMaximumSlots) {
- FastNewContextStub stub(isolate(), heap_slots);
+ int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+ if (info()->scope()->is_script_scope()) {
+ __ push(edi);
+ __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+ __ CallRuntime(Runtime::kNewScriptContext, 2);
+ deopt_mode = Safepoint::kLazyDeopt;
+ } else if (slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(isolate(), slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
@@ -233,7 +247,8 @@ bool LCodeGen::GeneratePrologue() {
__ push(edi);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
- RecordSafepoint(Safepoint::kNoLazyDeopt);
+ RecordSafepoint(deopt_mode);
+
// Context is returned in eax. It replaces the context passed to us.
// It's saved in the stack and kept live in esi.
__ mov(esi, eax);
@@ -267,15 +282,7 @@ bool LCodeGen::GeneratePrologue() {
Comment(";;; End allocate local context");
}
- // Initailize FPU state.
- __ fninit();
- // Trace the call.
- if (FLAG_trace && info()->IsOptimizing()) {
- // We have not executed any compiled code yet, so esi still holds the
- // incoming context.
- __ CallRuntime(Runtime::kTraceEnter, 0);
- }
- return !is_aborted();
+ Comment(";;; Prologue end");
}
@@ -497,7 +504,7 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateSafepointTable() {
DCHECK(is_done());
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// For lazy deoptimization we need space to patch a call after every call.
// Ensure there is always space for such patching, even if the code ends
// in a call.
@@ -1093,7 +1100,6 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
- DCHECK(info()->IsOptimizing() || info()->IsStub());
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
@@ -1366,11 +1372,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
- case CodeStub::StringCompare: {
- StringCompareStub stub(isolate());
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- break;
- }
default:
UNREACHABLE();
}
@@ -2330,6 +2331,17 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
}
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
+ int true_block = instr->TrueDestination(chunk_);
+ if (cc == no_condition) {
+ __ jmp(chunk_->GetAssemblyLabel(true_block));
+ } else {
+ __ j(cc, chunk_->GetAssemblyLabel(true_block));
+ }
+}
+
+
template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
int false_block = instr->FalseDestination(chunk_);
@@ -2634,40 +2646,6 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
}
-Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object) {
- __ JumpIfSmi(input, is_not_object);
-
- __ cmp(input, isolate()->factory()->null_value());
- __ j(equal, is_object);
-
- __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
- // Undetectable objects behave like undefined.
- __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
- __ j(not_zero, is_not_object);
-
- __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(below, is_not_object);
- __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- return below_equal;
-}
-
-
-void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Register reg = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- Condition true_cond = EmitIsObject(
- reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
-
- EmitBranch(instr, true_cond);
-}
-
-
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
@@ -2741,16 +2719,15 @@ static Condition ComputeCompareCondition(Token::Value op) {
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
- Token::Value op = instr->op();
-
- Handle<Code> ic =
- CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
+ DCHECK(ToRegister(instr->context()).is(esi));
+ DCHECK(ToRegister(instr->left()).is(edx));
+ DCHECK(ToRegister(instr->right()).is(eax));
- Condition condition = ComputeCompareCondition(op);
- __ test(eax, Operand(eax));
+ Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+ CallCode(code, RelocInfo::CODE_TARGET, instr);
+ __ test(eax, eax);
- EmitBranch(instr, condition);
+ EmitBranch(instr, ComputeCompareCondition(instr->op()));
}
@@ -2895,121 +2872,41 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
- // Object and function are in fixed registers defined by the stub.
DCHECK(ToRegister(instr->context()).is(esi));
- InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+ DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+ DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+ DCHECK(ToRegister(instr->result()).is(eax));
+ InstanceOfStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
-
- Label true_value, done;
- __ test(eax, Operand(eax));
- __ j(zero, &true_value, Label::kNear);
- __ mov(ToRegister(instr->result()), factory()->false_value());
- __ jmp(&done, Label::kNear);
- __ bind(&true_value);
- __ mov(ToRegister(instr->result()), factory()->true_value());
- __ bind(&done);
}
-void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
- class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
- public:
- DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
- LInstanceOfKnownGlobal* instr,
- const X87Stack& x87_stack)
- : LDeferredCode(codegen, x87_stack), instr_(instr) { }
- void Generate() override {
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
- }
- LInstruction* instr() override { return instr_; }
- Label* map_check() { return &map_check_; }
- private:
- LInstanceOfKnownGlobal* instr_;
- Label map_check_;
- };
-
- DeferredInstanceOfKnownGlobal* deferred;
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
-
- Label done, false_result;
- Register object = ToRegister(instr->value());
- Register temp = ToRegister(instr->temp());
-
- // A Smi is not an instance of anything.
- __ JumpIfSmi(object, &false_result, Label::kNear);
-
- // This is the inlined call site instanceof cache. The two occurences of the
- // hole value will be patched to the last map/result pair generated by the
- // instanceof stub.
- Label cache_miss;
- Register map = ToRegister(instr->temp());
- __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
- __ bind(deferred->map_check()); // Label for calculating code patching.
- Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
- __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map.
- __ j(not_equal, &cache_miss, Label::kNear);
- __ mov(eax, factory()->the_hole_value()); // Patched to either true or false.
- __ jmp(&done, Label::kNear);
-
- // The inlined call site cache did not match. Check for null and string
- // before calling the deferred code.
- __ bind(&cache_miss);
- // Null is not an instance of anything.
- __ cmp(object, factory()->null_value());
- __ j(equal, &false_result, Label::kNear);
-
- // String values are not instances of anything.
- Condition is_string = masm_->IsObjectStringType(object, temp, temp);
- __ j(is_string, &false_result, Label::kNear);
-
- // Go to the deferred code.
- __ jmp(deferred->entry());
-
- __ bind(&false_result);
- __ mov(ToRegister(instr->result()), factory()->false_value());
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+ LHasInPrototypeChainAndBranch* instr) {
+ Register const object = ToRegister(instr->object());
+ Register const object_map = ToRegister(instr->scratch());
+ Register const object_prototype = object_map;
+ Register const prototype = ToRegister(instr->prototype());
- // Here result has either true or false. Deferred code also produces true or
- // false object.
- __ bind(deferred->exit());
- __ bind(&done);
-}
-
-
-void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check) {
- PushSafepointRegistersScope scope(this);
-
- InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kArgsInRegisters);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kCallSiteInlineCheck);
- flags = static_cast<InstanceofStub::Flags>(
- flags | InstanceofStub::kReturnTrueFalseObject);
- InstanceofStub stub(isolate(), flags);
-
- // Get the temp register reserved by the instruction. This needs to be a
- // register which is pushed last by PushSafepointRegisters as top of the
- // stack is used to pass the offset to the location of the map check to
- // the stub.
- Register temp = ToRegister(instr->temp());
- DCHECK(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
- __ LoadHeapObject(InstanceofStub::right(), instr->function());
- static const int kAdditionalDelta = 13;
- int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
- __ mov(temp, Immediate(delta));
- __ StoreToSafepointRegisterSlot(temp, temp);
- CallCodeGeneric(stub.GetCode(),
- RelocInfo::CODE_TARGET,
- instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- // Get the deoptimization index of the LLazyBailout-environment that
- // corresponds to this instruction.
- LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
- safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+ // The {object} must be a spec object. It's sufficient to know that {object}
+ // is not a smi, since all other non-spec objects have {null} prototypes and
+ // will be ruled out below.
+ if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+ __ test(object, Immediate(kSmiTagMask));
+ EmitFalseBranch(instr, zero);
+ }
- // Put the result value into the eax slot and restore all registers.
- __ StoreToSafepointRegisterSlot(eax, eax);
+ // Loop through the {object}'s prototype chain looking for the {prototype}.
+ __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
+ Label loop;
+ __ bind(&loop);
+ __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
+ __ cmp(object_prototype, prototype);
+ EmitTrueBranch(instr, equal);
+ __ cmp(object_prototype, factory()->null_value());
+ EmitFalseBranch(instr, equal);
+ __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+ __ jmp(&loop);
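+ // The walk terminates because every prototype chain ends at null, which
+ // EmitFalseBranch above catches.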
}
@@ -3708,11 +3605,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
// Change context.
__ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
- // Set eax to arguments count if adaption is not needed. Assumes that eax
- // is available to write to at this point.
- if (dont_adapt_arguments) {
- __ mov(eax, arity);
- }
+ // Always initialize eax to the number of actual arguments.
+ __ mov(eax, arity);
// Invoke function directly.
if (function.is_identical_to(info()->closure())) {
@@ -3774,9 +3668,7 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(edi));
DCHECK(ToRegister(instr->result()).is(eax));
- if (instr->hydrogen()->pass_argument_count()) {
- __ mov(eax, instr->arity());
- }
+ __ mov(eax, instr->arity());
// Change context.
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
@@ -6001,7 +5893,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
// eax = regexp literal clone.
// esi = context.
int literal_offset =
- FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+ LiteralsArray::OffsetOfLiteralAt(instr->hydrogen()->literal_index());
__ LoadHeapObject(ecx, instr->hydrogen()->literals());
__ mov(ebx, FieldOperand(ecx, literal_offset));
__ cmp(ebx, factory()->undefined_value());
@@ -6044,26 +5936,6 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
}
-void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
- DCHECK(ToRegister(instr->context()).is(esi));
- // Use the fast case closure allocation code that allocates in new
- // space for nested functions that don't need literals cloning.
- bool pretenure = instr->hydrogen()->pretenure();
- if (!pretenure && instr->hydrogen()->has_no_literals()) {
- FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
- instr->hydrogen()->kind());
- __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- } else {
- __ push(esi);
- __ push(Immediate(instr->hydrogen()->shared_info()));
- __ push(Immediate(pretenure ? factory()->true_value()
- : factory()->false_value()));
- CallRuntime(Runtime::kNewClosure, 3, instr);
- }
-}
-
-
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(esi));
DCHECK(ToRegister(instr->value()).is(ebx));
@@ -6134,24 +6006,24 @@ Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
final_branch_condition = not_zero;
} else if (String::Equals(type_name, factory()->function_string())) {
- STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label, false_distance);
- __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
- __ j(equal, true_label, true_distance);
- __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
+ // Check for callable and not undetectable objects => true.
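+ // (bit_field & (callable | undetectable)) == 1 << kIsCallable holds
+ // exactly when the callable bit is set and the undetectable bit is clear.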
+ __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+ __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
+ __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+ __ cmp(input, 1 << Map::kIsCallable);
final_branch_condition = equal;
} else if (String::Equals(type_name, factory()->object_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ cmp(input, factory()->null_value());
__ j(equal, true_label, true_distance);
- __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, input);
__ j(below, false_label, false_distance);
- __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ j(above, false_label, false_distance);
- // Check for undetectable objects => false.
+ // Check for callable or undetectable objects => false.
__ test_b(FieldOperand(input, Map::kBitFieldOffset),
- 1 << Map::kIsUndetectable);
+ (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
final_branch_condition = zero;
// clang-format off
@@ -6199,7 +6071,7 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
- if (!info()->IsStub()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
diff --git a/deps/v8/src/x87/lithium-codegen-x87.h b/deps/v8/src/x87/lithium-codegen-x87.h
index 9157779a95..2da1a31461 100644
--- a/deps/v8/src/x87/lithium-codegen-x87.h
+++ b/deps/v8/src/x87/lithium-codegen-x87.h
@@ -138,8 +138,6 @@ class LCodeGen: public LCodeGenBase {
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredAllocate(LAllocate* instr);
- void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
- Label* map_check);
void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register object,
@@ -287,7 +285,9 @@ class LCodeGen: public LCodeGenBase {
// EmitBranch expects to be the last instruction of a block.
template<class InstrType>
void EmitBranch(InstrType instr, Condition cc);
- template<class InstrType>
+ template <class InstrType>
+ void EmitTrueBranch(InstrType instr, Condition cc);
+ template <class InstrType>
void EmitFalseBranch(InstrType instr, Condition cc);
void EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input,
Register temp, X87Register res_reg,
@@ -298,14 +298,6 @@ class LCodeGen: public LCodeGenBase {
// true and false label should be made, to optimize fallthrough.
Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
- // Emits optimized code for %_IsObject(x). Preserves input register.
- // Returns the condition on which a final split to
- // true and false label should be made, to optimize fallthrough.
- Condition EmitIsObject(Register input,
- Register temp1,
- Label* is_not_object,
- Label* is_object);
-
// Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
diff --git a/deps/v8/src/x87/lithium-x87.cc b/deps/v8/src/x87/lithium-x87.cc
index d382e4f6d5..cb429b2f21 100644
--- a/deps/v8/src/x87/lithium-x87.cc
+++ b/deps/v8/src/x87/lithium-x87.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/x87/lithium-x87.h"
+
#include <sstream>
#if V8_TARGET_ARCH_X87
@@ -198,13 +200,6 @@ void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
}
-void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
- stream->Add("if is_object(");
- value()->PrintTo(stream);
- stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
-}
-
-
void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if is_string(");
value()->PrintTo(stream);
@@ -978,28 +973,25 @@ void LChunkBuilder::AddInstruction(LInstruction* instr,
}
chunk_->AddInstruction(instr, current_block_);
- if (instr->IsCall()) {
+ if (instr->IsCall() || instr->IsPrologue()) {
HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
- LInstruction* instruction_needing_environment = NULL;
if (hydrogen_val->HasObservableSideEffects()) {
HSimulate* sim = HSimulate::cast(hydrogen_val->next());
- instruction_needing_environment = instr;
sim->ReplayEnvironment(current_block_->last_environment());
hydrogen_value_for_lazy_bailout = sim;
}
LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
chunk_->AddInstruction(bailout, current_block_);
- if (instruction_needing_environment != NULL) {
- // Store the lazy deopt environment with the instruction if needed.
- // Right now it is only used for LInstanceOfKnownGlobal.
- instruction_needing_environment->
- SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
- }
}
}
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+ return new (zone()) LPrologue();
+}
+
+
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
return new(zone()) LGoto(instr->FirstSuccessor());
}
@@ -1052,22 +1044,22 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
- LOperand* left = UseFixed(instr->left(), InstanceofStub::left());
- LOperand* right = UseFixed(instr->right(), InstanceofStub::right());
+ LOperand* left =
+ UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+ LOperand* right =
+ UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
LOperand* context = UseFixed(instr->context(), esi);
- LInstanceOf* result = new(zone()) LInstanceOf(context, left, right);
+ LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
return MarkAsCall(DefineFixed(result, eax), instr);
}
-LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
- HInstanceOfKnownGlobal* instr) {
- LInstanceOfKnownGlobal* result =
- new(zone()) LInstanceOfKnownGlobal(
- UseFixed(instr->context(), esi),
- UseFixed(instr->left(), InstanceofStub::left()),
- FixedTemp(edi));
- return MarkAsCall(DefineFixed(result, eax), instr);
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+ HHasInPrototypeChainAndBranch* instr) {
+ LOperand* object = UseRegister(instr->object());
+ LOperand* prototype = UseRegister(instr->prototype());
+ LOperand* temp = TempRegister();
+ return new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
}
@@ -1749,13 +1741,6 @@ LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
}
-LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
- DCHECK(instr->value()->representation().IsSmiOrTagged());
- LOperand* temp = TempRegister();
- return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
-}
-
-
LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
DCHECK(instr->value()->representation().IsTagged());
LOperand* temp = TempRegister();
@@ -2557,13 +2542,6 @@ LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LFunctionLiteral(context), eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
DCHECK(argument_count_ == 0);
allocator_->MarkAsOsrEntry();
diff --git a/deps/v8/src/x87/lithium-x87.h b/deps/v8/src/x87/lithium-x87.h
index 3e6f67af16..cc1a43fbaf 100644
--- a/deps/v8/src/x87/lithium-x87.h
+++ b/deps/v8/src/x87/lithium-x87.h
@@ -84,19 +84,17 @@ class LCodeGen;
V(FlooringDivI) \
V(ForInCacheArray) \
V(ForInPrepareMap) \
- V(FunctionLiteral) \
V(GetCachedArrayIndex) \
V(Goto) \
V(HasCachedArrayIndexAndBranch) \
+ V(HasInPrototypeChainAndBranch) \
V(HasInstanceTypeAndBranch) \
V(InnerAllocatedObject) \
V(InstanceOf) \
- V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCallAndBranch) \
- V(IsObjectAndBranch) \
V(IsStringAndBranch) \
V(IsSmiAndBranch) \
V(IsUndetectableAndBranch) \
@@ -135,6 +133,7 @@ class LCodeGen;
V(OsrEntry) \
V(Parameter) \
V(Power) \
+ V(Prologue) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@@ -237,8 +236,6 @@ class LInstruction : public ZoneObject {
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
-
void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
bool IsCall() const { return IsCallBits::decode(bit_field_); }
@@ -413,6 +410,12 @@ class LGoto final : public LTemplateInstruction<0, 0, 0> {
};
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
@@ -1006,22 +1009,6 @@ class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 0> {
};
-class LIsObjectAndBranch final : public LControlInstruction<1, 1> {
- public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
-
- void PrintDataTo(StringStream* stream) override;
-};
-
-
class LIsStringAndBranch final : public LControlInstruction<1, 1> {
public:
LIsStringAndBranch(LOperand* value, LOperand* temp) {
@@ -1080,7 +1067,7 @@ class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
inputs_[2] = right;
}
- LOperand* context() { return inputs_[1]; }
+ LOperand* context() { return inputs_[0]; }
LOperand* left() { return inputs_[1]; }
LOperand* right() { return inputs_[2]; }
@@ -1199,39 +1186,30 @@ class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
inputs_[2] = right;
}
- LOperand* context() { return inputs_[0]; }
+ LOperand* context() const { return inputs_[0]; }
+ LOperand* left() const { return inputs_[1]; }
+ LOperand* right() const { return inputs_[2]; }
DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
};
-class LInstanceOfKnownGlobal final : public LTemplateInstruction<1, 2, 1> {
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
public:
- LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[0] = context;
- inputs_[1] = value;
- temps_[0] = temp;
+ LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
+ LOperand* scratch) {
+ inputs_[0] = object;
+ inputs_[1] = prototype;
+ temps_[0] = scratch;
}
- LOperand* context() { return inputs_[0]; }
- LOperand* value() { return inputs_[1]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
- "instance-of-known-global")
- DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
-
- Handle<JSFunction> function() const { return hydrogen()->function(); }
- LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
- return lazy_deopt_env_;
- }
- virtual void SetDeferredLazyDeoptimizationEnvironment(
- LEnvironment* env) override {
- lazy_deopt_env_ = env;
- }
+ LOperand* object() const { return inputs_[0]; }
+ LOperand* prototype() const { return inputs_[1]; }
+ LOperand* scratch() const { return temps_[0]; }
- private:
- LEnvironment* lazy_deopt_env_;
+ DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+ "has-in-prototype-chain-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
};
@@ -2605,19 +2583,6 @@ class LRegExpLiteral final : public LTemplateInstruction<1, 1, 0> {
};
-class LFunctionLiteral final : public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFunctionLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
- DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
-};
-
-
class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index 1fab3aa7a3..c34a47a251 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -8,10 +8,10 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
-#include "src/cpu-profiler.h"
#include "src/debug/debug.h"
#include "src/runtime/runtime.h"
#include "src/x87/frames-x87.h"
+#include "src/x87/macro-assembler-x87.h"
namespace v8 {
namespace internal {
@@ -66,8 +66,7 @@ void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
if (isolate()->heap()->RootCanBeTreatedAsConstant(index)) {
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- mov(destination, value);
+ mov(destination, isolate()->heap()->root_handle(index));
return;
}
ExternalReference roots_array_start =
@@ -105,16 +104,20 @@ void MacroAssembler::CompareRoot(Register with,
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
+ cmp(with, isolate()->heap()->root_handle(index));
}
void MacroAssembler::CompareRoot(const Operand& with,
Heap::RootListIndex index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
- Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
- cmp(with, value);
+ cmp(with, isolate()->heap()->root_handle(index));
+}
+
+
+void MacroAssembler::PushRoot(Heap::RootListIndex index) {
+ DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+ Push(isolate()->heap()->root_handle(index));
}
@@ -667,26 +670,6 @@ Condition MacroAssembler::IsObjectNameType(Register heap_object,
}
-void MacroAssembler::IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail) {
- mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
- IsInstanceJSObjectType(map, scratch, fail);
-}
-
-
-void MacroAssembler::IsInstanceJSObjectType(Register map,
- Register scratch,
- Label* fail) {
- movzx_b(scratch, FieldOperand(map, Map::kInstanceTypeOffset));
- sub(scratch, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- cmp(scratch,
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- j(above, fail);
-}
-
-
void MacroAssembler::FCmp() {
fucompp();
push(eax);
@@ -798,6 +781,18 @@ void MacroAssembler::AssertName(Register object) {
}
+void MacroAssembler::AssertFunction(Register object) {
+ if (emit_debug_code()) {
+ test(object, Immediate(kSmiTagMask));
+ Check(not_equal, kOperandIsASmiAndNotAFunction);
+ Push(object);
+ CmpObjectType(object, JS_FUNCTION_TYPE, object);
+ Pop(object);
+ Check(equal, kOperandIsNotAFunction);
+ }
+}
+
+
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
Label done_checking;
@@ -845,6 +840,13 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
+void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
+ mov(vector, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ mov(vector, FieldOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+ mov(vector, FieldOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// Out-of-line constant pool not implemented on x87.
@@ -1752,42 +1754,17 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
Label done, loop;
mov(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
bind(&loop);
- JumpIfSmi(result, &done);
+ JumpIfSmi(result, &done, Label::kNear);
CmpObjectType(result, MAP_TYPE, temp);
- j(not_equal, &done);
+ j(not_equal, &done, Label::kNear);
mov(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
jmp(&loop);
bind(&done);
}
-void MacroAssembler::TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function) {
- Label non_instance;
- if (miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
-
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss);
-
- // If a bound function, go to miss label.
- mov(scratch,
- FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
- BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
- SharedFunctionInfo::kBoundFunction);
- j(not_zero, miss);
-
- // Make sure that the function has an instance prototype.
- movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
- test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance);
- }
-
+void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss) {
// Get the prototype or initial map from the function.
mov(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -1801,20 +1778,11 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// If the function does not have an initial map, we're done.
Label done;
CmpObjectType(result, MAP_TYPE, scratch);
- j(not_equal, &done);
+ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
mov(result, FieldOperand(result, Map::kPrototypeOffset));
- if (miss_on_bound_function) {
- jmp(&done);
-
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- GetMapConstructor(result, result, scratch);
- }
-
// All done.
bind(&done);
}
@@ -1926,10 +1894,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Label invoke;
if (expected.is_immediate()) {
DCHECK(actual.is_immediate());
+ mov(eax, actual.immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
- mov(eax, actual.immediate());
const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
if (expected.immediate() == sentinel) {
// Don't worry about adapting arguments for builtins that
@@ -1947,10 +1915,10 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// Expected is in register, actual is immediate. This is the
// case when we invoke function values without going through the
// IC mechanism.
+ mov(eax, actual.immediate());
cmp(expected.reg(), actual.immediate());
j(equal, &invoke);
DCHECK(expected.reg().is(ebx));
- mov(eax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
@@ -1958,6 +1926,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
j(equal, &invoke);
DCHECK(actual.reg().is(eax));
DCHECK(expected.reg().is(ebx));
+ } else {
+ Move(eax, actual.reg());
}
}
@@ -2058,8 +2028,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
}
-void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
DCHECK(flag == JUMP_FUNCTION || has_frame());
@@ -2068,26 +2037,26 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
// arguments match the expected number of arguments. Fake a
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
- GetBuiltinFunction(edi, id);
+ GetBuiltinFunction(edi, native_context_index);
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
expected, expected, flag, call_wrapper);
}
void MacroAssembler::GetBuiltinFunction(Register target,
- Builtins::JavaScript id) {
+ int native_context_index) {
// Load the JavaScript builtin function from the builtins object.
mov(target, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
- mov(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
- mov(target, FieldOperand(target,
- JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+ mov(target, FieldOperand(target, GlobalObject::kNativeContextOffset));
+ mov(target, ContextOperand(target, native_context_index));
}
-void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+void MacroAssembler::GetBuiltinEntry(Register target,
+ int native_context_index) {
DCHECK(!target.is(edi));
// Load the JavaScript builtin function from the builtins object.
- GetBuiltinFunction(edi, id);
+ GetBuiltinFunction(edi, native_context_index);
// Load the code entry point from the function into the target register.
mov(target, FieldOperand(edi, JSFunction::kCodeEntryOffset));
}
@@ -2119,6 +2088,12 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadGlobalProxy(Register dst) {
+ mov(dst, GlobalObjectOperand());
+ mov(dst, FieldOperand(dst, GlobalObject::kGlobalProxyOffset));
+}
+
+
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
@@ -2501,82 +2476,6 @@ void MacroAssembler::LoadAccessor(Register dst, Register holder,
}
-void MacroAssembler::LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch1;
- Register scratch = scratch2;
-
- // Load the number string cache.
- LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
- sub(mask, Immediate(1)); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Label smi_hash_calculated;
- Label load_result_from_cache;
- Label not_smi;
- STATIC_ASSERT(kSmiTag == 0);
- JumpIfNotSmi(object, &not_smi, Label::kNear);
- mov(scratch, object);
- SmiUntag(scratch);
- jmp(&smi_hash_calculated, Label::kNear);
- bind(&not_smi);
- cmp(FieldOperand(object, HeapObject::kMapOffset),
- isolate()->factory()->heap_number_map());
- j(not_equal, not_found);
- STATIC_ASSERT(8 == kDoubleSize);
- mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
- xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
- // Object is heap number and hash is now in scratch. Calculate cache index.
- and_(scratch, mask);
- Register index = scratch;
- Register probe = mask;
- mov(probe,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- JumpIfSmi(probe, not_found);
- fld_d(FieldOperand(object, HeapNumber::kValueOffset));
- fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
- FCmp();
- j(parity_even, not_found); // Bail out if NaN is involved.
- j(not_equal, not_found); // The cache did not contain this value.
- jmp(&load_result_from_cache, Label::kNear);
-
- bind(&smi_hash_calculated);
- // Object is smi and hash is now in scratch. Calculate cache index.
- and_(scratch, mask);
- // Check if the entry is the smi we are looking for.
- cmp(object,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize));
- j(not_equal, not_found);
-
- // Get the result from the cache.
- bind(&load_result_from_cache);
- mov(result,
- FieldOperand(number_string_cache,
- index,
- times_twice_pointer_size,
- FixedArray::kHeaderSize + kPointerSize));
- IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
-}
-
-
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
Register instance_type, Register scratch, Label* failure) {
if (!scratch.is(instance_type)) {
@@ -2753,7 +2652,7 @@ CodePatcher::CodePatcher(byte* address, int size)
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CpuFeatures::FlushICache(address_, size_);
+ Assembler::FlushICacheWithoutIsolate(address_, size_);
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index 9a2c903ab0..f1a8f82fe8 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -26,6 +26,9 @@ const Register kInterpreterDispatchTableRegister = {kRegister_ebx_Code};
const Register kRuntimeCallFunctionRegister = {kRegister_ebx_Code};
const Register kRuntimeCallArgCountRegister = {kRegister_eax_Code};
+// Spill slots used by interpreter dispatch calling convention.
+const int kInterpreterContextSpillSlot = -1;
+
// Convenience for platform-independent signatures. We do not normally
// distinguish memory operands from other operands on ia32.
typedef Operand MemOperand;
@@ -68,6 +71,16 @@ class MacroAssembler: public Assembler {
void Load(Register dst, const Operand& src, Representation r);
void Store(Register src, const Operand& dst, Representation r);
+ // Load a register with a 32-bit immediate as efficiently as possible.
+ void Set(Register dst, int32_t x) {
+ if (x == 0) {
+ xor_(dst, dst);
+ } else {
+ mov(dst, Immediate(x));
+ }
+ }
+ void Set(const Operand& dst, int32_t x) { mov(dst, Immediate(x)); }
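+ // Note: xor clears dst in two bytes versus a five-byte mov of a zero
+ // immediate, but clobbers EFLAGS; callers must not rely on flags being
+ // preserved across Set().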
+
// Operations on roots in the root-array.
void LoadRoot(Register destination, Heap::RootListIndex index);
void StoreRoot(Register source, Register scratch, Heap::RootListIndex index);
@@ -76,6 +89,22 @@ class MacroAssembler: public Assembler {
// and not in new space).
void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(const Operand& with, Heap::RootListIndex index);
+ void PushRoot(Heap::RootListIndex index);
+
+ // Compare the value in a register against a root and jump if they are equal.
+ void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal,
+ Label::Distance if_equal_distance = Label::kNear) {
+ CompareRoot(with, index);
+ j(equal, if_equal, if_equal_distance);
+ }
+
+ // Compare the value in a register against a root and jump if they are not
+ // equal.
+ void JumpIfNotRoot(Register with, Heap::RootListIndex index,
+ Label* if_not_equal,
+ Label::Distance if_not_equal_distance = Label::kNear) {
+ CompareRoot(with, index);
+ j(not_equal, if_not_equal, if_not_equal_distance);
+ }
// ---------------------------------------------------------------------------
// GC Support
@@ -237,6 +266,9 @@ class MacroAssembler: public Assembler {
// Find the function context up the context chain.
void LoadContext(Register dst, int context_chain_length);
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst);
+
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
@@ -327,17 +359,15 @@ class MacroAssembler: public Assembler {
InvokeFlag flag,
const CallWrapper& call_wrapper);
- // Invoke specified builtin JavaScript function. Adds an entry to
- // the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id,
- InvokeFlag flag,
+ // Invoke specified builtin JavaScript function.
+ void InvokeBuiltin(int native_context_index, InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the function for the given builtin in the target register.
- void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+ void GetBuiltinFunction(Register target, int native_context_index);
// Store the code object for the given builtin in the target register.
- void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ void GetBuiltinEntry(Register target, int native_context_index);
// Expression support
// Support for constant splitting.
@@ -417,18 +447,6 @@ class MacroAssembler: public Assembler {
Register map,
Register instance_type);
- // Check if a heap object's type is in the JSObject range, not including
- // JSFunction. The object's map will be loaded in the map register.
- // Any or all of the three registers may be the same.
- // The contents of the scratch register will always be overwritten.
- void IsObjectJSObjectType(Register heap_object,
- Register map,
- Register scratch,
- Label* fail);
-
- // The contents of the scratch register will be overwritten.
- void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
-
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, and jz).
void FCmp();
@@ -541,6 +559,9 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a name, enabled via --debug-code.
void AssertName(Register object);
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+ void AssertFunction(Register object);
+
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
@@ -693,11 +714,8 @@ class MacroAssembler: public Assembler {
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other registers may be
// clobbered.
- void TryGetFunctionPrototype(Register function,
- Register result,
- Register scratch,
- Label* miss,
- bool miss_on_bound_function = false);
+ void TryGetFunctionPrototype(Register function, Register result,
+ Register scratch, Label* miss);
// Picks out an array index from the hash field.
// Register use:
@@ -780,8 +798,14 @@ class MacroAssembler: public Assembler {
void Drop(int element_count);
void Call(Label* target) { call(target); }
+ void Call(Handle<Code> target, RelocInfo::Mode rmode) { call(target, rmode); }
+ void Jump(Handle<Code> target, RelocInfo::Mode rmode) { jmp(target, rmode); }
void Push(Register src) { push(src); }
+ void Push(const Operand& src) { push(src); }
+ void Push(Immediate value) { push(value); }
void Pop(Register dst) { pop(dst); }
+ void PushReturnAddressFrom(Register src) { push(src); }
+ void PopReturnAddressTo(Register dst) { pop(dst); }
void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
void Lzcnt(Register dst, const Operand& src);
@@ -853,17 +877,6 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// String utilities.
- // Generate code to do a lookup in the number string cache. If the number in
- // the register object is found in the cache the generated code falls through
- // with the result in the result register. The object and the result register
- // can be the same. If the number is not found in the cache the code jumps to
- // the label not_found with only the content of register object unchanged.
- void LookupNumberStringCache(Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Label* not_found);
-
// Check whether the instance type represents a flat one-byte string. Jump to
// the label if not. If the instance type can be scratched, specify the same
// register for both instance type and scratch.
@@ -895,6 +908,9 @@ class MacroAssembler: public Assembler {
return SafepointRegisterStackIndex(reg.code());
}
+ // Load the type feedback vector from a JavaScript frame.
+ void EmitLoadTypeFeedbackVector(Register vector);
+
// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg);
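
One detail in the new Set() helper earlier in this file: zeroing is done with xor_(dst, dst) rather than a move of immediate zero. On ia32 the xor form is a shorter encoding and CPUs recognize it as a dependency-breaking zero idiom, so it is both smaller and at least as fast. A commented restatement of the helper (encodings shown for eax as an example):

// Load a 32-bit immediate, preferring the short zeroing idiom:
//   xor eax, eax  ->  31 C0            (2 bytes, recognized zero idiom)
//   mov eax, 0    ->  B8 00 00 00 00   (5 bytes)
void Set(Register dst, int32_t x) {
  if (x == 0) {
    xor_(dst, dst);          // smaller encoding, breaks dependencies
  } else {
    mov(dst, Immediate(x));  // general case
  }
}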
diff --git a/deps/v8/src/x87/simulator-x87.cc b/deps/v8/src/x87/simulator-x87.cc
index 20edae83a2..cb5652b581 100644
--- a/deps/v8/src/x87/simulator-x87.cc
+++ b/deps/v8/src/x87/simulator-x87.cc
@@ -2,5 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/x87/simulator-x87.h"
// Since there is no simulator for the ia32 architecture, this file is empty.
diff --git a/deps/v8/src/zone-type-cache.h b/deps/v8/src/zone-type-cache.h
new file mode 100644
index 0000000000..bdc4388009
--- /dev/null
+++ b/deps/v8/src/zone-type-cache.h
@@ -0,0 +1,98 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ZONE_TYPE_CACHE_H_
+#define V8_ZONE_TYPE_CACHE_H_
+
+
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+
+class ZoneTypeCache final {
+ private:
+ // This has to be first for the initialization magic to work.
+ Zone zone_;
+
+ public:
+ ZoneTypeCache() = default;
+
+ Type* const kInt8 =
+ CreateNative(CreateRange<int8_t>(), Type::UntaggedSigned8());
+ Type* const kUint8 =
+ CreateNative(CreateRange<uint8_t>(), Type::UntaggedUnsigned8());
+ Type* const kUint8Clamped = kUint8;
+ Type* const kInt16 =
+ CreateNative(CreateRange<int16_t>(), Type::UntaggedSigned16());
+ Type* const kUint16 =
+ CreateNative(CreateRange<uint16_t>(), Type::UntaggedUnsigned16());
+ Type* const kInt32 = CreateNative(Type::Signed32(), Type::UntaggedSigned32());
+ Type* const kUint32 =
+ CreateNative(Type::Unsigned32(), Type::UntaggedUnsigned32());
+ Type* const kFloat32 = CreateNative(Type::Number(), Type::UntaggedFloat32());
+ Type* const kFloat64 = CreateNative(Type::Number(), Type::UntaggedFloat64());
+
+ Type* const kSingletonZero = CreateRange(0.0, 0.0);
+ Type* const kSingletonOne = CreateRange(1.0, 1.0);
+ Type* const kZeroOrOne = CreateRange(0.0, 1.0);
+ Type* const kZeroish =
+ Type::Union(kSingletonZero, Type::MinusZeroOrNaN(), zone());
+ Type* const kInteger = CreateRange(-V8_INFINITY, V8_INFINITY);
+ Type* const kWeakint = Type::Union(kInteger, Type::MinusZeroOrNaN(), zone());
+ Type* const kWeakintFunc1 = Type::Function(kWeakint, Type::Number(), zone());
+
+ Type* const kRandomFunc0 = Type::Function(Type::OrderedNumber(), zone());
+ Type* const kAnyFunc0 = Type::Function(Type::Any(), zone());
+ Type* const kAnyFunc1 = Type::Function(Type::Any(), Type::Any(), zone());
+ Type* const kAnyFunc2 =
+ Type::Function(Type::Any(), Type::Any(), Type::Any(), zone());
+ Type* const kAnyFunc3 = Type::Function(Type::Any(), Type::Any(), Type::Any(),
+ Type::Any(), zone());
+ Type* const kNumberFunc0 = Type::Function(Type::Number(), zone());
+ Type* const kNumberFunc1 =
+ Type::Function(Type::Number(), Type::Number(), zone());
+ Type* const kNumberFunc2 =
+ Type::Function(Type::Number(), Type::Number(), Type::Number(), zone());
+ Type* const kImulFunc = Type::Function(Type::Signed32(), Type::Integral32(),
+ Type::Integral32(), zone());
+ Type* const kClz32Func =
+ Type::Function(CreateRange(0, 32), Type::Number(), zone());
+
+#define TYPED_ARRAY(TypeName, type_name, TYPE_NAME, ctype, size) \
+ Type* const k##TypeName##Array = CreateArray(k##TypeName);
+ TYPED_ARRAYS(TYPED_ARRAY)
+#undef TYPED_ARRAY
+
+ private:
+ Type* CreateArray(Type* element) { return Type::Array(element, zone()); }
+
+ Type* CreateArrayFunction(Type* array) {
+ Type* arg1 = Type::Union(Type::Unsigned32(), Type::Object(), zone());
+ Type* arg2 = Type::Union(Type::Unsigned32(), Type::Undefined(), zone());
+ Type* arg3 = arg2;
+ return Type::Function(array, arg1, arg2, arg3, zone());
+ }
+
+ Type* CreateNative(Type* semantic, Type* representation) {
+ return Type::Intersect(semantic, representation, zone());
+ }
+
+ template <typename T>
+ Type* CreateRange() {
+ return CreateRange(std::numeric_limits<T>::min(),
+ std::numeric_limits<T>::max());
+ }
+
+ Type* CreateRange(double min, double max) {
+ return Type::Range(min, max, zone());
+ }
+
+ Zone* zone() { return &zone_; }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_ZONE_TYPE_CACHE_H_
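
The new zone-type-cache.h leans on a C++11 idiom worth spelling out: zone_ is declared before all the Type* members so that the in-class initializers, which run in declaration order, can allocate from it (the "initialization magic" the comment mentions). A self-contained sketch of the same idiom with stand-in types; Arena is hypothetical and merely plays the role of v8::internal::Zone:

#include <cassert>
#include <deque>

// Arena hands out pointers that stay valid for the arena's lifetime.
class Arena {
 public:
  int* Allocate(int value) {
    storage_.push_back(value);
    return &storage_.back();  // deque references are stable across push_back
  }

 private:
  std::deque<int> storage_;
};

// Same declaration-order trick as ZoneTypeCache: arena_ comes first, so the
// in-class initializers below may safely call arena().
class Cache {
 private:
  Arena arena_;  // must precede every member initialized via arena()

 public:
  Cache() = default;
  int* const kZero = arena()->Allocate(0);
  int* const kOne = arena()->Allocate(1);

 private:
  Arena* arena() { return &arena_; }
};

int main() {
  Cache cache;
  assert(*cache.kZero == 0);
  assert(*cache.kOne == 1);
}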
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index 72be29c383..05f276d3f4 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -173,6 +173,20 @@ int main(int argc, char* argv[]) {
#endif // V8_CC_MSVC
#endif // V8_OS_WIN
+ // Hack to print cctest-specific flags.
+ for (int i = 1; i < argc; i++) {
+ char* arg = argv[i];
+ if ((strcmp(arg, "--help") == 0) || (strcmp(arg, "-h") == 0)) {
+ printf("Usage: %s [--list] [[V8_FLAGS] CCTEST]\n", argv[0]);
+ printf("\n");
+ printf("Options:\n");
+ printf(" --list: list all cctests\n");
+ printf(" CCTEST: cctest identfier returned by --list\n");
+ printf(" D8_FLAGS: see d8 output below\n");
+ printf("\n\n");
+ }
+ }
+
v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 10207c1038..a7092a8bdc 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -68,12 +68,14 @@
'compiler/test-osr.cc',
'compiler/test-pipeline.cc',
'compiler/test-representation-change.cc',
+ 'compiler/test-run-bytecode-graph-builder.cc',
'compiler/test-run-deopt.cc',
'compiler/test-run-inlining.cc',
'compiler/test-run-intrinsics.cc',
'compiler/test-run-jsbranches.cc',
'compiler/test-run-jscalls.cc',
'compiler/test-run-jsexceptions.cc',
+ 'compiler/test-run-jsobjects.cc',
'compiler/test-run-jsops.cc',
'compiler/test-run-machops.cc',
'compiler/test-run-native-calls.cc',
@@ -83,20 +85,26 @@
'compiler/test-run-variables.cc',
'compiler/test-simplified-lowering.cc',
'cctest.cc',
+ 'expression-type-collector.cc',
+ 'expression-type-collector.h',
'interpreter/test-bytecode-generator.cc',
'interpreter/test-interpreter.cc',
'gay-fixed.cc',
'gay-precision.cc',
'gay-shortest.cc',
+ 'heap-tester.h',
'print-extension.cc',
'profiler-extension.cc',
'test-accessors.cc',
'test-alloc.cc',
'test-api.cc',
'test-api.h',
+ 'test-api-accessors.cc',
'test-api-interceptors.cc',
'test-array-list.cc',
'test-ast.cc',
+ 'test-ast-expression-visitor.cc',
+ 'test-asm-validator.cc',
'test-atomicops.cc',
'test-bignum.cc',
'test-bignum-dtoa.cc',
@@ -128,6 +136,7 @@
'test-heap-profiler.cc',
'test-hydrogen-types.cc',
'test-identity-map.cc',
+ 'test-incremental-marking.cc',
'test-list.cc',
'test-liveedit.cc',
'test-lockers.cc',
@@ -147,6 +156,7 @@
'test-sampler-api.cc',
'test-serialize.cc',
'test-simd.cc',
+ 'test-slots-buffer.cc',
'test-spaces.cc',
'test-strings.cc',
'test-symbols.cc',
@@ -156,6 +166,7 @@
'test-transitions.cc',
'test-typedarrays.cc',
'test-types.cc',
+ 'test-typing-reset.cc',
'test-unbound-queue.cc',
'test-unboxed-doubles.cc',
'test-unique.cc',
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 5c19195208..b609036f56 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -29,6 +29,8 @@
#define CCTEST_H_
#include "include/libplatform/libplatform.h"
+#include "src/isolate-inl.h" // TODO(everyone): Make cctest IWYU.
+#include "src/objects-inl.h" // TODO(everyone): Make cctest IWYU.
#include "src/v8.h"
#ifndef TEST
@@ -88,21 +90,6 @@ typedef v8::internal::EnumSet<CcTestExtensionIds> CcTestExtensionFlags;
#undef DEFINE_EXTENSION_FLAG
-// Use this to expose protected methods in i::Heap.
-class TestHeap : public i::Heap {
- public:
- using i::Heap::AllocateByteArray;
- using i::Heap::AllocateFixedArray;
- using i::Heap::AllocateHeapNumber;
- using i::Heap::AllocateFloat32x4;
- using i::Heap::AllocateJSObject;
- using i::Heap::AllocateJSObjectFromMap;
- using i::Heap::AllocateMap;
- using i::Heap::CopyCode;
- using i::Heap::kInitialNumberStringCacheSize;
-};
-
-
class CcTest {
public:
typedef void (TestFunction)();
@@ -136,10 +123,6 @@ class CcTest {
return i_isolate()->heap();
}
- static TestHeap* test_heap() {
- return reinterpret_cast<TestHeap*>(i_isolate()->heap());
- }
-
static v8::base::RandomNumberGenerator* random_number_generator() {
return InitIsolateOnce()->random_number_generator();
}
@@ -288,18 +271,17 @@ class RegisterThreadedTest {
// A LocalContext holds a reference to a v8::Context.
class LocalContext {
public:
- LocalContext(v8::Isolate* isolate,
- v8::ExtensionConfiguration* extensions = 0,
- v8::Handle<v8::ObjectTemplate> global_template =
- v8::Handle<v8::ObjectTemplate>(),
- v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>()) {
+ LocalContext(v8::Isolate* isolate, v8::ExtensionConfiguration* extensions = 0,
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate>(),
+ v8::Local<v8::Value> global_object = v8::Local<v8::Value>()) {
Initialize(isolate, extensions, global_template, global_object);
}
LocalContext(v8::ExtensionConfiguration* extensions = 0,
- v8::Handle<v8::ObjectTemplate> global_template =
- v8::Handle<v8::ObjectTemplate>(),
- v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>()) {
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::Local<v8::ObjectTemplate>(),
+ v8::Local<v8::Value> global_object = v8::Local<v8::Value>()) {
Initialize(CcTest::isolate(), extensions, global_template, global_object);
}
@@ -320,10 +302,9 @@ class LocalContext {
}
private:
- void Initialize(v8::Isolate* isolate,
- v8::ExtensionConfiguration* extensions,
- v8::Handle<v8::ObjectTemplate> global_template,
- v8::Handle<v8::Value> global_object) {
+ void Initialize(v8::Isolate* isolate, v8::ExtensionConfiguration* extensions,
+ v8::Local<v8::ObjectTemplate> global_template,
+ v8::Local<v8::Value> global_object) {
v8::HandleScope scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate,
extensions,
@@ -354,7 +335,9 @@ static inline v8::Local<v8::Value> v8_num(double x) {
static inline v8::Local<v8::String> v8_str(const char* x) {
- return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), x);
+ return v8::String::NewFromUtf8(v8::Isolate::GetCurrent(), x,
+ v8::NewStringType::kNormal)
+ .ToLocalChecked();
}
@@ -363,13 +346,18 @@ static inline v8::Local<v8::Symbol> v8_symbol(const char* name) {
}
-static inline v8::Local<v8::Script> v8_compile(const char* x) {
- return v8::Script::Compile(v8_str(x));
+static inline v8::Local<v8::Script> v8_compile(v8::Local<v8::String> x) {
+ v8::Local<v8::Script> result;
+ if (v8::Script::Compile(v8::Isolate::GetCurrent()->GetCurrentContext(), x)
+ .ToLocal(&result)) {
+ return result;
+ }
+ return v8::Local<v8::Script>();
}
-static inline v8::Local<v8::Script> v8_compile(v8::Local<v8::String> x) {
- return v8::Script::Compile(x);
+static inline v8::Local<v8::Script> v8_compile(const char* x) {
+ return v8_compile(v8_str(x));
}
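
Most of the cctest.h churn above and below is mechanical conversion to V8's maybe-based API: Script::Compile and Run now return MaybeLocal<T>, and callers either test ToLocal(&out) or assert success with ToLocalChecked(). A toy standalone analogue of the two calling conventions; MaybeHandle and Compile here are stand-ins, not V8's types:

#include <cassert>
#include <optional>
#include <string>
#include <utility>

// Toy analogue of v8::MaybeLocal<T>. ToLocal() hands failure back to the
// caller; ToLocalChecked() turns failure into an immediate crash.
template <typename T>
class MaybeHandle {
 public:
  MaybeHandle() = default;
  explicit MaybeHandle(T value) : value_(std::move(value)) {}

  bool ToLocal(T* out) const {
    if (!value_) return false;
    *out = *value_;
    return true;
  }

  T ToLocalChecked() const {
    assert(value_.has_value());  // V8 aborts the process here instead
    return *value_;
  }

 private:
  std::optional<T> value_;
};

// Stand-in for Script::Compile: fails when ok is false.
MaybeHandle<std::string> Compile(bool ok) {
  return ok ? MaybeHandle<std::string>("code") : MaybeHandle<std::string>();
}

int main() {
  std::string script;
  if (Compile(true).ToLocal(&script)) {
    // use script, as the new v8_compile() does
  }
  assert(!Compile(false).ToLocal(&script));          // failure is observable
  assert(Compile(true).ToLocalChecked() == "code");  // or asserted away
}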
@@ -378,7 +366,8 @@ static inline v8::Local<v8::Script> CompileWithOrigin(
v8::ScriptOrigin origin(origin_url);
v8::ScriptCompiler::Source script_source(source, origin);
return v8::ScriptCompiler::Compile(
- v8::Isolate::GetCurrent(), &script_source);
+ v8::Isolate::GetCurrent()->GetCurrentContext(), &script_source)
+ .ToLocalChecked();
}
@@ -395,20 +384,42 @@ static inline v8::Local<v8::Script> CompileWithOrigin(const char* source,
// Helper functions that compile and run the source.
-static inline v8::Local<v8::Value> CompileRun(const char* source) {
- return v8::Script::Compile(v8_str(source))->Run();
+static inline v8::MaybeLocal<v8::Value> CompileRun(
+ v8::Local<v8::Context> context, const char* source) {
+ return v8::Script::Compile(context, v8_str(source))
+ .ToLocalChecked()
+ ->Run(context);
+}
+
+
+static inline v8::Local<v8::Value> CompileRun(v8::Local<v8::String> source) {
+ v8::Local<v8::Value> result;
+ if (v8_compile(source)
+ ->Run(v8::Isolate::GetCurrent()->GetCurrentContext())
+ .ToLocal(&result)) {
+ return result;
+ }
+ return v8::Local<v8::Value>();
}
// Helper functions that compile and run the source.
-static inline v8::MaybeLocal<v8::Value> CompileRun(
- v8::Local<v8::Context> context, const char* source) {
- return v8::Script::Compile(v8_str(source))->Run(context);
+static inline v8::Local<v8::Value> CompileRun(const char* source) {
+ return CompileRun(v8_str(source));
}
-static inline v8::Local<v8::Value> CompileRun(v8::Local<v8::String> source) {
- return v8::Script::Compile(source)->Run();
+static inline v8::Local<v8::Value> CompileRun(
+ v8::Local<v8::Context> context, v8::ScriptCompiler::Source* script_source,
+ v8::ScriptCompiler::CompileOptions options) {
+ v8::Local<v8::Value> result;
+ if (v8::ScriptCompiler::Compile(context, script_source, options)
+ .ToLocalChecked()
+ ->Run(context)
+ .ToLocal(&result)) {
+ return result;
+ }
+ return v8::Local<v8::Value>();
}
@@ -416,16 +427,18 @@ static inline v8::Local<v8::Value> ParserCacheCompileRun(const char* source) {
// Compile once just to get the preparse data, then compile the second time
// using the data.
v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
v8::ScriptCompiler::Source script_source(v8_str(source));
- v8::ScriptCompiler::Compile(isolate, &script_source,
- v8::ScriptCompiler::kProduceParserCache);
+ v8::ScriptCompiler::Compile(context, &script_source,
+ v8::ScriptCompiler::kProduceParserCache)
+ .ToLocalChecked();
// Check whether we received cached data, and if so use it.
v8::ScriptCompiler::CompileOptions options =
script_source.GetCachedData() ? v8::ScriptCompiler::kConsumeParserCache
: v8::ScriptCompiler::kNoCompileOptions;
- return v8::ScriptCompiler::Compile(isolate, &script_source, options)->Run();
+ return CompileRun(context, &script_source, options);
}
@@ -435,20 +448,24 @@ static inline v8::Local<v8::Value> CompileRunWithOrigin(const char* source,
int line_number,
int column_number) {
v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
v8::ScriptOrigin origin(v8_str(origin_url),
v8::Integer::New(isolate, line_number),
v8::Integer::New(isolate, column_number));
v8::ScriptCompiler::Source script_source(v8_str(source), origin);
- return v8::ScriptCompiler::Compile(isolate, &script_source)->Run();
+ return CompileRun(context, &script_source,
+ v8::ScriptCompiler::CompileOptions());
}
static inline v8::Local<v8::Value> CompileRunWithOrigin(
v8::Local<v8::String> source, const char* origin_url) {
+ v8::Isolate* isolate = v8::Isolate::GetCurrent();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
v8::ScriptCompiler::Source script_source(
source, v8::ScriptOrigin(v8_str(origin_url)));
- return v8::ScriptCompiler::Compile(v8::Isolate::GetCurrent(), &script_source)
- ->Run();
+ return CompileRun(context, &script_source,
+ v8::ScriptCompiler::CompileOptions());
}
@@ -470,14 +487,18 @@ static inline void ExpectString(const char* code, const char* expected) {
static inline void ExpectInt32(const char* code, int expected) {
v8::Local<v8::Value> result = CompileRun(code);
CHECK(result->IsInt32());
- CHECK_EQ(expected, result->Int32Value());
+ CHECK_EQ(expected,
+ result->Int32Value(v8::Isolate::GetCurrent()->GetCurrentContext())
+ .FromJust());
}
static inline void ExpectBoolean(const char* code, bool expected) {
v8::Local<v8::Value> result = CompileRun(code);
CHECK(result->IsBoolean());
- CHECK_EQ(expected, result->BooleanValue());
+ CHECK_EQ(expected,
+ result->BooleanValue(v8::Isolate::GetCurrent()->GetCurrentContext())
+ .FromJust());
}
@@ -504,8 +525,14 @@ static inline void ExpectUndefined(const char* code) {
}
+static inline void DisableInlineAllocationSteps(v8::internal::NewSpace* space) {
+ space->LowerInlineAllocationLimit(0);
+}
+
+
// Helper function that simulates a full new-space in the heap.
static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
+ DisableInlineAllocationSteps(space);
v8::internal::AllocationResult allocation = space->AllocateRawUnaligned(
v8::internal::Page::kMaxRegularHeapObjectSize);
if (allocation.IsRetry()) return false;
@@ -520,6 +547,7 @@ static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
// Helper function that simulates a full new-space in the heap.
static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
int extra_bytes) {
+ DisableInlineAllocationSteps(space);
int space_remaining = static_cast<int>(*space->allocation_limit_address() -
*space->allocation_top_address());
CHECK(space_remaining >= extra_bytes);
@@ -555,7 +583,8 @@ static inline void SimulateFullSpace(v8::internal::PagedSpace* space) {
// Helper function that simulates many incremental marking steps until
// marking is completed.
-static inline void SimulateIncrementalMarking(i::Heap* heap) {
+static inline void SimulateIncrementalMarking(i::Heap* heap,
+ bool force_completion = true) {
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
if (collector->sweeping_in_progress()) {
@@ -563,9 +592,11 @@ static inline void SimulateIncrementalMarking(i::Heap* heap) {
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
- marking->Start(i::Heap::kNoGCFlags);
+ heap->StartIncrementalMarking();
}
CHECK(marking->IsMarking());
+ if (!force_completion) return;
+
while (!marking->IsComplete()) {
marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
if (marking->IsReadyToOverApproximateWeakClosure()) {
@@ -590,8 +621,8 @@ static inline void DisableDebugger() { v8::Debug::SetDebugEventListener(NULL); }
static inline void EmptyMessageQueues(v8::Isolate* isolate) {
while (v8::platform::PumpMessageLoop(v8::internal::V8::GetCurrentPlatform(),
- isolate))
- ;
+ isolate)) {
+ }
}
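
The SimulateIncrementalMarking change above adds a force_completion escape hatch: tests can now start incremental marking and return with marking still in flight instead of always stepping it to completion. The control flow, reduced to its shape with a fake marker (FakeMarking is a placeholder, not V8's IncrementalMarking):

#include <cassert>

struct FakeMarking {
  bool stopped = true;
  int steps = 0;
  bool IsStopped() const { return stopped; }
  void Start() { stopped = false; }
  bool IsComplete() const { return steps >= 3; }
  void Step() { ++steps; }
};

template <typename Marking>
void SimulateIncrementalMarking(Marking* m, bool force_completion = true) {
  if (m->IsStopped()) m->Start();
  if (!force_completion) return;  // new: leave marking in progress
  while (!m->IsComplete()) m->Step();
}

int main() {
  FakeMarking a, b;
  SimulateIncrementalMarking(&a);         // driven to completion
  SimulateIncrementalMarking(&b, false);  // merely started
  assert(a.IsComplete());
  assert(!b.IsComplete() && !b.IsStopped());
}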
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 013d0bf0da..eb67611623 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -130,9 +130,6 @@
# TODO(machenbach, mvstanton): Flaky in debug on all platforms.
'test-lockers/LockerUnlocker': [PASS, ['mode == debug', FLAKY]],
-
- # BUG(4141).
- 'test-alloc/CodeRange': [PASS, FLAKY],
}], # ALWAYS
##############################################################################
@@ -214,9 +211,6 @@
##############################################################################
['system == windows', {
- # BUG(3005).
- 'test-alloc/CodeRange': [PASS, FAIL],
-
# BUG(3331). Fails on windows.
'test-heap/NoWeakHashTableLeakWithIncrementalMarking': [SKIP],
@@ -282,7 +276,7 @@
}], # 'arch == mips'
##############################################################################
-['arch == mips64el', {
+['arch == mips64el or arch == mips64', {
'test-cpu-profiler/CollectDeoptEvents': [PASS, FAIL],
# BUG(v8:3154).
@@ -293,7 +287,7 @@
'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
'test-serialize/DeserializeAndRunScript2': [SKIP],
'test-serialize/DeserializeFromSecondSerialization': [SKIP],
-}], # 'arch == mips64el'
+}], # 'arch == mips64el or arch == mips64'
##############################################################################
['arch == x87', {
diff --git a/deps/v8/test/cctest/compiler/codegen-tester.h b/deps/v8/test/cctest/compiler/codegen-tester.h
index d8ecc02fc2..9ca1066ecd 100644
--- a/deps/v8/test/cctest/compiler/codegen-tester.h
+++ b/deps/v8/test/cctest/compiler/codegen-tester.h
@@ -58,8 +58,9 @@ class RawMachineAssemblerTester : public HandleAndZoneScope,
Schedule* schedule = this->Export();
CallDescriptor* call_descriptor = this->call_descriptor();
Graph* graph = this->graph();
- code_ = Pipeline::GenerateCodeForTesting(this->isolate(), call_descriptor,
- graph, schedule);
+ CompilationInfo info("testing", main_isolate(), main_zone());
+ code_ = Pipeline::GenerateCodeForTesting(&info, call_descriptor, graph,
+ schedule);
}
return this->code_.ToHandleChecked()->entry();
}
@@ -206,7 +207,7 @@ class CompareWrapper {
explicit CompareWrapper(IrOpcode::Value op) : opcode(op) {}
Node* MakeNode(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
- return m->NewNode(op(m->machine()), a, b);
+ return m->AddNode(op(m->machine()), a, b);
}
const Operator* op(MachineOperatorBuilder* machine) {
diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h
index 56ab514c65..c2b25e5547 100644
--- a/deps/v8/test/cctest/compiler/function-tester.h
+++ b/deps/v8/test/cctest/compiler/function-tester.h
@@ -31,9 +31,9 @@ class FunctionTester : public InitializedHandleScope {
function((FLAG_allow_natives_syntax = true, NewFunction(source))),
flags_(flags) {
Compile(function);
- const uint32_t supported_flags = CompilationInfo::kContextSpecializing |
- CompilationInfo::kInliningEnabled |
- CompilationInfo::kTypingEnabled;
+ const uint32_t supported_flags =
+ CompilationInfo::kFunctionContextSpecializing |
+ CompilationInfo::kInliningEnabled | CompilationInfo::kTypingEnabled;
CHECK_EQ(0u, flags_ & ~supported_flags);
}
@@ -51,13 +51,13 @@ class FunctionTester : public InitializedHandleScope {
MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b) {
Handle<Object> args[] = {a, b};
- return Execution::Call(isolate, function, undefined(), 2, args, false);
+ return Execution::Call(isolate, function, undefined(), 2, args);
}
MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b, Handle<Object> c,
Handle<Object> d) {
Handle<Object> args[] = {a, b, c, d};
- return Execution::Call(isolate, function, undefined(), 4, args, false);
+ return Execution::Call(isolate, function, undefined(), 4, args);
}
void CheckThrows(Handle<Object> a, Handle<Object> b) {
@@ -69,8 +69,8 @@ class FunctionTester : public InitializedHandleScope {
isolate->OptionalRescheduleException(true);
}
- v8::Handle<v8::Message> CheckThrowsReturnMessage(Handle<Object> a,
- Handle<Object> b) {
+ v8::Local<v8::Message> CheckThrowsReturnMessage(Handle<Object> a,
+ Handle<Object> b) {
TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
MaybeHandle<Object> no_result = Call(a, b);
CHECK(isolate->has_pending_exception());
@@ -122,12 +122,12 @@ class FunctionTester : public InitializedHandleScope {
Handle<JSFunction> NewFunction(const char* source) {
return v8::Utils::OpenHandle(
- *v8::Handle<v8::Function>::Cast(CompileRun(source)));
+ *v8::Local<v8::Function>::Cast(CompileRun(source)));
}
Handle<JSObject> NewObject(const char* source) {
return v8::Utils::OpenHandle(
- *v8::Handle<v8::Object>::Cast(CompileRun(source)));
+ *v8::Local<v8::Object>::Cast(CompileRun(source)));
}
Handle<String> Val(const char* string) {
@@ -161,8 +161,8 @@ class FunctionTester : public InitializedHandleScope {
CHECK(Parser::ParseStatic(info.parse_info()));
info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
- if (flags_ & CompilationInfo::kContextSpecializing) {
- info.MarkAsContextSpecializing();
+ if (flags_ & CompilationInfo::kFunctionContextSpecializing) {
+ info.MarkAsFunctionContextSpecializing();
}
if (flags_ & CompilationInfo::kInliningEnabled) {
info.MarkAsInliningEnabled();
diff --git a/deps/v8/test/cctest/compiler/graph-builder-tester.h b/deps/v8/test/cctest/compiler/graph-builder-tester.h
index 41c1e384be..077b71f17b 100644
--- a/deps/v8/test/cctest/compiler/graph-builder-tester.h
+++ b/deps/v8/test/cctest/compiler/graph-builder-tester.h
@@ -104,8 +104,7 @@ class GraphBuilderTester : public HandleAndZoneScope,
return NewNode(common()->Int32Constant(value));
}
Node* HeapConstant(Handle<HeapObject> object) {
- Unique<HeapObject> val = Unique<HeapObject>::CreateUninitialized(object);
- return NewNode(common()->HeapConstant(val));
+ return NewNode(common()->HeapConstant(object));
}
Node* BooleanNot(Node* a) { return NewNode(simplified()->BooleanNot(), a); }
@@ -273,7 +272,8 @@ class GraphBuilderTester : public HandleAndZoneScope,
Zone* zone = graph()->zone();
CallDescriptor* desc =
Linkage::GetSimplifiedCDescriptor(zone, this->csig_);
- code_ = Pipeline::GenerateCodeForTesting(main_isolate(), desc, graph());
+ CompilationInfo info("testing", main_isolate(), main_zone());
+ code_ = Pipeline::GenerateCodeForTesting(&info, desc, graph());
#ifdef ENABLE_DISASSEMBLER
if (!code_.is_null() && FLAG_print_opt_code) {
OFStream os(stdout);
diff --git a/deps/v8/test/cctest/compiler/test-graph-visualizer.cc b/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
index 2d6338d060..19228fa2f0 100644
--- a/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
+++ b/deps/v8/test/cctest/compiler/test-graph-visualizer.cc
@@ -20,8 +20,10 @@
using namespace v8::internal;
using namespace v8::internal::compiler;
-static Operator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
- "dummy", 0, 0, 0, 1, 0, 0);
+static Operator dummy_operator1(IrOpcode::kParameter, Operator::kNoWrite,
+ "dummy", 1, 0, 0, 1, 0, 0);
+static Operator dummy_operator6(IrOpcode::kParameter, Operator::kNoWrite,
+ "dummy", 6, 0, 0, 1, 0, 0);
TEST(NodeWithNullInputReachableFromEnd) {
@@ -106,18 +108,18 @@ TEST(NodeNetworkOfDummiesReachableFromEnd) {
Node* start = graph.NewNode(common.Start(0));
graph.SetStart(start);
- Node* n2 = graph.NewNode(&dummy_operator, graph.start());
- Node* n3 = graph.NewNode(&dummy_operator, graph.start());
- Node* n4 = graph.NewNode(&dummy_operator, n2);
- Node* n5 = graph.NewNode(&dummy_operator, n2);
- Node* n6 = graph.NewNode(&dummy_operator, n3);
- Node* n7 = graph.NewNode(&dummy_operator, n3);
- Node* n8 = graph.NewNode(&dummy_operator, n5);
- Node* n9 = graph.NewNode(&dummy_operator, n5);
- Node* n10 = graph.NewNode(&dummy_operator, n9);
- Node* n11 = graph.NewNode(&dummy_operator, n9);
+ Node* n2 = graph.NewNode(&dummy_operator1, graph.start());
+ Node* n3 = graph.NewNode(&dummy_operator1, graph.start());
+ Node* n4 = graph.NewNode(&dummy_operator1, n2);
+ Node* n5 = graph.NewNode(&dummy_operator1, n2);
+ Node* n6 = graph.NewNode(&dummy_operator1, n3);
+ Node* n7 = graph.NewNode(&dummy_operator1, n3);
+ Node* n8 = graph.NewNode(&dummy_operator1, n5);
+ Node* n9 = graph.NewNode(&dummy_operator1, n5);
+ Node* n10 = graph.NewNode(&dummy_operator1, n9);
+ Node* n11 = graph.NewNode(&dummy_operator1, n9);
Node* end_dependencies[6] = {n4, n8, n10, n11, n6, n7};
- Node* end = graph.NewNode(&dummy_operator, 6, end_dependencies);
+ Node* end = graph.NewNode(&dummy_operator6, 6, end_dependencies);
graph.SetEnd(end);
OFStream os(stdout);
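
The dummy_operator split in this file (and in test-node.cc further down) exists because node creation now expects an operator's declared value-input count to match the inputs actually supplied, so a single arity-0 dummy no longer works everywhere. A minimal stand-in showing the invariant being enforced; Operator, Node, and NewNode here are simplified sketches, not V8's Graph types:

#include <cassert>
#include <utility>
#include <vector>

// An Operator declares how many value inputs it takes; node creation
// asserts the caller supplied exactly that many.
struct Operator {
  const char* mnemonic;
  int value_input_count;
};

struct Node {
  const Operator* op;
  std::vector<Node*> inputs;
};

Node* NewNode(const Operator* op, std::vector<Node*> inputs) {
  assert(static_cast<int>(inputs.size()) == op->value_input_count);
  return new Node{op, std::move(inputs)};  // leaked; fine for a sketch
}

int main() {
  static Operator dummy0{"dummy", 0};
  static Operator dummy1{"dummy", 1};
  Node* start = NewNode(&dummy0, {});
  Node* n2 = NewNode(&dummy1, {start});
  (void)n2;
  // NewNode(&dummy1, {}) or NewNode(&dummy0, {start}) would trip the
  // assert -- hence one dummy operator per arity in the updated tests.
}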
diff --git a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
index 8774a9a9e3..9d5ff000b7 100644
--- a/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
+++ b/deps/v8/test/cctest/compiler/test-js-constant-cache.cc
@@ -41,15 +41,16 @@ class JSConstantCacheTester : public HandleAndZoneScope,
JSGraph(main_isolate(), &main_graph_, &main_common_, &main_javascript_,
&main_machine_) {
main_graph_.SetStart(main_graph_.NewNode(common()->Start(0)));
- main_graph_.SetEnd(main_graph_.NewNode(common()->End(1)));
+ main_graph_.SetEnd(
+ main_graph_.NewNode(common()->End(1), main_graph_.start()));
main_typer_.Run();
}
- Type* upper(Node* node) { return NodeProperties::GetBounds(node).upper; }
+ Type* TypeOf(Node* node) { return NodeProperties::GetType(node); }
- Handle<Object> handle(Node* node) {
+ Handle<HeapObject> handle(Node* node) {
CHECK_EQ(IrOpcode::kHeapConstant, node->opcode());
- return OpParameter<Unique<Object> >(node).handle();
+ return OpParameter<Handle<HeapObject>>(node);
}
Factory* factory() { return main_isolate()->factory(); }
@@ -69,7 +70,7 @@ TEST(ZeroConstant1) {
CHECK_NE(zero, T.Float64Constant(0));
CHECK_NE(zero, T.Int32Constant(0));
- Type* t = T.upper(zero);
+ Type* t = T.TypeOf(zero);
CHECK(t->Is(Type::Number()));
CHECK(t->Is(Type::Integral32()));
@@ -90,7 +91,7 @@ TEST(MinusZeroConstant) {
CHECK_EQ(minus_zero, T.Constant(-0.0));
CHECK_NE(zero, minus_zero);
- Type* t = T.upper(minus_zero);
+ Type* t = T.TypeOf(minus_zero);
CHECK(t->Is(Type::Number()));
CHECK(t->Is(Type::MinusZero()));
@@ -123,7 +124,7 @@ TEST(ZeroConstant2) {
CHECK_NE(zero, T.Float64Constant(0));
CHECK_NE(zero, T.Int32Constant(0));
- Type* t = T.upper(zero);
+ Type* t = T.TypeOf(zero);
CHECK(t->Is(Type::Number()));
CHECK(t->Is(Type::Integral32()));
@@ -148,7 +149,7 @@ TEST(OneConstant1) {
CHECK_NE(one, T.Float64Constant(1.0));
CHECK_NE(one, T.Int32Constant(1));
- Type* t = T.upper(one);
+ Type* t = T.TypeOf(one);
CHECK(t->Is(Type::Number()));
CHECK(t->Is(Type::Integral32()));
@@ -173,7 +174,7 @@ TEST(OneConstant2) {
CHECK_NE(one, T.Float64Constant(1.0));
CHECK_NE(one, T.Int32Constant(1));
- Type* t = T.upper(one);
+ Type* t = T.TypeOf(one);
CHECK(t->Is(Type::Number()));
CHECK(t->Is(Type::Integral32()));
@@ -233,7 +234,7 @@ TEST(NumberTypes) {
FOR_FLOAT64_INPUTS(i) {
double value = *i;
Node* node = T.Constant(value);
- CHECK(T.upper(node)->Is(Type::Of(value, T.main_zone())));
+ CHECK(T.TypeOf(node)->Is(Type::Of(value, T.main_zone())));
}
}
@@ -280,15 +281,15 @@ TEST(OddballValues) {
TEST(OddballTypes) {
JSConstantCacheTester T;
- CHECK(T.upper(T.UndefinedConstant())->Is(Type::Undefined()));
+ CHECK(T.TypeOf(T.UndefinedConstant())->Is(Type::Undefined()));
// TODO(dcarney): figure this out.
- // CHECK(T.upper(T.TheHoleConstant())->Is(Type::Internal()));
- CHECK(T.upper(T.TrueConstant())->Is(Type::Boolean()));
- CHECK(T.upper(T.FalseConstant())->Is(Type::Boolean()));
- CHECK(T.upper(T.NullConstant())->Is(Type::Null()));
- CHECK(T.upper(T.ZeroConstant())->Is(Type::Number()));
- CHECK(T.upper(T.OneConstant())->Is(Type::Number()));
- CHECK(T.upper(T.NaNConstant())->Is(Type::NaN()));
+ // CHECK(T.TypeOf(T.TheHoleConstant())->Is(Type::Internal()));
+ CHECK(T.TypeOf(T.TrueConstant())->Is(Type::Boolean()));
+ CHECK(T.TypeOf(T.FalseConstant())->Is(Type::Boolean()));
+ CHECK(T.TypeOf(T.NullConstant())->Is(Type::Null()));
+ CHECK(T.TypeOf(T.ZeroConstant())->Is(Type::Number()));
+ CHECK(T.TypeOf(T.OneConstant())->Is(Type::Number()));
+ CHECK(T.TypeOf(T.NaNConstant())->Is(Type::NaN()));
}
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index 328b0aefdd..773d74170f 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -93,7 +93,7 @@ TEST(ReduceJSLoadContext) {
Node* new_context_input = NodeProperties::GetValueInput(r.replacement(), 0);
CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode());
HeapObjectMatcher match(new_context_input);
- CHECK_EQ(*native, *match.Value().handle());
+ CHECK_EQ(*native, *match.Value());
ContextAccess access = OpParameter<ContextAccess>(r.replacement());
CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, static_cast<int>(access.index()));
CHECK_EQ(0, static_cast<int>(access.depth()));
@@ -110,7 +110,7 @@ TEST(ReduceJSLoadContext) {
HeapObjectMatcher match(r.replacement());
CHECK(match.HasValue());
- CHECK_EQ(*expected, *match.Value().handle());
+ CHECK_EQ(*expected, *match.Value());
}
// TODO(titzer): test with other kinds of contexts, e.g. a function context.
@@ -140,24 +140,27 @@ TEST(ReduceJSStoreContext) {
{
// Mutable slot, constant context, depth = 0 => do nothing.
- Node* load = t.graph()->NewNode(t.javascript()->StoreContext(0, 0),
- const_context, const_context, start);
+ Node* load =
+ t.graph()->NewNode(t.javascript()->StoreContext(0, 0), const_context,
+ const_context, const_context, start, start);
Reduction r = t.spec()->Reduce(load);
CHECK(!r.Changed());
}
{
// Mutable slot, non-constant context, depth = 0 => do nothing.
- Node* load = t.graph()->NewNode(t.javascript()->StoreContext(0, 0),
- param_context, param_context, start);
+ Node* load =
+ t.graph()->NewNode(t.javascript()->StoreContext(0, 0), param_context,
+ param_context, const_context, start, start);
Reduction r = t.spec()->Reduce(load);
CHECK(!r.Changed());
}
{
// Immutable slot, constant context, depth = 0 => do nothing.
- Node* load = t.graph()->NewNode(t.javascript()->StoreContext(0, slot),
- const_context, const_context, start);
+ Node* load =
+ t.graph()->NewNode(t.javascript()->StoreContext(0, slot), const_context,
+ const_context, const_context, start, start);
Reduction r = t.spec()->Reduce(load);
CHECK(!r.Changed());
}
@@ -166,13 +169,13 @@ TEST(ReduceJSStoreContext) {
// Mutable slot, constant context, depth > 0 => fold-in parent context.
Node* load = t.graph()->NewNode(
t.javascript()->StoreContext(2, Context::GLOBAL_EVAL_FUN_INDEX),
- deep_const_context, deep_const_context, start);
+ deep_const_context, deep_const_context, const_context, start, start);
Reduction r = t.spec()->Reduce(load);
CHECK(r.Changed());
Node* new_context_input = NodeProperties::GetValueInput(r.replacement(), 0);
CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode());
HeapObjectMatcher match(new_context_input);
- CHECK_EQ(*native, *match.Value().handle());
+ CHECK_EQ(*native, *match.Value());
ContextAccess access = OpParameter<ContextAccess>(r.replacement());
CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, static_cast<int>(access.index()));
CHECK_EQ(0, static_cast<int>(access.depth()));
@@ -219,9 +222,10 @@ TEST(SpecializeToContext) {
Node* other_use =
t.graph()->NewNode(t.simplified()->ChangeTaggedToInt32(), other_load);
- Node* add =
- t.graph()->NewNode(t.javascript()->Add(LanguageMode::SLOPPY), value_use,
- other_use, param_context, other_load, start);
+ Node* add = t.graph()->NewNode(
+ t.javascript()->Add(LanguageMode::SLOPPY), value_use, other_use,
+ param_context, t.jsgraph()->EmptyFrameState(),
+ t.jsgraph()->EmptyFrameState(), other_load, start);
Node* ret =
t.graph()->NewNode(t.common()->Return(), add, effect_use, start);
@@ -249,7 +253,7 @@ TEST(SpecializeToContext) {
Node* replacement = value_use->InputAt(0);
HeapObjectMatcher match(replacement);
CHECK(match.HasValue());
- CHECK_EQ(*expected, *match.Value().handle());
+ CHECK_EQ(*expected, *match.Value());
}
// TODO(titzer): clean up above test and test more complicated effects.
}
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index e512de89b2..bac511dd2d 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -41,7 +41,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
typer(main_isolate(), &graph),
context_node(NULL) {
graph.SetStart(graph.NewNode(common.Start(num_parameters)));
- graph.SetEnd(graph.NewNode(common.End(1)));
+ graph.SetEnd(graph.NewNode(common.End(1), graph.start()));
typer.Run();
}
@@ -58,20 +58,17 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Node* Parameter(Type* t, int32_t index = 0) {
Node* n = graph.NewNode(common.Parameter(index), graph.start());
- NodeProperties::SetBounds(n, Bounds(Type::None(), t));
+ NodeProperties::SetType(n, t);
return n;
}
Node* UndefinedConstant() {
- Unique<HeapObject> unique = Unique<HeapObject>::CreateImmovable(
- isolate->factory()->undefined_value());
- return graph.NewNode(common.HeapConstant(unique));
+ Handle<HeapObject> value = isolate->factory()->undefined_value();
+ return graph.NewNode(common.HeapConstant(value));
}
Node* HeapConstant(Handle<HeapObject> constant) {
- Unique<HeapObject> unique =
- Unique<HeapObject>::CreateUninitialized(constant);
- return graph.NewNode(common.HeapConstant(unique));
+ return graph.NewNode(common.HeapConstant(constant));
}
Node* EmptyFrameState(Node* context) {
@@ -82,7 +79,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Node* state_node = graph.NewNode(
common.FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
nullptr),
- parameters, locals, stack, context, UndefinedConstant());
+ parameters, locals, stack, context, UndefinedConstant(), graph.start());
return state_node;
}
@@ -108,14 +105,12 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Node* control() { return start(); }
- void CheckPureBinop(IrOpcode::Value expected, Node* node) {
+ void CheckBinop(IrOpcode::Value expected, Node* node) {
CHECK_EQ(expected, node->opcode());
- CHECK_EQ(2, node->InputCount()); // should not have context, effect, etc.
}
- void CheckPureBinop(const Operator* expected, Node* node) {
+ void CheckBinop(const Operator* expected, Node* node) {
CHECK_EQ(expected->opcode(), node->op()->opcode());
- CHECK_EQ(2, node->InputCount()); // should not have context, effect, etc.
}
Node* ReduceUnop(const Operator* op, Type* input_type) {
@@ -128,16 +123,23 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Node* Binop(const Operator* op, Node* left, Node* right) {
// JS binops also require context, effect, and control
- if (OperatorProperties::GetFrameStateInputCount(op) == 1) {
- return graph.NewNode(op, left, right, context(),
- EmptyFrameState(context()), start(), control());
- } else if (OperatorProperties::GetFrameStateInputCount(op) == 2) {
- return graph.NewNode(op, left, right, context(),
- EmptyFrameState(context()),
- EmptyFrameState(context()), start(), control());
- } else {
- return graph.NewNode(op, left, right, context(), start(), control());
+ std::vector<Node*> inputs;
+ inputs.push_back(left);
+ inputs.push_back(right);
+ if (OperatorProperties::HasContextInput(op)) {
+ inputs.push_back(context());
+ }
+ for (int i = 0; i < OperatorProperties::GetFrameStateInputCount(op); i++) {
+ inputs.push_back(EmptyFrameState(context()));
+ }
+ if (op->EffectInputCount() > 0) {
+ inputs.push_back(start());
+ }
+ if (op->ControlInputCount() > 0) {
+ inputs.push_back(control());
}
+ return graph.NewNode(op, static_cast<int>(inputs.size()),
+ &(inputs.front()));
}
Node* Unop(const Operator* op, Node* input) {
@@ -193,9 +195,9 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
CheckHandle(isolate->factory()->false_value(), result);
}
- void CheckHandle(Handle<Object> expected, Node* result) {
+ void CheckHandle(Handle<HeapObject> expected, Node* result) {
CHECK_EQ(IrOpcode::kHeapConstant, result->opcode());
- Handle<Object> value = OpParameter<Unique<Object> >(result).handle();
+ Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(result);
CHECK_EQ(*expected, *value);
}
};
@@ -245,7 +247,7 @@ TEST_WITH_STRONG(StringBinops) {
Node* add = R.Binop(R.javascript.Add(language_mode), p0, p1);
Node* r = R.reduce(add);
- R.CheckPureBinop(IrOpcode::kStringAdd, r);
+ R.CheckBinop(IrOpcode::kStringAdd, r);
CHECK_EQ(p0, r->InputAt(0));
CHECK_EQ(p1, r->InputAt(1));
}
@@ -262,7 +264,7 @@ TEST_WITH_STRONG(AddNumber1) {
Node* add = R.Binop(R.javascript.Add(language_mode), p0, p1);
Node* r = R.reduce(add);
- R.CheckPureBinop(IrOpcode::kNumberAdd, r);
+ R.CheckBinop(IrOpcode::kNumberAdd, r);
CHECK_EQ(p0, r->InputAt(0));
CHECK_EQ(p1, r->InputAt(1));
}
@@ -289,7 +291,7 @@ TEST_WITH_STRONG(NumberBinops) {
Node* add = R.Binop(ops[k], p0, p1);
Node* r = R.reduce(add);
- R.CheckPureBinop(ops[k + 1], r);
+ R.CheckBinop(ops[k + 1], r);
CHECK_EQ(p0, r->InputAt(0));
CHECK_EQ(p1, r->InputAt(1));
}
@@ -299,8 +301,8 @@ TEST_WITH_STRONG(NumberBinops) {
static void CheckToI32(Node* old_input, Node* new_input, bool is_signed) {
- Type* old_type = NodeProperties::GetBounds(old_input).upper;
- Type* new_type = NodeProperties::GetBounds(new_input).upper;
+ Type* old_type = NodeProperties::GetType(old_input);
+ Type* new_type = NodeProperties::GetType(new_input);
Type* expected_type = I32Type(is_signed);
CHECK(new_type->Is(expected_type));
if (old_type->Is(expected_type)) {
@@ -359,7 +361,7 @@ TEST(Int32BitwiseShifts) {
Node* add = R.Binop(R.ops[k], p0, p1);
Node* r = R.reduce(add);
- R.CheckPureBinop(R.ops[k + 1], r);
+ R.CheckBinop(R.ops[k + 1], r);
Node* r0 = r->InputAt(0);
Node* r1 = r->InputAt(1);
@@ -417,7 +419,7 @@ TEST(Int32BitwiseBinops) {
Node* add = R.Binop(R.ops[k], p0, p1);
Node* r = R.reduce(add);
- R.CheckPureBinop(R.ops[k + 1], r);
+ R.CheckBinop(R.ops[k + 1], r);
CheckToI32(p0, r->InputAt(0), R.signedness[k]);
CheckToI32(p1, r->InputAt(1), R.signedness[k + 1]);
@@ -492,7 +494,7 @@ TEST(JSToNumberOfConstant) {
// Note that either outcome below is correct. It only depends on whether
// the types of constants are eagerly computed or only computed by the
// typing pass.
- if (NodeProperties::GetBounds(n).upper->Is(Type::Number())) {
+ if (NodeProperties::GetType(n)->Is(Type::Number())) {
// If number constants are eagerly typed, then reduction should
// remove the ToNumber.
CHECK_EQ(n, r);
@@ -610,7 +612,7 @@ TEST_WITH_STRONG(StringComparison) {
Node* cmp = R.Binop(ops[k], p0, p1);
Node* r = R.reduce(cmp);
- R.CheckPureBinop(ops[k + 1], r);
+ R.CheckBinop(ops[k + 1], r);
if (k >= 4) {
// GreaterThan and GreaterThanOrEqual commute the inputs
// and use the LessThan and LessThanOrEqual operators.
@@ -627,9 +629,9 @@ TEST_WITH_STRONG(StringComparison) {
static void CheckIsConvertedToNumber(Node* val, Node* converted) {
- if (NodeProperties::GetBounds(val).upper->Is(Type::Number())) {
+ if (NodeProperties::GetType(val)->Is(Type::Number())) {
CHECK_EQ(val, converted);
- } else if (NodeProperties::GetBounds(val).upper->Is(Type::Boolean())) {
+ } else if (NodeProperties::GetType(val)->Is(Type::Boolean())) {
CHECK_EQ(IrOpcode::kBooleanToNumber, converted->opcode());
CHECK_EQ(val, converted->InputAt(0));
} else {
@@ -658,7 +660,7 @@ TEST_WITH_STRONG(NumberComparison) {
Node* cmp = R.Binop(ops[k], p0, p1);
Node* r = R.reduce(cmp);
- R.CheckPureBinop(ops[k + 1], r);
+ R.CheckBinop(ops[k + 1], r);
if (k >= 4) {
// GreaterThan and GreaterThanOrEqual commute the inputs
// and use the LessThan and LessThanOrEqual operators.
@@ -688,13 +690,13 @@ TEST_WITH_STRONG(MixedComparison1) {
Node* cmp = R.Binop(less_than, p0, p1);
Node* r = R.reduce(cmp);
if (types[i]->Is(Type::String()) && types[j]->Is(Type::String())) {
- R.CheckPureBinop(R.simplified.StringLessThan(), r);
+ R.CheckBinop(R.simplified.StringLessThan(), r);
} else if ((types[i]->Is(Type::Number()) &&
types[j]->Is(Type::Number())) ||
(!is_strong(language_mode) &&
(!types[i]->Maybe(Type::String()) ||
!types[j]->Maybe(Type::String())))) {
- R.CheckPureBinop(R.simplified.NumberLessThan(), r);
+ R.CheckBinop(R.simplified.NumberLessThan(), r);
} else {
// No reduction of mixed types.
CHECK_EQ(r->op(), less_than);
@@ -831,20 +833,21 @@ void CheckEqualityReduction(JSTypedLoweringTester* R, bool strict, Node* l,
Node* p1 = j == 1 ? l : r;
{
- Node* eq = strict ? R->graph.NewNode(R->javascript.StrictEqual(), p0, p1)
- : R->Binop(R->javascript.Equal(), p0, p1);
+ const Operator* op =
+ strict ? R->javascript.StrictEqual() : R->javascript.Equal();
+ Node* eq = R->Binop(op, p0, p1);
Node* r = R->reduce(eq);
- R->CheckPureBinop(expected, r);
+ R->CheckBinop(expected, r);
}
{
- Node* ne = strict
- ? R->graph.NewNode(R->javascript.StrictNotEqual(), p0, p1)
- : R->Binop(R->javascript.NotEqual(), p0, p1);
+ const Operator* op =
+ strict ? R->javascript.StrictNotEqual() : R->javascript.NotEqual();
+ Node* ne = R->Binop(op, p0, p1);
Node* n = R->reduce(ne);
CHECK_EQ(IrOpcode::kBooleanNot, n->opcode());
Node* r = n->InputAt(0);
- R->CheckPureBinop(expected, r);
+ R->CheckBinop(expected, r);
}
}
}
@@ -915,7 +918,7 @@ TEST_WITH_STRONG(RemovePureNumberBinopEffects) {
BinopEffectsTester B(ops[j], Type::Number(), Type::Number());
CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
- B.R.CheckPureBinop(B.result->opcode(), B.result);
+ B.R.CheckBinop(B.result->opcode(), B.result);
B.CheckNoOp(0);
B.CheckNoOp(1);
@@ -1055,7 +1058,7 @@ TEST(Int32BinopEffects) {
BinopEffectsTester B(R.ops[j], I32Type(signed_left), I32Type(signed_right));
CHECK_EQ(R.ops[j + 1]->opcode(), B.result->op()->opcode());
- B.R.CheckPureBinop(B.result->opcode(), B.result);
+ B.R.CheckBinop(B.result->opcode(), B.result);
B.CheckNoOp(0);
B.CheckNoOp(1);
@@ -1068,7 +1071,7 @@ TEST(Int32BinopEffects) {
BinopEffectsTester B(R.ops[j], Type::Number(), Type::Number());
CHECK_EQ(R.ops[j + 1]->opcode(), B.result->op()->opcode());
- B.R.CheckPureBinop(B.result->opcode(), B.result);
+ B.R.CheckBinop(B.result->opcode(), B.result);
B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
@@ -1080,7 +1083,7 @@ TEST(Int32BinopEffects) {
bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
BinopEffectsTester B(R.ops[j], Type::Number(), Type::Primitive());
- B.R.CheckPureBinop(B.result->opcode(), B.result);
+ B.R.CheckBinop(B.result->opcode(), B.result);
Node* i0 = B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
Node* i1 = B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
@@ -1097,7 +1100,7 @@ TEST(Int32BinopEffects) {
bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
BinopEffectsTester B(R.ops[j], Type::Primitive(), Type::Number());
- B.R.CheckPureBinop(B.result->opcode(), B.result);
+ B.R.CheckBinop(B.result->opcode(), B.result);
Node* i0 = B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
Node* i1 = B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
@@ -1114,7 +1117,7 @@ TEST(Int32BinopEffects) {
bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
BinopEffectsTester B(R.ops[j], Type::Primitive(), Type::Primitive());
- B.R.CheckPureBinop(B.result->opcode(), B.result);
+ B.R.CheckBinop(B.result->opcode(), B.result);
Node* i0 = B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
Node* i1 = B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
@@ -1246,7 +1249,7 @@ TEST_WITH_STRONG(Int32Comparisons) {
} else {
expected = ops[o].num_op;
}
- R.CheckPureBinop(expected, r);
+ R.CheckBinop(expected, r);
if (ops[o].commute) {
CHECK_EQ(p1, r->InputAt(0));
CHECK_EQ(p0, r->InputAt(1));
diff --git a/deps/v8/test/cctest/compiler/test-loop-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
index 6560ae337b..60e7657f2b 100644
--- a/deps/v8/test/cctest/compiler/test-loop-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
@@ -27,7 +27,7 @@ static Operator kIntAdd(IrOpcode::kInt32Add, Operator::kPure, "Int32Add", 2, 0,
0, 1, 0, 0);
static Operator kIntLt(IrOpcode::kInt32LessThan, Operator::kPure,
"Int32LessThan", 2, 0, 0, 1, 0, 0);
-static Operator kStore(IrOpcode::kStore, Operator::kNoProperties, "Store", 0, 2,
+static Operator kStore(IrOpcode::kStore, Operator::kNoProperties, "Store", 1, 1,
1, 0, 1, 0);
static const int kNumLeafs = 4;
@@ -234,8 +234,7 @@ struct StoreLoop {
Node* store;
explicit StoreLoop(While& w)
- : base(w.t.jsgraph.Int32Constant(12)),
- val(w.t.jsgraph.Int32Constant(13)) {
+ : base(w.t.graph.start()), val(w.t.jsgraph.Int32Constant(13)) {
Build(w);
}
@@ -243,7 +242,7 @@ struct StoreLoop {
void Build(While& w) {
phi = w.t.graph.NewNode(w.t.op(2, true), base, base, w.loop);
- store = w.t.graph.NewNode(&kStore, phi, val, w.loop);
+ store = w.t.graph.NewNode(&kStore, val, phi, w.loop);
phi->ReplaceInput(1, store);
}
};
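
To decode the kStore signature change at the top of this file: assuming V8's Operator constructor takes value/effect/control input counts followed by value/effect/control output counts (which matches how kIntAdd is declared here, with 2 value inputs and 1 value output), the new declaration gives Store one value input, one effect input, and one control input. The reordered NewNode call then passes val as the value input, phi as the effect input, and the loop as control. Annotated for reference, with comment labels resting on that assumption:

static Operator kStore(IrOpcode::kStore, Operator::kNoProperties, "Store",
                       1,   // value inputs   (the stored value, val)
                       1,   // effect inputs  (previous effect, fed by phi)
                       1,   // control inputs (the loop header)
                       0,   // value outputs
                       1,   // effect outputs
                       0);  // control outputs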
@@ -489,7 +488,7 @@ TEST(LaNestedLoop1x) {
p2a->ReplaceInput(1, p2b);
p2b->ReplaceInput(1, p2a);
- t.Return(t.p0, p1a, w1.exit);
+ t.Return(t.p0, t.start, w1.exit);
Node* chain[] = {w1.loop, w2.loop};
t.CheckNestedLoops(chain, 2);
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index 299f0c02ab..c02e7e5751 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -703,7 +703,8 @@ TEST(ReduceLoadStore) {
Node* base = R.Constant<int32_t>(11);
Node* index = R.Constant<int32_t>(4);
- Node* load = R.graph.NewNode(R.machine.Load(kMachInt32), base, index);
+ Node* load = R.graph.NewNode(R.machine.Load(kMachInt32), base, index,
+ R.graph.start(), R.graph.start());
{
MachineOperatorReducer reducer(&R.jsgraph);
@@ -714,7 +715,7 @@ TEST(ReduceLoadStore) {
{
Node* store = R.graph.NewNode(
R.machine.Store(StoreRepresentation(kMachInt32, kNoWriteBarrier)), base,
- index, load);
+ index, load, load, R.graph.start());
MachineOperatorReducer reducer(&R.jsgraph);
Reduction reduction = reducer.Reduce(store);
CHECK(!reduction.Changed()); // stores should not be reduced.
diff --git a/deps/v8/test/cctest/compiler/test-node.cc b/deps/v8/test/cctest/compiler/test-node.cc
index 1a6bf7ca12..8ac38274b6 100644
--- a/deps/v8/test/cctest/compiler/test-node.cc
+++ b/deps/v8/test/cctest/compiler/test-node.cc
@@ -16,8 +16,14 @@ using namespace v8::internal::compiler;
#define NONE reinterpret_cast<Node*>(1)
-static Operator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
- "dummy", 0, 0, 0, 1, 0, 0);
+static Operator dummy_operator0(IrOpcode::kParameter, Operator::kNoWrite,
+ "dummy", 0, 0, 0, 1, 0, 0);
+static Operator dummy_operator1(IrOpcode::kParameter, Operator::kNoWrite,
+ "dummy", 1, 0, 0, 1, 0, 0);
+static Operator dummy_operator2(IrOpcode::kParameter, Operator::kNoWrite,
+ "dummy", 2, 0, 0, 1, 0, 0);
+static Operator dummy_operator3(IrOpcode::kParameter, Operator::kNoWrite,
+ "dummy", 3, 0, 0, 1, 0, 0);
#define CHECK_USES(node, ...) \
do { \
@@ -28,9 +34,12 @@ static Operator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
} while (false)
+namespace {
+
typedef std::multiset<Node*, std::less<Node*>> NodeMSet;
-static void CheckUseChain(Node* node, Node** uses, int use_count) {
+
+void CheckUseChain(Node* node, Node** uses, int use_count) {
// Check ownership.
if (use_count == 1) CHECK(node->OwnedBy(uses[0]));
if (use_count > 1) {
@@ -82,16 +91,7 @@ static void CheckUseChain(Node* node, Node** uses, int use_count) {
}
-#define CHECK_INPUTS(node, ...) \
- do { \
- Node* __array[] = {__VA_ARGS__}; \
- int __size = \
- __array[0] != NONE ? static_cast<int>(arraysize(__array)) : 0; \
- CheckInputs(node, __array, __size); \
- } while (false)
-
-
-static void CheckInputs(Node* node, Node** inputs, int input_count) {
+void CheckInputs(Node* node, Node** inputs, int input_count) {
CHECK_EQ(input_count, node->InputCount());
// Check InputAt().
for (int i = 0; i < static_cast<int>(input_count); i++) {
@@ -129,14 +129,25 @@ static void CheckInputs(Node* node, Node** inputs, int input_count) {
}
}
+} // namespace
+
+
+#define CHECK_INPUTS(node, ...) \
+ do { \
+ Node* __array[] = {__VA_ARGS__}; \
+ int __size = \
+ __array[0] != NONE ? static_cast<int>(arraysize(__array)) : 0; \
+ CheckInputs(node, __array, __size); \
+ } while (false)
+
TEST(NodeUseIteratorReplaceUses) {
Zone zone;
Graph graph(&zone);
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
- Node* n3 = graph.NewNode(&dummy_operator);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
+ Node* n3 = graph.NewNode(&dummy_operator0);
CHECK_USES(n0, n1, n2);
@@ -158,8 +169,8 @@ TEST(NodeUseIteratorReplaceUses) {
TEST(NodeUseIteratorReplaceUsesSelf) {
Zone zone;
Graph graph(&zone);
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
CHECK_USES(n0, n1);
CHECK_USES(n1, NONE);
@@ -169,7 +180,7 @@ TEST(NodeUseIteratorReplaceUsesSelf) {
CHECK_USES(n0, NONE);
CHECK_USES(n1, n1);
- Node* n2 = graph.NewNode(&dummy_operator);
+ Node* n2 = graph.NewNode(&dummy_operator0);
n1->ReplaceUses(n2);
@@ -182,11 +193,11 @@ TEST(NodeUseIteratorReplaceUsesSelf) {
TEST(ReplaceInput) {
Zone zone;
Graph graph(&zone);
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator);
- Node* n3 = graph.NewNode(&dummy_operator, n0, n1, n2);
- Node* n4 = graph.NewNode(&dummy_operator);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator0);
+ Node* n3 = graph.NewNode(&dummy_operator3, n0, n1, n2);
+ Node* n4 = graph.NewNode(&dummy_operator0);
CHECK_USES(n0, n3);
CHECK_USES(n1, n3);
@@ -210,17 +221,17 @@ TEST(OwnedBy) {
Graph graph(&zone);
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
CHECK(!n0->OwnedBy(n1));
CHECK(!n1->OwnedBy(n0));
- Node* n2 = graph.NewNode(&dummy_operator, n0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
CHECK(n0->OwnedBy(n2));
CHECK(!n2->OwnedBy(n0));
- Node* n3 = graph.NewNode(&dummy_operator, n0);
+ Node* n3 = graph.NewNode(&dummy_operator1, n0);
CHECK(!n0->OwnedBy(n2));
CHECK(!n0->OwnedBy(n3));
CHECK(!n2->OwnedBy(n0));
@@ -228,11 +239,11 @@ TEST(OwnedBy) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
CHECK(n0->OwnedBy(n1));
CHECK(!n1->OwnedBy(n0));
- Node* n2 = graph.NewNode(&dummy_operator, n0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
CHECK(!n0->OwnedBy(n1));
CHECK(!n0->OwnedBy(n2));
CHECK(!n1->OwnedBy(n0));
@@ -240,7 +251,7 @@ TEST(OwnedBy) {
CHECK(!n2->OwnedBy(n0));
CHECK(!n2->OwnedBy(n1));
- Node* n3 = graph.NewNode(&dummy_operator);
+ Node* n3 = graph.NewNode(&dummy_operator0);
n2->ReplaceInput(0, n3);
CHECK(n0->OwnedBy(n1));
@@ -259,18 +270,18 @@ TEST(Uses) {
Zone zone;
Graph graph(&zone);
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
CHECK_USES(n0, n1);
CHECK_USES(n1, NONE);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
CHECK_USES(n0, n1, n2);
CHECK_USES(n2, NONE);
- Node* n3 = graph.NewNode(&dummy_operator, n0);
+ Node* n3 = graph.NewNode(&dummy_operator1, n0);
CHECK_USES(n0, n1, n2, n3);
CHECK_USES(n3, NONE);
@@ -281,14 +292,14 @@ TEST(Inputs) {
Zone zone;
Graph graph(&zone);
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
- Node* n3 = graph.NewNode(&dummy_operator, n0, n1, n2);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
+ Node* n3 = graph.NewNode(&dummy_operator3, n0, n1, n2);
CHECK_INPUTS(n3, n0, n1, n2);
- Node* n4 = graph.NewNode(&dummy_operator, n0, n1, n2);
+ Node* n4 = graph.NewNode(&dummy_operator3, n0, n1, n2);
n3->AppendInput(graph.zone(), n4);
CHECK_INPUTS(n3, n0, n1, n2, n4);
@@ -299,7 +310,7 @@ TEST(Inputs) {
CHECK_INPUTS(n3, n0, n1, n2, n4, n4);
CHECK_USES(n4, n3, n3);
- Node* n5 = graph.NewNode(&dummy_operator, n4);
+ Node* n5 = graph.NewNode(&dummy_operator1, n4);
CHECK_USES(n4, n3, n3, n5);
}
@@ -309,9 +320,9 @@ TEST(RemoveInput) {
Zone zone;
Graph graph(&zone);
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
- Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
+ Node* n2 = graph.NewNode(&dummy_operator2, n0, n1);
CHECK_INPUTS(n0, NONE);
CHECK_INPUTS(n1, n0);
@@ -339,16 +350,16 @@ TEST(AppendInputsAndIterator) {
Zone zone;
Graph graph(&zone);
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
- Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
+ Node* n2 = graph.NewNode(&dummy_operator2, n0, n1);
CHECK_INPUTS(n0, NONE);
CHECK_INPUTS(n1, n0);
CHECK_INPUTS(n2, n0, n1);
CHECK_USES(n0, n1, n2);
- Node* n3 = graph.NewNode(&dummy_operator);
+ Node* n3 = graph.NewNode(&dummy_operator0);
n2->AppendInput(graph.zone(), n3);
@@ -361,9 +372,9 @@ TEST(NullInputsSimple) {
Zone zone;
Graph graph(&zone);
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
- Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
+ Node* n2 = graph.NewNode(&dummy_operator2, n0, n1);
CHECK_INPUTS(n0, NONE);
CHECK_INPUTS(n1, n0);
@@ -388,10 +399,10 @@ TEST(NullInputsAppended) {
Zone zone;
Graph graph(&zone);
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
- Node* n3 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
+ Node* n3 = graph.NewNode(&dummy_operator1, n0);
n3->AppendInput(graph.zone(), n1);
n3->AppendInput(graph.zone(), n2);
@@ -411,10 +422,10 @@ TEST(ReplaceUsesFromAppendedInputs) {
Zone zone;
Graph graph(&zone);
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
- Node* n3 = graph.NewNode(&dummy_operator);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
+ Node* n3 = graph.NewNode(&dummy_operator0);
CHECK_INPUTS(n2, n0);
@@ -439,14 +450,14 @@ TEST(ReplaceInputMultipleUses) {
Zone zone;
Graph graph(&zone);
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
n2->ReplaceInput(0, n1);
CHECK_EQ(0, n0->UseCount());
CHECK_EQ(1, n1->UseCount());
- Node* n3 = graph.NewNode(&dummy_operator, n0);
+ Node* n3 = graph.NewNode(&dummy_operator1, n0);
n3->ReplaceInput(0, n1);
CHECK_EQ(0, n0->UseCount());
CHECK_EQ(2, n1->UseCount());
@@ -458,25 +469,25 @@ TEST(TrimInputCountInline) {
Graph graph(&zone);
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
n1->TrimInputCount(1);
CHECK_INPUTS(n1, n0);
CHECK_USES(n0, n1);
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
n1->TrimInputCount(0);
CHECK_INPUTS(n1, NONE);
CHECK_USES(n0, NONE);
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator2, n0, n1);
n2->TrimInputCount(2);
CHECK_INPUTS(n2, n0, n1);
CHECK_USES(n0, n2);
@@ -484,9 +495,9 @@ TEST(TrimInputCountInline) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator2, n0, n1);
n2->TrimInputCount(1);
CHECK_INPUTS(n2, n0);
CHECK_USES(n0, n2);
@@ -494,9 +505,9 @@ TEST(TrimInputCountInline) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator2, n0, n1);
n2->TrimInputCount(0);
CHECK_INPUTS(n2, NONE);
CHECK_USES(n0, NONE);
@@ -504,16 +515,16 @@ TEST(TrimInputCountInline) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator2, n0, n0);
n2->TrimInputCount(1);
CHECK_INPUTS(n2, n0);
CHECK_USES(n0, n2);
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator2, n0, n0);
n2->TrimInputCount(0);
CHECK_INPUTS(n2, NONE);
CHECK_USES(n0, NONE);
@@ -526,8 +537,8 @@ TEST(TrimInputCountOutOfLine1) {
Graph graph(&zone);
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
n1->AppendInput(graph.zone(), n0);
CHECK_INPUTS(n1, n0);
CHECK_USES(n0, n1);
@@ -538,8 +549,8 @@ TEST(TrimInputCountOutOfLine1) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
n1->AppendInput(graph.zone(), n0);
CHECK_EQ(1, n1->InputCount());
n1->TrimInputCount(0);
@@ -548,9 +559,9 @@ TEST(TrimInputCountOutOfLine1) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator0);
n2->AppendInput(graph.zone(), n0);
n2->AppendInput(graph.zone(), n1);
CHECK_INPUTS(n2, n0, n1);
@@ -562,9 +573,9 @@ TEST(TrimInputCountOutOfLine1) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator0);
n2->AppendInput(graph.zone(), n0);
n2->AppendInput(graph.zone(), n1);
CHECK_INPUTS(n2, n0, n1);
@@ -576,9 +587,9 @@ TEST(TrimInputCountOutOfLine1) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator0);
n2->AppendInput(graph.zone(), n0);
n2->AppendInput(graph.zone(), n1);
CHECK_INPUTS(n2, n0, n1);
@@ -590,8 +601,8 @@ TEST(TrimInputCountOutOfLine1) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator0);
n2->AppendInput(graph.zone(), n0);
n2->AppendInput(graph.zone(), n0);
CHECK_INPUTS(n2, n0, n0);
@@ -602,8 +613,8 @@ TEST(TrimInputCountOutOfLine1) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator0);
n2->AppendInput(graph.zone(), n0);
n2->AppendInput(graph.zone(), n0);
CHECK_INPUTS(n2, n0, n0);
@@ -620,9 +631,9 @@ TEST(TrimInputCountOutOfLine2) {
Graph graph(&zone);
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
n2->AppendInput(graph.zone(), n1);
CHECK_INPUTS(n2, n0, n1);
n2->TrimInputCount(2);
@@ -633,9 +644,9 @@ TEST(TrimInputCountOutOfLine2) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
n2->AppendInput(graph.zone(), n1);
CHECK_INPUTS(n2, n0, n1);
n2->TrimInputCount(1);
@@ -646,9 +657,9 @@ TEST(TrimInputCountOutOfLine2) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
n2->AppendInput(graph.zone(), n1);
CHECK_INPUTS(n2, n0, n1);
n2->TrimInputCount(0);
@@ -659,8 +670,8 @@ TEST(TrimInputCountOutOfLine2) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
n2->AppendInput(graph.zone(), n0);
CHECK_INPUTS(n2, n0, n0);
CHECK_USES(n0, n2, n2);
@@ -671,8 +682,8 @@ TEST(TrimInputCountOutOfLine2) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n2 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n2 = graph.NewNode(&dummy_operator1, n0);
n2->AppendInput(graph.zone(), n0);
CHECK_EQ(2, n2->InputCount());
CHECK_EQ(2, n0->UseCount());
@@ -689,14 +700,14 @@ TEST(NullAllInputs) {
Graph graph(&zone);
for (int i = 0; i < 2; i++) {
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
Node* n2;
if (i == 0) {
- n2 = graph.NewNode(&dummy_operator, n0, n1);
+ n2 = graph.NewNode(&dummy_operator2, n0, n1);
CHECK_INPUTS(n2, n0, n1);
} else {
- n2 = graph.NewNode(&dummy_operator, n0);
+ n2 = graph.NewNode(&dummy_operator1, n0);
CHECK_INPUTS(n2, n0);
n2->AppendInput(graph.zone(), n1); // with out-of-line input.
CHECK_INPUTS(n2, n0, n1);
@@ -718,8 +729,8 @@ TEST(NullAllInputs) {
}
{
- Node* n0 = graph.NewNode(&dummy_operator);
- Node* n1 = graph.NewNode(&dummy_operator, n0);
+ Node* n0 = graph.NewNode(&dummy_operator0);
+ Node* n1 = graph.NewNode(&dummy_operator1, n0);
n1->ReplaceInput(0, n1); // self-reference.
CHECK_INPUTS(n0, NONE);
@@ -741,13 +752,13 @@ TEST(AppendAndTrim) {
Graph graph(&zone);
Node* nodes[] = {
- graph.NewNode(&dummy_operator), graph.NewNode(&dummy_operator),
- graph.NewNode(&dummy_operator), graph.NewNode(&dummy_operator),
- graph.NewNode(&dummy_operator)};
+ graph.NewNode(&dummy_operator0), graph.NewNode(&dummy_operator0),
+ graph.NewNode(&dummy_operator0), graph.NewNode(&dummy_operator0),
+ graph.NewNode(&dummy_operator0)};
int max = static_cast<int>(arraysize(nodes));
- Node* last = graph.NewNode(&dummy_operator);
+ Node* last = graph.NewNode(&dummy_operator0);
for (int i = 0; i < max; i++) {
last->AppendInput(graph.zone(), nodes[i]);
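The churn throughout this file replaces the old variadic dummy_operator with arity-specific operators because Node creation in this revision validates the input count against the operator's declared value-input arity. A minimal sketch of how such test operators could be declared; the exact Operator constructor argument order here is an assumption, not taken from this patch:

// value_in = 0/1/2/3, one value output, no effect or control inputs.
const Operator dummy_operator0(IrOpcode::kParameter, Operator::kNoWrite,
                               "dummy", 0, 0, 0, 1, 0, 0);
const Operator dummy_operator1(IrOpcode::kParameter, Operator::kNoWrite,
                               "dummy", 1, 0, 0, 1, 0, 0);
const Operator dummy_operator2(IrOpcode::kParameter, Operator::kNoWrite,
                               "dummy", 2, 0, 0, 1, 0, 0);
const Operator dummy_operator3(IrOpcode::kParameter, Operator::kNoWrite,
                               "dummy", 3, 0, 0, 1, 0, 0);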
diff --git a/deps/v8/test/cctest/compiler/test-osr.cc b/deps/v8/test/cctest/compiler/test-osr.cc
index 80dbccc633..356cbd24a7 100644
--- a/deps/v8/test/cctest/compiler/test-osr.cc
+++ b/deps/v8/test/cctest/compiler/test-osr.cc
@@ -93,22 +93,16 @@ class OsrDeconstructorTester : public HandleAndZoneScope {
return graph.NewNode(common.Phi(kMachAnyTagged, count), count + 1, inputs);
}
- Node* NewLoop(bool is_osr, int num_backedges, Node* entry = NULL) {
- CHECK_LT(num_backedges, 4);
- CHECK_GE(num_backedges, 0);
- int count = 1 + num_backedges;
- if (entry == NULL) entry = osr_normal_entry;
- Node* inputs[5] = {entry, self, self, self, self};
+ Node* NewLoop(bool is_osr, int num_backedges, Node* entry = nullptr) {
+ if (entry == nullptr) entry = osr_normal_entry;
+ Node* loop = graph.NewNode(common.Loop(1), entry);
if (is_osr) {
- count = 2 + num_backedges;
- inputs[1] = osr_loop_entry;
+ loop->AppendInput(graph.zone(), osr_loop_entry);
}
-
- Node* loop = graph.NewNode(common.Loop(count), count, inputs);
- for (int i = 0; i < loop->InputCount(); i++) {
- if (loop->InputAt(i) == self) loop->ReplaceInput(i, loop);
+ for (int i = 0; i < num_backedges; i++) {
+ loop->AppendInput(graph.zone(), loop);
}
-
+ NodeProperties::ChangeOp(loop, common.Loop(loop->InputCount()));
return loop;
}
@@ -497,8 +491,7 @@ TEST(Deconstruct_osr_nested3) {
loop0.branch->ReplaceInput(0, loop0_cntr);
// middle loop.
- Node* loop1 = T.graph.NewNode(T.common.Loop(2), loop0.if_true, T.self);
- loop1->ReplaceInput(0, loop0.if_true);
+ Node* loop1 = T.graph.NewNode(T.common.Loop(1), loop0.if_true);
Node* loop1_phi = T.graph.NewNode(T.common.Phi(kMachAnyTagged, 2), loop0_cntr,
loop0_cntr, loop1);
@@ -521,7 +514,8 @@ TEST(Deconstruct_osr_nested3) {
Node* if_false = T.graph.NewNode(T.common.IfFalse(), branch);
loop0.loop->ReplaceInput(1, if_true);
- loop1->ReplaceInput(1, if_false);
+ loop1->AppendInput(T.graph.zone(), if_false);
+ NodeProperties::ChangeOp(loop1, T.common.Loop(2));
Node* ret =
T.graph.NewNode(T.common.Return(), loop0_cntr, T.start, loop0.exit);
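Both hunks in this file use the same incremental loop-construction pattern: create the Loop node with only its entry control, append backedges as they become known, then retag the node so the operator's declared control-input count matches again. A sketch of the pattern, using only the CommonOperatorBuilder and NodeProperties calls visible in the hunks above:

Node* loop = graph.NewNode(common.Loop(1), entry);   // entry control only
loop->AppendInput(graph.zone(), backedge);           // add backedges later
NodeProperties::ChangeOp(loop, common.Loop(loop->InputCount()));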
diff --git a/deps/v8/test/cctest/compiler/test-pipeline.cc b/deps/v8/test/cctest/compiler/test-pipeline.cc
index 8996718644..b14e823ee7 100644
--- a/deps/v8/test/cctest/compiler/test-pipeline.cc
+++ b/deps/v8/test/cctest/compiler/test-pipeline.cc
@@ -19,6 +19,7 @@ static void RunPipeline(Zone* zone, const char* source) {
ParseInfo parse_info(zone, function);
CHECK(Compiler::ParseAndAnalyze(&parse_info));
CompilationInfo info(&parse_info);
+ info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
Pipeline pipeline(&info);
Handle<Code> code = pipeline.GenerateCode();
diff --git a/deps/v8/test/cctest/compiler/test-representation-change.cc b/deps/v8/test/cctest/compiler/test-representation-change.cc
index 216f9fd847..913808f8bc 100644
--- a/deps/v8/test/cctest/compiler/test-representation-change.cc
+++ b/deps/v8/test/cctest/compiler/test-representation-change.cc
@@ -71,7 +71,7 @@ class RepresentationChangerTester : public HandleAndZoneScope,
void CheckHeapConstant(Node* n, HeapObject* expected) {
HeapObjectMatcher m(n);
CHECK(m.HasValue());
- CHECK_EQ(expected, *m.Value().handle());
+ CHECK_EQ(expected, *m.Value());
}
void CheckNumberConstant(Node* n, double expected) {
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
new file mode 100644
index 0000000000..06cb2f754f
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -0,0 +1,258 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <utility>
+
+#include "src/v8.h"
+
+#include "src/compiler/pipeline.h"
+#include "src/execution.h"
+#include "src/handles.h"
+#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/interpreter.h"
+#include "src/parser.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+static const char kFunctionName[] = "f";
+
+
+static MaybeHandle<Object> CallFunction(Isolate* isolate,
+ Handle<JSFunction> function) {
+ return Execution::Call(isolate, function,
+ isolate->factory()->undefined_value(), 0, nullptr);
+}
+
+
+template <class... A>
+static MaybeHandle<Object> CallFunction(Isolate* isolate,
+ Handle<JSFunction> function,
+ A... args) {
+ Handle<Object> argv[] = {args...};
+ return Execution::Call(isolate, function,
+ isolate->factory()->undefined_value(), sizeof...(args),
+ argv);
+}
+
+
+template <class... A>
+class BytecodeGraphCallable {
+ public:
+ BytecodeGraphCallable(Isolate* isolate, Handle<JSFunction> function)
+ : isolate_(isolate), function_(function) {}
+ virtual ~BytecodeGraphCallable() {}
+
+ MaybeHandle<Object> operator()(A... args) {
+ return CallFunction(isolate_, function_, args...);
+ }
+
+ private:
+ Isolate* isolate_;
+ Handle<JSFunction> function_;
+};
+
+
+class BytecodeGraphTester {
+ public:
+ BytecodeGraphTester(Isolate* isolate, Zone* zone, const char* script)
+ : isolate_(isolate), zone_(zone), script_(script) {
+ i::FLAG_ignition = true;
+ i::FLAG_always_opt = false;
+ i::FLAG_vector_stores = true;
+ // Set ignition filter flag via SetFlagsFromString to avoid double-free
+ // (or potential leak with StrDup() based on ownership confusion).
+ ScopedVector<char> ignition_filter(64);
+ SNPrintF(ignition_filter, "--ignition-filter=%s", kFunctionName);
+ FlagList::SetFlagsFromString(ignition_filter.start(),
+ ignition_filter.length());
+ // Ensure handler table is generated.
+ isolate->interpreter()->Initialize();
+ }
+ virtual ~BytecodeGraphTester() {}
+
+ template <class... A>
+ BytecodeGraphCallable<A...> GetCallable() {
+ return BytecodeGraphCallable<A...>(isolate_, GetFunction());
+ }
+
+ private:
+ Isolate* isolate_;
+ Zone* zone_;
+ const char* script_;
+
+ Handle<JSFunction> GetFunction() {
+ CompileRun(script_);
+ Local<Function> api_function =
+ Local<Function>::Cast(CcTest::global()->Get(v8_str(kFunctionName)));
+ Handle<JSFunction> function = v8::Utils::OpenHandle(*api_function);
+ CHECK(function->shared()->HasBytecodeArray());
+
+ ParseInfo parse_info(zone_, function);
+
+ CompilationInfo compilation_info(&parse_info);
+ compilation_info.SetOptimizing(BailoutId::None(), Handle<Code>());
+ Parser parser(&parse_info);
+ CHECK(parser.Parse(&parse_info));
+ compiler::Pipeline pipeline(&compilation_info);
+ Handle<Code> code = pipeline.GenerateCode();
+ function->ReplaceCode(*code);
+
+ return function;
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeGraphTester);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+template <int N>
+struct ExpectedSnippet {
+ const char* code_snippet;
+ Handle<Object> return_value_and_parameters[N + 1];
+
+ inline Handle<Object> return_value() const {
+ return return_value_and_parameters[0];
+ }
+
+ inline Handle<Object> parameter(int i) const {
+ return return_value_and_parameters[1 + i];
+ }
+};
+
+
+TEST(BytecodeGraphBuilderReturnStatements) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"return;", {factory->undefined_value()}},
+ {"return null;", {factory->null_value()}},
+ {"return true;", {factory->true_value()}},
+ {"return false;", {factory->false_value()}},
+ {"return 0;", {factory->NewNumberFromInt(0)}},
+ {"return +1;", {factory->NewNumberFromInt(1)}},
+ {"return -1;", {factory->NewNumberFromInt(-1)}},
+ {"return +127;", {factory->NewNumberFromInt(127)}},
+ {"return -128;", {factory->NewNumberFromInt(-128)}},
+ {"return 0.001;", {factory->NewNumber(0.001)}},
+ {"return 3.7e-60;", {factory->NewNumber(3.7e-60)}},
+ {"return -3.7e60;", {factory->NewNumber(-3.7e60)}},
+ {"return '';", {factory->NewStringFromStaticChars("")}},
+ {"return 'catfood';", {factory->NewStringFromStaticChars("catfood")}}
+ // TODO(oth): {"return NaN;", {factory->NewNumber(NAN)}}
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderPrimitiveExpressions) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<0> snippets[] = {
+ {"return 1 + 1;", {factory->NewNumberFromInt(2)}},
+ {"return 20 - 30;", {factory->NewNumberFromInt(-10)}},
+ {"return 4 * 100;", {factory->NewNumberFromInt(400)}},
+ {"return 100 / 5;", {factory->NewNumberFromInt(20)}},
+ {"return 25 % 7;", {factory->NewNumberFromInt(4)}},
+ };
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s() { %s }\n%s();", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
+
+
+TEST(BytecodeGraphBuilderTwoParameterTests) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+ Factory* factory = isolate->factory();
+
+ ExpectedSnippet<2> snippets[] = {
+ // Integers
+ {"return p1 + p2;",
+ {factory->NewNumberFromInt(-70), factory->NewNumberFromInt(3),
+ factory->NewNumberFromInt(-73)}},
+ {"return p1 + p2 + 3;",
+ {factory->NewNumberFromInt(1139044), factory->NewNumberFromInt(300),
+ factory->NewNumberFromInt(1138741)}},
+ {"return p1 - p2;",
+ {factory->NewNumberFromInt(1100), factory->NewNumberFromInt(1000),
+ factory->NewNumberFromInt(-100)}},
+ {"return p1 * p2;",
+ {factory->NewNumberFromInt(-100000), factory->NewNumberFromInt(1000),
+ factory->NewNumberFromInt(-100)}},
+ {"return p1 / p2;",
+ {factory->NewNumberFromInt(-10), factory->NewNumberFromInt(1000),
+ factory->NewNumberFromInt(-100)}},
+ {"return p1 % p2;",
+ {factory->NewNumberFromInt(5), factory->NewNumberFromInt(373),
+ factory->NewNumberFromInt(16)}},
+ // Doubles
+ {"return p1 + p2;",
+ {factory->NewHeapNumber(9.999), factory->NewHeapNumber(3.333),
+ factory->NewHeapNumber(6.666)}},
+ {"return p1 - p2;",
+ {factory->NewHeapNumber(-3.333), factory->NewHeapNumber(3.333),
+ factory->NewHeapNumber(6.666)}},
+ {"return p1 * p2;",
+ {factory->NewHeapNumber(3.333 * 6.666), factory->NewHeapNumber(3.333),
+ factory->NewHeapNumber(6.666)}},
+ {"return p1 / p2;",
+ {factory->NewHeapNumber(2.25), factory->NewHeapNumber(9),
+ factory->NewHeapNumber(4)}},
+ // Strings
+ {"return p1 + p2;",
+ {factory->NewStringFromStaticChars("abcdef"),
+ factory->NewStringFromStaticChars("abc"),
+ factory->NewStringFromStaticChars("def")}}};
+
+ size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
+ for (size_t i = 0; i < num_snippets; i++) {
+ ScopedVector<char> script(1024);
+ SNPrintF(script, "function %s(p1, p2) { %s }\n%s(0, 0);", kFunctionName,
+ snippets[i].code_snippet, kFunctionName);
+
+ BytecodeGraphTester tester(isolate, zone, script.start());
+ auto callable = tester.GetCallable<Handle<Object>, Handle<Object>>();
+ Handle<Object> return_value =
+ callable(snippets[i].parameter(0), snippets[i].parameter(1))
+ .ToHandleChecked();
+ CHECK(return_value->SameValue(*snippets[i].return_value()));
+ }
+}
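The new tester funnels a script through Ignition bytecode and the TurboFan bytecode graph builder before calling the compiled result. A usage sketch in the same style; the script below is hypothetical, not one of the file's snippets:

BytecodeGraphTester tester(isolate, zone,
                           "function f(a) { return a * 2; }\nf(0);");
auto callable = tester.GetCallable<Handle<Object>>();
Handle<Object> result =
    callable(factory->NewNumberFromInt(21)).ToHandleChecked();
CHECK(result->SameValue(*factory->NewNumberFromInt(42)));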
diff --git a/deps/v8/test/cctest/compiler/test-run-deopt.cc b/deps/v8/test/cctest/compiler/test-run-deopt.cc
index aedf668f44..7d1ae4f471 100644
--- a/deps/v8/test/cctest/compiler/test-run-deopt.cc
+++ b/deps/v8/test/cctest/compiler/test-run-deopt.cc
@@ -63,7 +63,6 @@ TEST(DeoptSimpleInExpr) {
TEST(DeoptExceptionHandlerCatch) {
FLAG_allow_natives_syntax = true;
- FLAG_turbo_try_catch = true;
FunctionTester T(
"(function f() {"
diff --git a/deps/v8/test/cctest/compiler/test-run-inlining.cc b/deps/v8/test/cctest/compiler/test-run-inlining.cc
index 1b2559fc5f..05fb6ad46d 100644
--- a/deps/v8/test/cctest/compiler/test-run-inlining.cc
+++ b/deps/v8/test/cctest/compiler/test-run-inlining.cc
@@ -43,10 +43,11 @@ void InstallAssertInlineCountHelper(v8::Isolate* isolate) {
const uint32_t kRestrictedInliningFlags =
- CompilationInfo::kContextSpecializing | CompilationInfo::kTypingEnabled;
+ CompilationInfo::kFunctionContextSpecializing |
+ CompilationInfo::kTypingEnabled;
const uint32_t kInlineFlags = CompilationInfo::kInliningEnabled |
- CompilationInfo::kContextSpecializing |
+ CompilationInfo::kFunctionContextSpecializing |
CompilationInfo::kTypingEnabled;
} // namespace
@@ -161,7 +162,8 @@ TEST(InlineOmitArguments) {
"(function () {"
" var x = 42;"
" function bar(s, t, u, v) { AssertInlineCount(2); return x + s; };"
- " return (function (s,t) { return bar(s); });"
+ " function foo(s, t) { return bar(s); };"
+ " return foo;"
"})();",
kInlineFlags);
@@ -170,6 +172,22 @@ TEST(InlineOmitArguments) {
}
+TEST(InlineOmitArgumentsObject) {
+ FunctionTester T(
+ "(function () {"
+ " function bar(s, t, u, v) { AssertInlineCount(2); return arguments; };"
+ " function foo(s, t) { var args = bar(s);"
+ " return args.length == 1 &&"
+ " args[0] == 11; };"
+ " return foo;"
+ "})();",
+ kInlineFlags);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.true_value(), T.Val(11), T.undefined());
+}
+
+
TEST(InlineOmitArgumentsDeopt) {
FunctionTester T(
"(function () {"
@@ -192,7 +210,7 @@ TEST(InlineSurplusArguments) {
"(function () {"
" var x = 42;"
" function foo(s) { AssertInlineCount(2); return x + s; };"
- " function bar(s,t) { return foo(s,t,13); };"
+ " function bar(s, t) { return foo(s, t, 13); };"
" return bar;"
"})();",
kInlineFlags);
@@ -202,6 +220,24 @@ TEST(InlineSurplusArguments) {
}
+TEST(InlineSurplusArgumentsObject) {
+ FunctionTester T(
+ "(function () {"
+ " function foo(s) { AssertInlineCount(2); return arguments; };"
+ " function bar(s, t) { var args = foo(s, t, 13);"
+ " return args.length == 3 &&"
+ " args[0] == 11 &&"
+ " args[1] == 12 &&"
+ " args[2] == 13; };"
+ " return bar;"
+ "})();",
+ kInlineFlags);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.true_value(), T.Val(11), T.Val(12));
+}
+
+
TEST(InlineSurplusArgumentsDeopt) {
FunctionTester T(
"(function () {"
@@ -415,20 +451,6 @@ TEST(InlineIntrinsicIsSmi) {
}
-TEST(InlineIntrinsicIsNonNegativeSmi) {
- FunctionTester T(
- "(function () {"
- " var x = 42;"
- " function bar(s,t) { return %_IsNonNegativeSmi(x); };"
- " return bar;"
- "})();",
- kInlineFlags);
-
- InstallAssertInlineCountHelper(CcTest::isolate());
- T.CheckCall(T.true_value(), T.Val(12), T.Val(4));
-}
-
-
TEST(InlineIntrinsicIsArray) {
FunctionTester T(
"(function () {"
diff --git a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
index 1fa37748c6..e92d520a42 100644
--- a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
+++ b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
@@ -128,32 +128,6 @@ TEST(IsMinusZero) {
}
-TEST(IsNonNegativeSmi) {
- FunctionTester T("(function(a) { return %_IsNonNegativeSmi(a); })", flags);
-
- T.CheckTrue(T.Val(1));
- T.CheckFalse(T.Val(1.1));
- T.CheckFalse(T.Val(-0.0));
- T.CheckFalse(T.Val(-2));
- T.CheckFalse(T.Val(-2.3));
- T.CheckFalse(T.undefined());
-}
-
-
-TEST(IsObject) {
- FunctionTester T("(function(a) { return %_IsObject(a); })", flags);
-
- T.CheckFalse(T.NewObject("(function() {})"));
- T.CheckTrue(T.NewObject("([1])"));
- T.CheckTrue(T.NewObject("({})"));
- T.CheckTrue(T.NewObject("(/x/)"));
- T.CheckFalse(T.undefined());
- T.CheckTrue(T.null());
- T.CheckFalse(T.Val("x"));
- T.CheckFalse(T.Val(1));
-}
-
-
TEST(IsRegExp) {
FunctionTester T("(function(a) { return %_IsRegExp(a); })", flags);
@@ -243,17 +217,6 @@ TEST(OneByteSeqStringSetChar) {
}
-TEST(NewConsString) {
- FunctionTester T(
- "(function() { "
- " return %_NewConsString(14, true, 'abcdefghi', 'jklmn');"
- " })",
- flags);
-
- T.CheckCall(T.Val("abcdefghijklmn"));
-}
-
-
TEST(SetValueOf) {
FunctionTester T("(function(a,b) { return %_SetValueOf(a,b); })", flags);
diff --git a/deps/v8/test/cctest/compiler/test-run-jscalls.cc b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
index 893c2fa460..621c1c339f 100644
--- a/deps/v8/test/cctest/compiler/test-run-jscalls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jscalls.cc
@@ -134,30 +134,26 @@ TEST(ConstructorCall) {
// TODO(titzer): factor these out into test-runtime-calls.cc
TEST(RuntimeCallCPP2) {
FLAG_allow_natives_syntax = true;
- FunctionTester T("(function(a,b) { return %NumberAdd(a, b); })");
+ FunctionTester T("(function(a,b) { return %NumberImul(a, b); })");
- T.CheckCall(T.Val(65), T.Val(42), T.Val(23));
- T.CheckCall(T.Val(19), T.Val(42), T.Val(-23));
- T.CheckCall(T.Val(6.5), T.Val(4.2), T.Val(2.3));
+ T.CheckCall(T.Val(2730), T.Val(42), T.Val(65));
+ T.CheckCall(T.Val(798), T.Val(42), T.Val(19));
}
TEST(RuntimeCallJS) {
FLAG_allow_natives_syntax = true;
- FunctionTester T("(function(a) { return %$toString(a); })");
-
- T.CheckCall(T.Val("23"), T.Val(23), T.undefined());
- T.CheckCall(T.Val("4.2"), T.Val(4.2), T.undefined());
- T.CheckCall(T.Val("str"), T.Val("str"), T.undefined());
- T.CheckCall(T.Val("true"), T.true_value(), T.undefined());
- T.CheckCall(T.Val("false"), T.false_value(), T.undefined());
- T.CheckCall(T.Val("undefined"), T.undefined(), T.undefined());
+ FunctionTester T("(function(a) { return %to_number_fun(a); })");
+
+ T.CheckCall(T.Val(23), T.Val(23), T.undefined());
+ T.CheckCall(T.Val(4.2), T.Val(4.2), T.undefined());
+ T.CheckCall(T.Val(1), T.true_value(), T.undefined());
}
TEST(RuntimeCallInline) {
FLAG_allow_natives_syntax = true;
- FunctionTester T("(function(a) { return %_IsObject(a); })");
+ FunctionTester T("(function(a) { return %_IsSpecObject(a); })");
T.CheckCall(T.false_value(), T.Val(23), T.undefined());
T.CheckCall(T.false_value(), T.Val(4.2), T.undefined());
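The swapped-in runtime call changes the expected results accordingly: %NumberImul performs an integer multiply, which for these small operands matches plain multiplication. A compile-time check of the new expectations:

static_assert(42 * 65 == 2730, "Imul expectation for (42, 65)");
static_assert(42 * 19 == 798, "Imul expectation for (42, 19)");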
diff --git a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
index 0e1977b720..2ba8fdfcc8 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
@@ -59,7 +59,6 @@ TEST(ThrowMessageDirectly) {
TEST(ThrowMessageIndirectly) {
- i::FLAG_turbo_try_catch = true;
i::FLAG_turbo_try_finally = true;
static const char* src =
"(function(a, b) {"
@@ -81,7 +80,6 @@ TEST(ThrowMessageIndirectly) {
TEST(Catch) {
- i::FLAG_turbo_try_catch = true;
const char* src =
"(function(a,b) {"
" var r = '-';"
@@ -100,7 +98,6 @@ TEST(Catch) {
TEST(CatchNested) {
- i::FLAG_turbo_try_catch = true;
const char* src =
"(function(a,b) {"
" var r = '-';"
@@ -124,7 +121,6 @@ TEST(CatchNested) {
TEST(CatchBreak) {
- i::FLAG_turbo_try_catch = true;
const char* src =
"(function(a,b) {"
" var r = '-';"
@@ -149,7 +145,6 @@ TEST(CatchBreak) {
TEST(CatchCall) {
- i::FLAG_turbo_try_catch = true;
const char* src =
"(function(fun) {"
" var r = '-';"
@@ -213,7 +208,6 @@ TEST(FinallyBreak) {
TEST(DeoptTry) {
- i::FLAG_turbo_try_catch = true;
const char* src =
"(function f(a) {"
" try {"
@@ -230,7 +224,6 @@ TEST(DeoptTry) {
TEST(DeoptCatch) {
- i::FLAG_turbo_try_catch = true;
const char* src =
"(function f(a) {"
" try {"
diff --git a/deps/v8/test/cctest/compiler/test-run-jsobjects.cc b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
new file mode 100644
index 0000000000..242de4dbf7
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-run-jsobjects.cc
@@ -0,0 +1,47 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(ArgumentsMapped) {
+ FunctionTester T("(function(a) { return arguments; })");
+
+ Handle<Object> arguments;
+ T.Call(T.Val(19), T.Val(23), T.Val(42), T.Val(65)).ToHandle(&arguments);
+ CHECK(arguments->IsJSObject() && !arguments->IsJSArray());
+ CHECK(JSObject::cast(*arguments)->HasSloppyArgumentsElements());
+ Handle<String> l = T.isolate->factory()->length_string();
+ Handle<Object> length = JSObject::GetProperty(arguments, l).ToHandleChecked();
+ CHECK_EQ(4, length->Number());
+}
+
+
+TEST(ArgumentsUnmapped) {
+ FunctionTester T("(function(a) { 'use strict'; return arguments; })");
+
+ Handle<Object> arguments;
+ T.Call(T.Val(19), T.Val(23), T.Val(42), T.Val(65)).ToHandle(&arguments);
+ CHECK(arguments->IsJSObject() && !arguments->IsJSArray());
+ CHECK(!JSObject::cast(*arguments)->HasSloppyArgumentsElements());
+ Handle<String> l = T.isolate->factory()->length_string();
+ Handle<Object> length = JSObject::GetProperty(arguments, l).ToHandleChecked();
+ CHECK_EQ(4, length->Number());
+}
+
+
+TEST(ArgumentsRest) {
+ FLAG_harmony_rest_parameters = true;
+ FunctionTester T("(function(a, ...args) { return args; })");
+
+ Handle<Object> arguments;
+ T.Call(T.Val(19), T.Val(23), T.Val(42), T.Val(65)).ToHandle(&arguments);
+ CHECK(arguments->IsJSObject() && arguments->IsJSArray());
+ CHECK(!JSObject::cast(*arguments)->HasSloppyArgumentsElements());
+ Handle<String> l = T.isolate->factory()->length_string();
+ Handle<Object> length = JSObject::GetProperty(arguments, l).ToHandleChecked();
+ CHECK_EQ(3, length->Number());
+}
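The three new tests pin down the object kinds: sloppy-mode arguments is a JSObject with sloppy (aliased) elements, strict-mode arguments is unaliased, and a rest parameter materializes a genuine JSArray. A hypothetical companion test in the same style, checking the aliasing behavior itself rather than the elements kind:

TEST(ArgumentsAliasing) {
  FunctionTester T("(function(a) { a = 7; return arguments[0]; })");
  // Sloppy mode: assigning to the parameter is visible through arguments.
  T.CheckCall(T.Val(7), T.Val(1));
}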
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 8b14dab46c..078b8c25cd 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -72,7 +72,7 @@ TEST(CodeGenInt32Binop) {
RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
Node* a = Int32Input(&m, j);
Node* b = Int32Input(&m, k);
- m.Return(m.NewNode(kOps[i], a, b));
+ m.Return(m.AddNode(kOps[i], a, b));
m.GenerateCode();
}
}
@@ -132,7 +132,7 @@ TEST(CodeGenInt64Binop) {
RawMachineAssemblerTester<int64_t> m(kMachInt64, kMachInt64);
Node* a = Int64Input(&m, j);
Node* b = Int64Input(&m, k);
- m.Return(m.NewNode(kOps[i], a, b));
+ m.Return(m.AddNode(kOps[i], a, b));
m.GenerateCode();
}
}
@@ -626,7 +626,7 @@ TEST(RunSwitch4) {
m.Bind(&end);
const int num_results = static_cast<int>(arraysize(results));
Node* phi =
- m.NewNode(m.common()->Phi(kMachInt32, num_results), num_results, results);
+ m.AddNode(m.common()->Phi(kMachInt32, num_results), num_results, results);
m.Return(phi);
for (size_t i = 0; i < kNumValues; ++i) {
@@ -1053,7 +1053,7 @@ TEST(RunInt32AddInBranch) {
kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Int32Add(m.Parameter(0),
- m.NewNode(shops[n], m.Parameter(1),
+ m.AddNode(shops[n], m.Parameter(1),
m.Parameter(2))),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -1145,7 +1145,7 @@ TEST(RunInt32AddInComparison) {
kMachUint32);
m.Return(m.Word32Equal(
m.Int32Add(m.Parameter(0),
- m.NewNode(shops[n], m.Parameter(1), m.Parameter(2))),
+ m.AddNode(shops[n], m.Parameter(1), m.Parameter(2))),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
@@ -1390,7 +1390,7 @@ TEST(RunInt32SubInBranch) {
kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Int32Sub(m.Parameter(0),
- m.NewNode(shops[n], m.Parameter(1),
+ m.AddNode(shops[n], m.Parameter(1),
m.Parameter(2))),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -1482,7 +1482,7 @@ TEST(RunInt32SubInComparison) {
kMachUint32);
m.Return(m.Word32Equal(
m.Int32Sub(m.Parameter(0),
- m.NewNode(shops[n], m.Parameter(1), m.Parameter(2))),
+ m.AddNode(shops[n], m.Parameter(1), m.Parameter(2))),
m.Int32Constant(0)));
FOR_UINT32_INPUTS(i) {
FOR_INT32_INPUTS(j) {
@@ -2059,7 +2059,7 @@ TEST(RunWord32AndInBranch) {
kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32And(m.Parameter(0),
- m.NewNode(shops[n], m.Parameter(1),
+ m.AddNode(shops[n], m.Parameter(1),
m.Parameter(2))),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -2287,7 +2287,7 @@ TEST(RunWord32OrInBranch) {
kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32Or(m.Parameter(0),
- m.NewNode(shops[n], m.Parameter(1),
+ m.AddNode(shops[n], m.Parameter(1),
m.Parameter(2))),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -2512,7 +2512,7 @@ TEST(RunWord32XorInBranch) {
kMachUint32);
MLabel blocka, blockb;
m.Branch(m.Word32Equal(m.Word32Xor(m.Parameter(0),
- m.NewNode(shops[n], m.Parameter(1),
+ m.AddNode(shops[n], m.Parameter(1),
m.Parameter(2))),
m.Int32Constant(0)),
&blocka, &blockb);
@@ -3022,7 +3022,7 @@ TEST(RunDeadInt32Binops) {
for (size_t i = 0; i < arraysize(kOps); ++i) {
RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
int32_t constant = static_cast<int32_t>(0x55555 + i);
- m.NewNode(kOps[i], m.Parameter(0), m.Parameter(1));
+ m.AddNode(kOps[i], m.Parameter(0), m.Parameter(1));
m.Return(m.Int32Constant(constant));
CHECK_EQ(constant, m.Call(1, 1));
@@ -3137,9 +3137,9 @@ TEST(RunFloat32Binop) {
for (int i = 0; ops[i] != NULL; i++) {
for (int j = 0; inputs[j] != NULL; j += 2) {
RawMachineAssemblerTester<int32_t> m;
- Node* a = m.NewNode(inputs[j]);
- Node* b = m.NewNode(inputs[j + 1]);
- Node* binop = m.NewNode(ops[i], a, b);
+ Node* a = m.AddNode(inputs[j]);
+ Node* b = m.AddNode(inputs[j + 1]);
+ Node* binop = m.AddNode(ops[i], a, b);
Node* base = m.PointerConstant(&result);
Node* zero = m.IntPtrConstant(0);
m.Store(kMachFloat32, base, zero, binop);
@@ -3173,9 +3173,9 @@ TEST(RunFloat64Binop) {
for (int i = 0; ops[i] != NULL; i++) {
for (int j = 0; inputs[j] != NULL; j += 2) {
RawMachineAssemblerTester<int32_t> m;
- Node* a = m.NewNode(inputs[j]);
- Node* b = m.NewNode(inputs[j + 1]);
- Node* binop = m.NewNode(ops[i], a, b);
+ Node* a = m.AddNode(inputs[j]);
+ Node* b = m.AddNode(inputs[j + 1]);
+ Node* binop = m.AddNode(ops[i], a, b);
Node* base = m.PointerConstant(&result);
Node* zero = m.Int32Constant(0);
m.Store(kMachFloat64, base, zero, binop);
@@ -3196,7 +3196,7 @@ TEST(RunDeadFloat32Binops) {
for (int i = 0; ops[i] != NULL; i++) {
RawMachineAssemblerTester<int32_t> m;
int constant = 0x53355 + i;
- m.NewNode(ops[i], m.Float32Constant(0.1f), m.Float32Constant(1.11f));
+ m.AddNode(ops[i], m.Float32Constant(0.1f), m.Float32Constant(1.11f));
m.Return(m.Int32Constant(constant));
CHECK_EQ(constant, m.Call());
}
@@ -3213,7 +3213,7 @@ TEST(RunDeadFloat64Binops) {
for (int i = 0; ops[i] != NULL; i++) {
RawMachineAssemblerTester<int32_t> m;
int constant = 0x53355 + i;
- m.NewNode(ops[i], m.Float64Constant(0.1), m.Float64Constant(1.11));
+ m.AddNode(ops[i], m.Float64Constant(0.1), m.Float64Constant(1.11));
m.Return(m.Int32Constant(constant));
CHECK_EQ(constant, m.Call());
}
@@ -4289,7 +4289,7 @@ TEST(RunFloat64UnorderedCompare) {
Node* a = m.Float64Constant(*i);
Node* b = m.Float64Constant(nan);
if (j == 1) std::swap(a, b);
- m.Return(m.NewNode(operators[o], a, b));
+ m.Return(m.AddNode(operators[o], a, b));
CHECK_EQ(0, m.Call());
}
}
@@ -5277,5 +5277,113 @@ TEST(RunCallCFunction8) {
CHECK_EQ(x * 8, m.Call(x));
}
}
-
#endif // USE_SIMULATOR
+
+#if V8_TARGET_ARCH_64_BIT
+// TODO(titzer): run int64 tests on all platforms when supported.
+TEST(RunCheckedLoadInt64) {
+ int64_t buffer[] = {0x66bbccddeeff0011LL, 0x1122334455667788LL};
+ RawMachineAssemblerTester<int64_t> m(kMachInt32);
+ Node* base = m.PointerConstant(buffer);
+ Node* index = m.Parameter(0);
+ Node* length = m.Int32Constant(16);
+ Node* load =
+ m.AddNode(m.machine()->CheckedLoad(kMachInt64), base, index, length);
+ m.Return(load);
+
+ CHECK_EQ(buffer[0], m.Call(0));
+ CHECK_EQ(buffer[1], m.Call(8));
+ CHECK_EQ(0, m.Call(16));
+}
+
+
+TEST(RunCheckedStoreInt64) {
+ const int64_t write = 0x5566778899aabbLL;
+ const int64_t before = 0x33bbccddeeff0011LL;
+ int64_t buffer[] = {before, before};
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ Node* base = m.PointerConstant(buffer);
+ Node* index = m.Parameter(0);
+ Node* length = m.Int32Constant(16);
+ Node* value = m.Int64Constant(write);
+ Node* store = m.AddNode(m.machine()->CheckedStore(kMachInt64), base, index,
+ length, value);
+ USE(store);
+ m.Return(m.Int32Constant(11));
+
+ CHECK_EQ(11, m.Call(16));
+ CHECK_EQ(before, buffer[0]);
+ CHECK_EQ(before, buffer[1]);
+
+ CHECK_EQ(11, m.Call(0));
+ CHECK_EQ(write, buffer[0]);
+ CHECK_EQ(before, buffer[1]);
+
+ CHECK_EQ(11, m.Call(8));
+ CHECK_EQ(write, buffer[0]);
+ CHECK_EQ(write, buffer[1]);
+}
+
+
+TEST(RunBitcastInt64ToFloat64) {
+ int64_t input = 1;
+ double output = 0.0;
+ RawMachineAssemblerTester<int32_t> m;
+ m.StoreToPointer(
+ &output, kMachFloat64,
+ m.BitcastInt64ToFloat64(m.LoadFromPointer(&input, kMachInt64)));
+ m.Return(m.Int32Constant(11));
+ FOR_INT64_INPUTS(i) {
+ input = *i;
+ CHECK_EQ(11, m.Call());
+ double expected = bit_cast<double>(input);
+ CHECK_EQ(bit_cast<int64_t>(expected), bit_cast<int64_t>(output));
+ }
+}
+
+
+TEST(RunBitcastFloat64ToInt64) {
+ double input = 0;
+ int64_t output = 0;
+ RawMachineAssemblerTester<int32_t> m;
+ m.StoreToPointer(
+ &output, kMachInt64,
+ m.BitcastFloat64ToInt64(m.LoadFromPointer(&input, kMachFloat64)));
+ m.Return(m.Int32Constant(11));
+ FOR_FLOAT64_INPUTS(i) {
+ input = *i;
+ CHECK_EQ(11, m.Call());
+    int64_t expected = bit_cast<int64_t>(input);
+    // Compare bit patterns as int64 to avoid double conversion.
+ CHECK_EQ(expected, output);
+ }
+}
+#endif
+
+
+TEST(RunBitcastFloat32ToInt32) {
+ float input = 32.25;
+ RawMachineAssemblerTester<int32_t> m;
+ m.Return(m.BitcastFloat32ToInt32(m.LoadFromPointer(&input, kMachFloat32)));
+ FOR_FLOAT32_INPUTS(i) {
+ input = *i;
+ int32_t expected = bit_cast<int32_t>(input);
+ CHECK_EQ(expected, m.Call());
+ }
+}
+
+
+TEST(RunBitcastInt32ToFloat32) {
+ int32_t input = 1;
+ float output = 0.0;
+ RawMachineAssemblerTester<int32_t> m;
+ m.StoreToPointer(
+ &output, kMachFloat32,
+ m.BitcastInt32ToFloat32(m.LoadFromPointer(&input, kMachInt32)));
+ m.Return(m.Int32Constant(11));
+ FOR_INT32_INPUTS(i) {
+ input = *i;
+ CHECK_EQ(11, m.Call());
+ float expected = bit_cast<float>(input);
+ CHECK_EQ(bit_cast<int32_t>(expected), bit_cast<int32_t>(output));
+ }
+}
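The two checked-access tests encode the soft-bounds semantics these operators give typed-array accesses: an out-of-bounds CheckedLoad produces a zero of the loaded type and an out-of-bounds CheckedStore is a no-op, with no trap in either case. A standalone reference model of the load, a sketch independent of V8:

#include <cstdint>
#include <cstring>

int64_t CheckedLoadInt64(const void* base, int32_t index, int32_t length) {
  // Matches the test: indices 0 and 8 are in bounds for length 16, while
  // index 16 falls outside and reads as zero instead of trapping.
  if (index < 0 || index + 8 > length) return 0;
  int64_t value;
  std::memcpy(&value, static_cast<const char*>(base) + index, sizeof(value));
  return value;
}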
diff --git a/deps/v8/test/cctest/compiler/test-run-native-calls.cc b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
index 2e255c7729..bc6acf1bc9 100644
--- a/deps/v8/test/cctest/compiler/test-run-native-calls.cc
+++ b/deps/v8/test/cctest/compiler/test-run-native-calls.cc
@@ -244,8 +244,9 @@ class Int32Signature : public MachineSignature {
Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
Schedule* schedule = nullptr) {
Isolate* isolate = CcTest::InitIsolateOnce();
+ CompilationInfo info("testing", isolate, graph->zone());
Handle<Code> code =
- Pipeline::GenerateCodeForTesting(isolate, desc, graph, schedule);
+ Pipeline::GenerateCodeForTesting(&info, desc, graph, schedule);
CHECK(!code.is_null());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_opt_code) {
@@ -267,8 +268,7 @@ Handle<Code> WrapWithCFunction(Handle<Code> inner, CallDescriptor* desc) {
GraphAndBuilders& b = caller;
Node* start = b.graph()->NewNode(b.common()->Start(param_count + 3));
b.graph()->SetStart(start);
- Unique<HeapObject> unique = Unique<HeapObject>::CreateUninitialized(inner);
- Node* target = b.graph()->NewNode(b.common()->HeapConstant(unique));
+ Node* target = b.graph()->NewNode(b.common()->HeapConstant(inner));
// Add arguments to the call.
Node** args = zone.NewArray<Node*>(param_count + 3);
@@ -444,9 +444,7 @@ class Computer {
Graph graph(&zone);
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
RawMachineAssembler raw(isolate, &graph, cdesc);
- Unique<HeapObject> unique =
- Unique<HeapObject>::CreateUninitialized(inner);
- Node* target = raw.HeapConstant(unique);
+ Node* target = raw.HeapConstant(inner);
Node** args = zone.NewArray<Node*>(num_params);
for (int i = 0; i < num_params; i++) {
args[i] = io.MakeConstant(raw, io.input[i]);
@@ -479,9 +477,7 @@ class Computer {
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
RawMachineAssembler raw(isolate, &graph, cdesc);
Node* base = raw.PointerConstant(io.input);
- Unique<HeapObject> unique =
- Unique<HeapObject>::CreateUninitialized(inner);
- Node* target = raw.HeapConstant(unique);
+ Node* target = raw.HeapConstant(inner);
Node** args = zone.NewArray<Node*>(kMaxParamCount);
for (int i = 0; i < num_params; i++) {
args[i] = io.LoadInput(raw, base, i);
@@ -578,8 +574,7 @@ static void CopyTwentyInt32(CallDescriptor* desc) {
CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
RawMachineAssembler raw(isolate, &graph, cdesc);
Node* base = raw.PointerConstant(input);
- Unique<HeapObject> unique = Unique<HeapObject>::CreateUninitialized(inner);
- Node* target = raw.HeapConstant(unique);
+ Node* target = raw.HeapConstant(inner);
Node** args = zone.NewArray<Node*>(kNumParams);
for (int i = 0; i < kNumParams; i++) {
Node* offset = raw.Int32Constant(i * sizeof(int32_t));
@@ -952,8 +947,7 @@ static void Build_Select_With_Call(CallDescriptor* desc,
{
// Build a call to the function that does the select.
- Unique<HeapObject> unique = Unique<HeapObject>::CreateUninitialized(inner);
- Node* target = raw.HeapConstant(unique);
+ Node* target = raw.HeapConstant(inner);
Node** args = raw.zone()->NewArray<Node*>(num_params);
for (int i = 0; i < num_params; i++) {
args[i] = raw.Parameter(i);
@@ -983,3 +977,125 @@ TEST(Float64StackParamsToStackParams) {
Run_Computation<float64>(desc, Build_Select_With_Call<float64, 1>,
Compute_Select<float64, 1>, 1099);
}
+
+
+void MixedParamTest(int start) {
+ if (DISABLE_NATIVE_STACK_PARAMS) return;
+ if (RegisterConfiguration::ArchDefault()->num_double_registers() < 2) return;
+
+// TODO(titzer): mix in 64-bit types on all platforms when supported.
+#if V8_TARGET_ARCH_32_BIT
+ static MachineType types[] = {
+ kMachInt32, kMachFloat32, kMachFloat64, kMachInt32, kMachFloat64,
+ kMachFloat32, kMachFloat32, kMachFloat64, kMachInt32, kMachFloat32,
+ kMachInt32, kMachFloat64, kMachFloat64, kMachFloat32, kMachInt32,
+ kMachFloat64, kMachInt32, kMachFloat32};
+#else
+ static MachineType types[] = {
+ kMachInt32, kMachInt64, kMachFloat32, kMachFloat64, kMachInt32,
+ kMachFloat64, kMachFloat32, kMachInt64, kMachFloat64, kMachInt32,
+ kMachFloat32, kMachInt32, kMachFloat64, kMachFloat64, kMachInt64,
+ kMachInt32, kMachFloat64, kMachInt32, kMachFloat32};
+#endif
+
+ Isolate* isolate = CcTest::InitIsolateOnce();
+
+ // Build machine signature
+ MachineType* params = &types[start];
+ const int num_params = static_cast<int>(arraysize(types) - start);
+
+ // Build call descriptor
+ int parray[] = {0, 1};
+ int rarray[] = {0};
+ Allocator palloc(parray, 2, parray, 2);
+ Allocator ralloc(rarray, 1, rarray, 1);
+ RegisterConfig config(palloc, ralloc);
+
+ for (int which = 0; which < num_params; which++) {
+ Zone zone;
+ HandleScope scope(isolate);
+ MachineSignature::Builder builder(&zone, 1, num_params);
+ builder.AddReturn(params[which]);
+ for (int j = 0; j < num_params; j++) builder.AddParam(params[j]);
+ MachineSignature* sig = builder.Build();
+ CallDescriptor* desc = config.Create(&zone, sig);
+
+ Handle<Code> select;
+ {
+ // build the select.
+ Zone zone;
+ Graph graph(&zone);
+ RawMachineAssembler raw(isolate, &graph, desc);
+ raw.Return(raw.Parameter(which));
+ select = CompileGraph("Compute", desc, &graph, raw.Export());
+ }
+
+ {
+ // call the select.
+ Handle<Code> wrapper = Handle<Code>::null();
+ int32_t expected_ret;
+ char bytes[kDoubleSize];
+ V8_ALIGNED(8) char output[kDoubleSize];
+ int expected_size = 0;
+ CSignature0<int32_t> csig;
+ {
+ // Wrap the select code with a callable function that passes constants.
+ Zone zone;
+ Graph graph(&zone);
+ CallDescriptor* cdesc = Linkage::GetSimplifiedCDescriptor(&zone, &csig);
+ RawMachineAssembler raw(isolate, &graph, cdesc);
+ Node* target = raw.HeapConstant(select);
+ Node** args = zone.NewArray<Node*>(num_params);
+ int64_t constant = 0x0102030405060708;
+ for (int i = 0; i < num_params; i++) {
+ MachineType param_type = sig->GetParam(i);
+ Node* konst = nullptr;
+ if (param_type == kMachInt32) {
+ int32_t value[] = {static_cast<int32_t>(constant)};
+ konst = raw.Int32Constant(value[0]);
+ if (i == which) memcpy(bytes, value, expected_size = 4);
+ }
+ if (param_type == kMachInt64) {
+ int64_t value[] = {static_cast<int64_t>(constant)};
+ konst = raw.Int64Constant(value[0]);
+ if (i == which) memcpy(bytes, value, expected_size = 8);
+ }
+ if (param_type == kMachFloat32) {
+ float32 value[] = {static_cast<float32>(constant)};
+ konst = raw.Float32Constant(value[0]);
+ if (i == which) memcpy(bytes, value, expected_size = 4);
+ }
+ if (param_type == kMachFloat64) {
+ float64 value[] = {static_cast<float64>(constant)};
+ konst = raw.Float64Constant(value[0]);
+ if (i == which) memcpy(bytes, value, expected_size = 8);
+ }
+ CHECK_NOT_NULL(konst);
+
+ args[i] = konst;
+ constant += 0x1010101010101010;
+ }
+
+ Node* call = raw.CallN(desc, target, args);
+ Node* store = raw.StoreToPointer(output, sig->GetReturn(), call);
+ USE(store);
+ expected_ret = static_cast<int32_t>(constant);
+ raw.Return(raw.Int32Constant(expected_ret));
+ wrapper = CompileGraph("Select-mixed-wrapper-const", cdesc, &graph,
+ raw.Export());
+ }
+
+ CodeRunner<int32_t> runnable(isolate, wrapper, &csig);
+ CHECK_EQ(expected_ret, runnable.Call());
+ for (int i = 0; i < expected_size; i++) {
+ CHECK_EQ(static_cast<int>(bytes[i]), static_cast<int>(output[i]));
+ }
+ }
+ }
+}
+
+
+TEST(MixedParams_0) { MixedParamTest(0); }
+TEST(MixedParams_1) { MixedParamTest(1); }
+TEST(MixedParams_2) { MixedParamTest(2); }
+TEST(MixedParams_3) { MixedParamTest(3); }
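MixedParamTest validates parameter routing by comparing raw byte images: the constant's bytes are recorded at graph-build time and compared byte-for-byte against what the compiled select stored. The 0x1010101010101010 stride gives every parameter a distinct image, so a parameter delivered to the wrong register or stack slot fails the comparison. A sketch of the distinctness property:

int64_t c = 0x0102030405060708;
int64_t c_next = c + 0x1010101010101010;  // 0x1112131415161718
// No byte of c equals the corresponding byte of c_next, so adjacent
// parameters can never be confused by the per-byte check.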
diff --git a/deps/v8/test/cctest/compiler/test-run-properties.cc b/deps/v8/test/cctest/compiler/test-run-properties.cc
index b7677f7fd2..f2d72e4c4e 100644
--- a/deps/v8/test/cctest/compiler/test-run-properties.cc
+++ b/deps/v8/test/cctest/compiler/test-run-properties.cc
@@ -41,9 +41,9 @@ static void TypedArrayLoadHelper(const char* array_type) {
values_buffer.start(), array_type, arraysize(kValues),
values_buffer.start(), array_type, array_type);
- FunctionTester T(
- source_buffer.start(),
- CompilationInfo::kContextSpecializing | CompilationInfo::kTypingEnabled);
+ FunctionTester T(source_buffer.start(),
+ CompilationInfo::kFunctionContextSpecializing |
+ CompilationInfo::kTypingEnabled);
for (size_t i = 0; i < arraysize(kValues); ++i) {
for (size_t j = 0; j < arraysize(kValues); ++j) {
volatile U value_a = static_cast<U>(kValues[i]);
@@ -109,9 +109,9 @@ static void TypedArrayStoreHelper(const char* array_type) {
values_buffer.start(), array_type, arraysize(kValues),
values_buffer.start(), array_type, array_type);
- FunctionTester T(
- source_buffer.start(),
- CompilationInfo::kContextSpecializing | CompilationInfo::kTypingEnabled);
+ FunctionTester T(source_buffer.start(),
+ CompilationInfo::kFunctionContextSpecializing |
+ CompilationInfo::kTypingEnabled);
for (size_t i = 0; i < arraysize(kValues); ++i) {
for (size_t j = 0; j < arraysize(kValues); ++j) {
volatile U value_a = static_cast<U>(kValues[i]);
diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc
index 607efa135b..ca7155f5cf 100644
--- a/deps/v8/test/cctest/compiler/test-run-stubs.cc
+++ b/deps/v8/test/cctest/compiler/test-run-stubs.cc
@@ -41,10 +41,8 @@ TEST(RunOptimizedMathFloorStub) {
Node* start = graph.NewNode(common.Start(4));
// Parameter 0 is the number to round
Node* numberParam = graph.NewNode(common.Parameter(1), start);
- Unique<HeapObject> u = Unique<HeapObject>::CreateImmovable(code);
- Node* theCode = graph.NewNode(common.HeapConstant(u));
- Unique<HeapObject> tvu = Unique<HeapObject>::CreateImmovable(tv);
- Node* vector = graph.NewNode(common.HeapConstant(tvu));
+ Node* theCode = graph.NewNode(common.HeapConstant(code));
+ Node* vector = graph.NewNode(common.HeapConstant(tv));
Node* dummyContext = graph.NewNode(common.NumberConstant(0.0));
Node* call =
graph.NewNode(common.Call(descriptor), theCode, js.UndefinedConstant(),
@@ -83,8 +81,7 @@ TEST(RunStringLengthTFStub) {
Node* nameParam = graph.NewNode(common.Parameter(2), start);
Node* slotParam = graph.NewNode(common.Parameter(3), start);
Node* vectorParam = graph.NewNode(common.Parameter(4), start);
- Unique<HeapObject> u = Unique<HeapObject>::CreateImmovable(code);
- Node* theCode = graph.NewNode(common.HeapConstant(u));
+ Node* theCode = graph.NewNode(common.HeapConstant(code));
Node* dummyContext = graph.NewNode(common.NumberConstant(0.0));
Node* call =
graph.NewNode(common.Call(descriptor), theCode, receiverParam, nameParam,
@@ -127,8 +124,7 @@ TEST(RunStringAddTFStub) {
// Parameter 0 is the receiver
Node* leftParam = graph.NewNode(common.Parameter(1), start);
Node* rightParam = graph.NewNode(common.Parameter(2), start);
- Unique<HeapObject> u = Unique<HeapObject>::CreateImmovable(code);
- Node* theCode = graph.NewNode(common.HeapConstant(u));
+ Node* theCode = graph.NewNode(common.HeapConstant(code));
Node* dummyContext = graph.NewNode(common.NumberConstant(0.0));
Node* call = graph.NewNode(common.Call(descriptor), theCode, leftParam,
rightParam, dummyContext, start, start);
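All three stub tests apply the same API change seen in test-run-native-calls.cc above: HeapConstant now takes the Handle directly, removing the Unique<HeapObject> indirection. Before and after, as a sketch:

// old: two steps through Unique<HeapObject>
//   Unique<HeapObject> u = Unique<HeapObject>::CreateImmovable(code);
//   Node* theCode = graph.NewNode(common.HeapConstant(u));
// new: one step, handle passed directly
Node* theCode = graph.NewNode(common.HeapConstant(code));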
diff --git a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
index 2a642c1589..54ffe85288 100644
--- a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
@@ -82,7 +82,7 @@ class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
Handle<JSFunction> fun = FunctionTester::ForMachineGraph(this->graph());
Handle<Object>* args = NULL;
MaybeHandle<Object> result = Execution::Call(
- this->isolate(), fun, factory()->undefined_value(), 0, args, false);
+ this->isolate(), fun, factory()->undefined_value(), 0, args);
return T::cast(*result.ToHandleChecked());
}
@@ -101,7 +101,7 @@ TEST(RunNumberToInt32_float64) {
FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
kMachFloat64};
Node* loaded = t.LoadField(load, t.PointerConstant(&input));
- NodeProperties::SetBounds(loaded, Bounds(Type::Number()));
+ NodeProperties::SetType(loaded, Type::Number());
Node* convert = t.NumberToInt32(loaded);
FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Signed32(),
kMachInt32};
@@ -128,7 +128,7 @@ TEST(RunNumberToUint32_float64) {
FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
kMachFloat64};
Node* loaded = t.LoadField(load, t.PointerConstant(&input));
- NodeProperties::SetBounds(loaded, Bounds(Type::Number()));
+ NodeProperties::SetType(loaded, Type::Number());
Node* convert = t.NumberToUint32(loaded);
FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Unsigned32(),
kMachUint32};
@@ -266,7 +266,7 @@ TEST(RunLoadStoreArrayBuffer) {
t.GenerateCode();
Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
- Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length);
+ JSArrayBuffer::SetupAllocatingData(array, t.isolate(), array_length);
uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
for (int i = 0; i < array_length; i++) {
data[i] = i;
@@ -687,9 +687,9 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
p1 = graph()->NewNode(common()->Parameter(1), start);
p2 = graph()->NewNode(common()->Parameter(2), start);
typer.Run();
- NodeProperties::SetBounds(p0, Bounds(p0_type));
- NodeProperties::SetBounds(p1, Bounds(p1_type));
- NodeProperties::SetBounds(p2, Bounds(p2_type));
+ NodeProperties::SetType(p0, p0_type);
+ NodeProperties::SetType(p1, p1_type);
+ NodeProperties::SetType(p2, p2_type);
}
void CheckLoweringBinop(IrOpcode::Value expected, const Operator* op) {
@@ -698,6 +698,13 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
CHECK_EQ(expected, node->opcode());
}
+ void CheckLoweringStringBinop(IrOpcode::Value expected, const Operator* op) {
+ Node* node = Return(
+ graph()->NewNode(op, p0, p1, graph()->start(), graph()->start()));
+ Lower();
+ CHECK_EQ(expected, node->opcode());
+ }
+
void CheckLoweringTruncatedBinop(IrOpcode::Value expected, const Operator* op,
const Operator* trunc) {
Node* node = graph()->NewNode(op, p0, p1);
@@ -745,17 +752,6 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
}
}
- Node* ExampleWithTypeAndRep(Type* type, MachineType mach_type) {
- FieldAccess access = {kUntaggedBase, 0, Handle<Name>::null(), type,
- mach_type};
- // TODO(titzer): using loads here just to force the representation is ugly.
- Node* node = graph()->NewNode(simplified()->LoadField(access),
- jsgraph.IntPtrConstant(0), graph()->start(),
- graph()->start());
- NodeProperties::SetBounds(node, Bounds(type));
- return node;
- }
-
Node* Use(Node* node, MachineType type) {
if (type & kTypeInt32) {
return graph()->NewNode(machine()->Int32LessThan(), node,
@@ -1069,7 +1065,7 @@ TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
// NumberToInt32(x: kRepFloat64) used as kMachInt32
TestingGraph t(Type::Number());
- Node* p0 = t.ExampleWithTypeAndRep(Type::Number(), kMachFloat64);
+ Node* p0 = t.ExampleWithOutput(kMachFloat64);
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), p0);
Node* use = t.Use(trunc, kMachInt32);
t.Return(use);
@@ -1131,7 +1127,7 @@ TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32) {
TestingGraph t(Type::Number());
Node* p0 = t.ExampleWithOutput(kMachFloat64);
// TODO(titzer): run the typer here, or attach machine type to param.
- NodeProperties::SetBounds(p0, Bounds(Type::Number()));
+ NodeProperties::SetType(p0, Type::Number());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), p0);
Node* use = t.Use(trunc, kMachUint32);
t.Return(use);
@@ -1158,7 +1154,7 @@ TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_with_change) {
TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_uint32) {
// NumberToUint32(x: kRepFloat64) used as kRepWord32
TestingGraph t(Type::Unsigned32());
- Node* input = t.ExampleWithTypeAndRep(Type::Number(), kMachFloat64);
+ Node* input = t.ExampleWithOutput(kMachFloat64);
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), input);
Node* use = t.Use(trunc, kRepWord32);
t.Return(use);
@@ -1167,58 +1163,6 @@ TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_uint32) {
}
-TEST(LowerNumberToUI32_of_Float64_used_as_word32) {
- // NumberTo(Int,Uint)32(x: kRepFloat64 | kType(Int,Uint)32) used as
- // kType(Int,Uint)32 | kRepWord32
- Type* types[] = {Type::Signed32(), Type::Unsigned32()};
- MachineType mach[] = {kTypeInt32, kTypeUint32, kMachNone};
-
- for (int i = 0; i < 2; i++) {
- for (int u = 0; u < 3; u++) {
- TestingGraph t(types[i]);
- Node* input = t.ExampleWithTypeAndRep(
- types[i], static_cast<MachineType>(kRepFloat64 | mach[i]));
- const Operator* op = i == 0 ? t.simplified()->NumberToInt32()
- : t.simplified()->NumberToUint32();
- Node* trunc = t.graph()->NewNode(op, input);
- Node* use = t.Use(trunc, static_cast<MachineType>(kRepWord32 | mach[u]));
- t.Return(use);
- t.Lower();
- IrOpcode::Value opcode = i == 0 ? IrOpcode::kChangeFloat64ToInt32
- : IrOpcode::kChangeFloat64ToUint32;
- CheckChangeOf(opcode, input, use->InputAt(0));
- }
- }
-}
-
-
-TEST(LowerNumberToUI32_of_Float64_used_as_tagged) {
- // NumberTo(Int,Uint)32(x: kRepFloat64 | kType(Int,Uint)32) used as
- // kType(Int,Uint)32 | kRepTagged
- Type* types[] = {Type::Signed32(), Type::Unsigned32(), Type::Any()};
- MachineType mach[] = {kTypeInt32, kTypeUint32, kMachNone};
-
- for (int i = 0; i < 2; i++) {
- for (int u = 0; u < 3; u++) {
- TestingGraph t(types[i]);
- Node* input = t.ExampleWithTypeAndRep(
- types[i], static_cast<MachineType>(kRepFloat64 | mach[i]));
- const Operator* op = i == 0 ? t.simplified()->NumberToInt32()
- : t.simplified()->NumberToUint32();
- Node* trunc = t.graph()->NewNode(op, input);
- // TODO(titzer): we use the store here to force the representation.
- FieldAccess access = {kTaggedBase, 0, Handle<Name>(), types[u],
- static_cast<MachineType>(mach[u] | kRepTagged)};
- Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
- trunc, t.start, t.start);
- t.Effect(store);
- t.Lower();
- CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, input, store->InputAt(2));
- }
- }
-}
-
-
TEST(LowerReferenceEqual_to_wordeq) {
TestingGraph t(Type::Any(), Type::Any());
IrOpcode::Value opcode =
@@ -1236,9 +1180,10 @@ TEST(LowerStringOps_to_call_and_compare) {
static_cast<IrOpcode::Value>(t.machine()->IntLessThan()->opcode());
IrOpcode::Value compare_le = static_cast<IrOpcode::Value>(
t.machine()->IntLessThanOrEqual()->opcode());
- t.CheckLoweringBinop(compare_eq, t.simplified()->StringEqual());
- t.CheckLoweringBinop(compare_lt, t.simplified()->StringLessThan());
- t.CheckLoweringBinop(compare_le, t.simplified()->StringLessThanOrEqual());
+ t.CheckLoweringStringBinop(compare_eq, t.simplified()->StringEqual());
+ t.CheckLoweringStringBinop(compare_lt, t.simplified()->StringLessThan());
+ t.CheckLoweringStringBinop(compare_le,
+ t.simplified()->StringLessThanOrEqual());
}
@@ -1282,7 +1227,10 @@ TEST(InsertBasicChanges) {
static void CheckChangesAroundBinop(TestingGraph* t, const Operator* op,
IrOpcode::Value input_change,
IrOpcode::Value output_change) {
- Node* binop = t->graph()->NewNode(op, t->p0, t->p1);
+ Node* binop =
+ op->ControlInputCount() == 0
+ ? t->graph()->NewNode(op, t->p0, t->p1)
+ : t->graph()->NewNode(op, t->p0, t->p1, t->graph()->start());
t->Return(binop);
t->Lower();
CHECK_EQ(input_change, binop->InputAt(0)->opcode());
@@ -1397,8 +1345,8 @@ Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
}
-const MachineType kMachineReps[] = {kRepBit, kMachInt8, kMachInt16,
- kMachInt32, kMachInt64, kMachFloat64,
+const MachineType kMachineReps[] = {kMachInt8, kMachInt16, kMachInt32,
+ kMachUint32, kMachInt64, kMachFloat64,
kMachAnyTagged};
} // namespace
@@ -1452,8 +1400,10 @@ TEST(LowerStoreField_to_store) {
}
}
{
- TestingGraph t(Type::Any(),
- Type::Intersect(Type::SignedSmall(), Type::TaggedSigned()));
+ HandleAndZoneScope scope;
+ Zone* z = scope.main_zone();
+ TestingGraph t(Type::Any(), Type::Intersect(Type::SignedSmall(),
+ Type::TaggedSigned(), z));
FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Handle<Name>::null(), Type::Any(), kMachAnyTagged};
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
@@ -1515,8 +1465,11 @@ TEST(LowerStoreElement_to_store) {
}
}
{
- TestingGraph t(Type::Any(), Type::Signed32(),
- Type::Intersect(Type::SignedSmall(), Type::TaggedSigned()));
+ HandleAndZoneScope scope;
+ Zone* z = scope.main_zone();
+ TestingGraph t(
+ Type::Any(), Type::Signed32(),
+ Type::Intersect(Type::SignedSmall(), Type::TaggedSigned(), z));
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
Type::Any(), kMachAnyTagged};
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
@@ -2039,8 +1992,8 @@ TEST(PhiRepresentation) {
Node* phi =
t.graph()->NewNode(t.common()->Phi(kMachAnyTagged, 2), t.p0, t.p1, m);
- Bounds phi_bounds = Bounds::Either(Bounds(d.arg1), Bounds(d.arg2), z);
- NodeProperties::SetBounds(phi, phi_bounds);
+ Type* phi_type = Type::Union(d.arg1, d.arg2, z);
+ NodeProperties::SetType(phi, phi_type);
Node* use = t.Use(phi, d.use);
t.Return(use);
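The recurring SetBounds-to-SetType edits in this file track a typer change made elsewhere in the patch: compiler nodes now carry a single Type* rather than a Bounds pair of lower and upper types, and the substitution is one-for-one. A sketch of the pattern, assuming this revision's NodeProperties API (node stands for any typed node):

    // Old API: a (lower, upper) Bounds pair, here collapsed to a single type.
    NodeProperties::SetBounds(node, Bounds(Type::Number()));

    // New API: attach the type directly.
    NodeProperties::SetType(node, Type::Number());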
diff --git a/deps/v8/test/cctest/compiler/value-helper.h b/deps/v8/test/cctest/compiler/value-helper.h
index b5931fb30e..5c0481fe18 100644
--- a/deps/v8/test/cctest/compiler/value-helper.h
+++ b/deps/v8/test/cctest/compiler/value-helper.h
@@ -47,9 +47,9 @@ class ValueHelper {
CHECK_EQ(expected, OpParameter<int32_t>(node));
}
- void CheckHeapConstant(Object* expected, Node* node) {
+ void CheckHeapConstant(HeapObject* expected, Node* node) {
CHECK_EQ(IrOpcode::kHeapConstant, node->opcode());
- CHECK_EQ(expected, *OpParameter<Unique<Object> >(node).handle());
+ CHECK_EQ(expected, *OpParameter<Handle<HeapObject>>(node));
}
void CheckTrue(Node* node) {
@@ -131,6 +131,40 @@ class ValueHelper {
return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
}
+ static const std::vector<int64_t> int64_vector() {
+ std::vector<uint64_t> values = uint64_vector();
+ return std::vector<int64_t>(values.begin(), values.end());
+ }
+
+ static const std::vector<uint64_t> uint64_vector() {
+ static const uint64_t kValues[] = {
+ 0x00000000, 0x00000001, 0xffffffff,
+ 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+ 0x00000002, 0x00000003, 0x00000004,
+ 0x00000005, 0x00000008, 0x00000009,
+ 0xffffffffffffffff, 0xfffffffffffffffe, 0xfffffffffffffffd,
+ 0x0000000000000000, 0x0000000100000000, 0xffffffff00000000,
+ 0x1b09788b00000000, 0x04c5fce800000000, 0xcc0de5bf00000000,
+ 0x0000000200000000, 0x0000000300000000, 0x0000000400000000,
+ 0x0000000500000000, 0x0000000800000000, 0x0000000900000000,
+ 0x273a798e187937a3, 0xece3af835495a16b, 0x0b668ecc11223344,
+ 0x0000009e, 0x00000043, 0x0000af73,
+ 0x0000116b, 0x00658ecc, 0x002b3b4c,
+ 0x88776655, 0x70000000, 0x07200000,
+ 0x7fffffff, 0x56123761, 0x7fffff00,
+ 0x761c4761eeeeeeee, 0x80000000eeeeeeee, 0x88888888dddddddd,
+ 0xa0000000dddddddd, 0xddddddddaaaaaaaa, 0xe0000000aaaaaaaa,
+ 0xeeeeeeeeeeeeeeee, 0xfffffffdeeeeeeee, 0xf0000000dddddddd,
+ 0x007fffffdddddddd, 0x003fffffaaaaaaaa, 0x001fffffaaaaaaaa,
+ 0x000fffff, 0x0007ffff, 0x0003ffff,
+ 0x0001ffff, 0x0000ffff, 0x00007fff,
+ 0x00003fff, 0x00001fff, 0x00000fff,
+ 0x000007ff, 0x000003ff, 0x000001ff,
+ 0x00003fffffffffff, 0x00001fffffffffff, 0x00000fffffffffff,
+ 0x000007ffffffffff, 0x000003ffffffffff, 0x000001ffffffffff};
+ return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+ }
+
static const std::vector<double> nan_vector(size_t limit = 0) {
static const double nan = std::numeric_limits<double>::quiet_NaN();
static const double values[] = {-nan, -V8_INFINITY * -0.0,
@@ -156,6 +190,8 @@ class ValueHelper {
#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
+#define FOR_INT64_INPUTS(var) FOR_INPUTS(int64_t, int64, var)
+#define FOR_UINT64_INPUTS(var) FOR_INPUTS(uint64_t, uint64, var)
#define FOR_FLOAT32_INPUTS(var) FOR_INPUTS(float, float32, var)
#define FOR_FLOAT64_INPUTS(var) FOR_INPUTS(double, float64, var)
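The new FOR_INT64_INPUTS/FOR_UINT64_INPUTS macros follow the existing FOR_INPUTS convention: the loop variable is an iterator over the corresponding static vector, so loop bodies dereference it. A usage sketch (the test name is hypothetical and the assertion is a placeholder for a real property check):

    TEST(ExampleUint64Coverage) {
      FOR_UINT64_INPUTS(i) {
        // *i walks ValueHelper::uint64_vector(), which covers boundary
        // values such as 0, 0xffffffff, and 0xffffffffffffffff.
        uint64_t value = *i;
        CHECK_EQ(value, value);
      }
    }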
diff --git a/deps/v8/test/cctest/expression-type-collector-macros.h b/deps/v8/test/cctest/expression-type-collector-macros.h
new file mode 100644
index 0000000000..0b739f40b7
--- /dev/null
+++ b/deps/v8/test/cctest/expression-type-collector-macros.h
@@ -0,0 +1,38 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXPRESSION_TYPE_COLLECTOR_MACROS_H_
+#define V8_EXPRESSION_TYPE_COLLECTOR_MACROS_H_
+
+#define CHECK_TYPES_BEGIN \
+ { \
+ size_t index = 0; \
+ int depth = 0;
+
+#define CHECK_TYPES_END \
+ CHECK_EQ(index, types.size()); \
+ }
+
+#define CHECK_EXPR(ekind, type) \
+ CHECK_LT(index, types.size()); \
+ CHECK(strcmp(#ekind, types[index].kind) == 0); \
+ CHECK_EQ(depth, types[index].depth); \
+ CHECK(types[index].bounds.Narrows(type)); \
+ for (int j = (++depth, ++index, 0); j < 1 ? 1 : (--depth, 0); ++j)
+
+#define CHECK_VAR(vname, type) \
+ CHECK_EXPR(VariableProxy, type); \
+ CHECK_EQ(#vname, std::string(types[index - 1].name->raw_data(), \
+ types[index - 1].name->raw_data() + \
+ types[index - 1].name->byte_length()));
+
+#define CHECK_SKIP() \
+ { \
+ ++index; \
+ while (index < types.size() && types[index].depth > depth) { \
+ ++index; \
+ } \
+ }
+
+#endif // V8_EXPRESSION_TYPE_COLLECTOR_MACROS_H_
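The unusual for clause at the end of CHECK_EXPR is what makes nested-brace checking work: the body runs exactly once, depth and index are bumped on entry, and depth is restored when the condition is evaluated a second time. The same idiom in isolation, as runnable plain C++ (SCOPED_DEPTH is a hypothetical stand-in for the macro's tail):

    #include <iostream>

    int depth = 0;

    // Runs its body exactly once; depth is one higher inside the body and
    // restored afterwards, mirroring CHECK_EXPR's (++depth, ++index, 0) and
    // (--depth, 0) clauses.
    #define SCOPED_DEPTH() \
      for (int j = (++depth, 0); j < 1 ? 1 : (--depth, 0); ++j)

    int main() {
      SCOPED_DEPTH() {
        std::cout << "inside: depth=" << depth << "\n";    // depth=1
        SCOPED_DEPTH() {
          std::cout << "nested: depth=" << depth << "\n";  // depth=2
        }
      }
      std::cout << "after: depth=" << depth << "\n";       // depth=0
    }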
diff --git a/deps/v8/test/cctest/expression-type-collector.cc b/deps/v8/test/cctest/expression-type-collector.cc
new file mode 100644
index 0000000000..9ab85edf82
--- /dev/null
+++ b/deps/v8/test/cctest/expression-type-collector.cc
@@ -0,0 +1,61 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/expression-type-collector.h"
+
+#include "src/ast.h"
+#include "src/codegen.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace {
+
+struct {
+ AstNode::NodeType type;
+ const char* name;
+} NodeTypeNameList[] = {
+#define DECLARE_VISIT(type) \
+ { AstNode::k##type, #type } \
+ ,
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+};
+}
+
+
+ExpressionTypeCollector::ExpressionTypeCollector(
+ Isolate* isolate, Zone* zone, FunctionLiteral* root,
+ ZoneVector<ExpressionTypeEntry>* dst)
+ : AstExpressionVisitor(isolate, zone, root), result_(dst) {}
+
+
+void ExpressionTypeCollector::Run() {
+ result_->clear();
+ AstExpressionVisitor::Run();
+}
+
+
+void ExpressionTypeCollector::VisitExpression(Expression* expression) {
+ ExpressionTypeEntry e;
+ e.depth = depth();
+ VariableProxy* proxy = expression->AsVariableProxy();
+ if (proxy) {
+ e.name = proxy->raw_name();
+ }
+ e.bounds = expression->bounds();
+ AstNode::NodeType type = expression->node_type();
+ e.kind = "unknown";
+ for (size_t i = 0; i < arraysize(NodeTypeNameList); ++i) {
+ if (NodeTypeNameList[i].type == type) {
+ e.kind = NodeTypeNameList[i].name;
+ break;
+ }
+ }
+ result_->push_back(e);
+}
+}
+}
diff --git a/deps/v8/test/cctest/expression-type-collector.h b/deps/v8/test/cctest/expression-type-collector.h
new file mode 100644
index 0000000000..2175f5a045
--- /dev/null
+++ b/deps/v8/test/cctest/expression-type-collector.h
@@ -0,0 +1,39 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXPRESSION_TYPE_COLLECTOR_H_
+#define V8_EXPRESSION_TYPE_COLLECTOR_H_
+
+#include "src/ast-expression-visitor.h"
+
+namespace v8 {
+namespace internal {
+
+// A visitor over an AST that collects a human-readable string summarizing
+// structure and types. Used for testing the typing information attached
+// to the expression nodes of an AST.
+
+struct ExpressionTypeEntry {
+ int depth;
+ const char* kind;
+ const AstRawString* name;
+ Bounds bounds;
+};
+
+class ExpressionTypeCollector : public AstExpressionVisitor {
+ public:
+ ExpressionTypeCollector(Isolate* isolate, Zone* zone, FunctionLiteral* root,
+ ZoneVector<ExpressionTypeEntry>* dst);
+ void Run();
+
+ protected:
+ void VisitExpression(Expression* expression);
+
+ private:
+ ZoneVector<ExpressionTypeEntry>* result_;
+};
+}
+} // namespace v8::internal
+
+#endif // V8_EXPRESSION_TYPE_COLLECTOR_H_
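Putting the two new headers together, a typing test first runs the collector over a function's AST and then walks the collected entries with the macros. Roughly (a hedged sketch: the parsing and typing setup is elided, and root, zone, isolate, and bounds are placeholders for whatever the test under construction provides):

    ZoneVector<ExpressionTypeEntry> types(zone);
    ExpressionTypeCollector collector(isolate, zone, root, &types);
    collector.Run();

    CHECK_TYPES_BEGIN
      CHECK_EXPR(FunctionLiteral, bounds) {
        CHECK_VAR(x, bounds);  // child entries sit one depth level deeper
        CHECK_SKIP();          // skip over an uninteresting subtree
      }
    CHECK_TYPES_END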
diff --git a/deps/v8/test/cctest/gay-fixed.h b/deps/v8/test/cctest/gay-fixed.h
index f5c31edf6a..c14c11f050 100644
--- a/deps/v8/test/cctest/gay-fixed.h
+++ b/deps/v8/test/cctest/gay-fixed.h
@@ -28,6 +28,8 @@
#ifndef GAY_FIXED_H_
#define GAY_FIXED_H_
+#include "src/vector.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/gay-precision.h b/deps/v8/test/cctest/gay-precision.h
index 10dd58b3cd..08aaa1d3da 100644
--- a/deps/v8/test/cctest/gay-precision.h
+++ b/deps/v8/test/cctest/gay-precision.h
@@ -28,6 +28,8 @@
#ifndef GAY_PRECISION_H_
#define GAY_PRECISION_H_
+#include "src/vector.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/gay-shortest.h b/deps/v8/test/cctest/gay-shortest.h
index 9b512148e6..4efe76ba0b 100644
--- a/deps/v8/test/cctest/gay-shortest.h
+++ b/deps/v8/test/cctest/gay-shortest.h
@@ -28,6 +28,8 @@
#ifndef GAY_SHORTEST_H_
#define GAY_SHORTEST_H_
+#include "src/vector.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/heap-tester.h b/deps/v8/test/cctest/heap-tester.h
new file mode 100644
index 0000000000..fc2e254393
--- /dev/null
+++ b/deps/v8/test/cctest/heap-tester.h
@@ -0,0 +1,59 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef HEAP_TESTER_H_
+#define HEAP_TESTER_H_
+
+#include "src/handles.h"
+#include "src/heap/spaces.h"
+
+// Tests that should have access to private methods of {v8::internal::Heap}.
+// Those tests need to be defined using HEAP_TEST(Name) { ... }.
+#define HEAP_TEST_METHODS(V) \
+ V(GCFlags) \
+ V(MarkCompactCollector) \
+ V(NoPromotion) \
+ V(NumberStringCacheSize) \
+ V(ObjectGroups) \
+ V(Promotion) \
+ V(Regression39128) \
+ V(ResetWeakHandle) \
+ V(StressHandles) \
+ V(TestSizeOfObjects) \
+ V(WriteBarriersInCopyJSObject)
+
+
+#define HEAP_TEST(Name) \
+ CcTest register_test_##Name(v8::internal::HeapTester::Test##Name, __FILE__, \
+ #Name, NULL, true, true); \
+ void v8::internal::HeapTester::Test##Name()
+
+
+#define THREADED_HEAP_TEST(Name) \
+ RegisterThreadedTest register_##Name(v8::internal::HeapTester::Test##Name, \
+ #Name); \
+ /* */ HEAP_TEST(Name)
+
+
+namespace v8 {
+namespace internal {
+
+class HeapTester {
+ public:
+#define DECLARE_STATIC(Name) static void Test##Name();
+
+ HEAP_TEST_METHODS(DECLARE_STATIC)
+#undef DECLARE_STATIC
+
+ /* test-alloc.cc */
+ static AllocationResult AllocateAfterFailures();
+ static Handle<Object> TestAllocateAfterFailures();
+
+ /* test-api.cc */
+ static void ResetWeakHandle(bool global_gc);
+};
+}
+}
+
+#endif // HEAP_TESTER_H_
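HEAP_TEST both registers the test with CcTest and defines its body as a static member of HeapTester, so it runs with whatever friend-level access to Heap internals the class has been granted. A sketch of the shape a body in test-heap.cc takes (the GC call is illustrative, not the real test logic):

    HEAP_TEST(NoPromotion) {
      // Expands to void v8::internal::HeapTester::TestNoPromotion() plus a
      // CcTest registration, so it shows up like any other cctest.
      CcTest::InitializeVM();
      Heap* heap = CcTest::heap();
      heap->CollectAllGarbage();
    }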
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index deb6c971a1..23c6486f4f 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -5,6 +5,7 @@
#include "src/v8.h"
#include "src/compiler.h"
+#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/interpreter.h"
#include "test/cctest/cctest.h"
@@ -15,15 +16,23 @@ namespace interpreter {
class BytecodeGeneratorHelper {
public:
- const char* kFunctionName = "my_function";
+ const char* kFunctionName = "f";
+
+ static const int kLastParamIndex =
+ -InterpreterFrameConstants::kLastParamFromRegisterPointer / kPointerSize;
BytecodeGeneratorHelper() {
+ i::FLAG_vector_stores = true;
i::FLAG_ignition = true;
- i::FLAG_ignition_filter = kFunctionName;
+ i::FLAG_ignition_filter = StrDup(kFunctionName);
+ i::FLAG_always_opt = false;
CcTest::i_isolate()->interpreter()->Initialize();
}
+ Factory* factory() { return CcTest::i_isolate()->factory(); }
+
+
Handle<BytecodeArray> MakeBytecode(const char* script,
const char* function_name) {
CompileRun(script);
@@ -40,48 +49,132 @@ class BytecodeGeneratorHelper {
kFunctionName);
return MakeBytecode(program.start(), kFunctionName);
}
+
+ Handle<BytecodeArray> MakeBytecodeForFunction(const char* function) {
+ ScopedVector<char> program(1024);
+ SNPrintF(program, "%s\n%s();", function, kFunctionName);
+ return MakeBytecode(program.start(), kFunctionName);
+ }
};
+// Helper macros for handcrafting bytecode sequences.
+#define B(x) static_cast<uint8_t>(Bytecode::k##x)
+#define U8(x) static_cast<uint8_t>((x) & 0xff)
+#define R(x) static_cast<uint8_t>(-(x) & 0xff)
+#define _ static_cast<uint8_t>(0x5a)
+
+
// Structure for containing expected bytecode snippets.
+template<typename T>
struct ExpectedSnippet {
- const char* body;
+ const char* code_snippet;
int frame_size;
+ int parameter_count;
int bytecode_length;
- const uint8_t bytecode[16];
+ const uint8_t bytecode[512];
+ int constant_count;
+ T constants[4];
};
-// Helper macros for handcrafting bytecode sequences.
-#define B(x) static_cast<uint8_t>(Bytecode::k##x)
-#define U8(x) static_cast<uint8_t>(x & 0xff)
-#define R(x) static_cast<uint8_t>(-x & 0xff)
+static void CheckConstant(int expected, Object* actual) {
+ CHECK_EQ(expected, Smi::cast(actual)->value());
+}
+
+
+static void CheckConstant(double expected, Object* actual) {
+ CHECK_EQ(expected, HeapNumber::cast(actual)->value());
+}
+
+
+static void CheckConstant(const char* expected, Object* actual) {
+ Handle<String> expected_string =
+ CcTest::i_isolate()->factory()->NewStringFromAsciiChecked(expected);
+ CHECK(String::cast(actual)->Equals(*expected_string));
+}
+
+
+static void CheckConstant(Handle<Object> expected, Object* actual) {
+ CHECK(actual == *expected || expected->StrictEquals(actual));
+}
+
+
+template <typename T>
+static void CheckBytecodeArrayEqual(struct ExpectedSnippet<T> expected,
+ Handle<BytecodeArray> actual,
+ bool has_unknown = false) {
+ CHECK_EQ(actual->frame_size(), expected.frame_size);
+ CHECK_EQ(actual->parameter_count(), expected.parameter_count);
+ CHECK_EQ(actual->length(), expected.bytecode_length);
+ if (expected.constant_count == 0) {
+ CHECK_EQ(actual->constant_pool(), CcTest::heap()->empty_fixed_array());
+ } else {
+ CHECK_EQ(actual->constant_pool()->length(), expected.constant_count);
+ for (int i = 0; i < expected.constant_count; i++) {
+ CheckConstant(expected.constants[i], actual->constant_pool()->get(i));
+ }
+ }
+
+ BytecodeArrayIterator iterator(actual);
+ int i = 0;
+ while (!iterator.done()) {
+ int bytecode_index = i++;
+ Bytecode bytecode = iterator.current_bytecode();
+ if (Bytecodes::ToByte(bytecode) != expected.bytecode[bytecode_index]) {
+ std::ostringstream stream;
+ stream << "Check failed: expected bytecode [" << bytecode_index
+ << "] to be " << Bytecodes::ToString(static_cast<Bytecode>(
+ expected.bytecode[bytecode_index]))
+ << " but got " << Bytecodes::ToString(bytecode);
+ FATAL(stream.str().c_str());
+ }
+ for (int j = 0; j < Bytecodes::NumberOfOperands(bytecode); ++j, ++i) {
+ uint8_t raw_operand =
+ iterator.GetRawOperand(j, Bytecodes::GetOperandType(bytecode, j));
+ if (has_unknown) {
+        // Check that the actual bytecode array doesn't contain the same
+        // byte as the one we use to mark an unknown operand.
+ CHECK_NE(raw_operand, _);
+ if (expected.bytecode[i] == _) {
+ continue;
+ }
+ }
+ if (raw_operand != expected.bytecode[i]) {
+ std::ostringstream stream;
+ stream << "Check failed: expected operand [" << j << "] for bytecode ["
+ << bytecode_index << "] to be "
+ << static_cast<unsigned int>(expected.bytecode[i]) << " but got "
+ << static_cast<unsigned int>(raw_operand);
+ FATAL(stream.str().c_str());
+ }
+ }
+ iterator.Advance();
+ }
+}
TEST(PrimitiveReturnStatements) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
- ExpectedSnippet snippets[] = {
- {"return;", 0, 2, {B(LdaUndefined), B(Return)}},
- {"return null;", 0, 2, {B(LdaNull), B(Return)}},
- {"return true;", 0, 2, {B(LdaTrue), B(Return)}},
- {"return false;", 0, 2, {B(LdaFalse), B(Return)}},
- {"return 0;", 0, 2, {B(LdaZero), B(Return)}},
- {"return +1;", 0, 3, {B(LdaSmi8), U8(1), B(Return)}},
- {"return -1;", 0, 3, {B(LdaSmi8), U8(-1), B(Return)}},
- {"return +127;", 0, 3, {B(LdaSmi8), U8(127), B(Return)}},
- {"return -128;", 0, 3, {B(LdaSmi8), U8(-128), B(Return)}},
+ ExpectedSnippet<int> snippets[] = {
+ {"", 0, 1, 2, {B(LdaUndefined), B(Return)}, 0},
+ {"return;", 0, 1, 2, {B(LdaUndefined), B(Return)}, 0},
+ {"return null;", 0, 1, 2, {B(LdaNull), B(Return)}, 0},
+ {"return true;", 0, 1, 2, {B(LdaTrue), B(Return)}, 0},
+ {"return false;", 0, 1, 2, {B(LdaFalse), B(Return)}, 0},
+ {"return 0;", 0, 1, 2, {B(LdaZero), B(Return)}, 0},
+ {"return +1;", 0, 1, 3, {B(LdaSmi8), U8(1), B(Return)}, 0},
+ {"return -1;", 0, 1, 3, {B(LdaSmi8), U8(-1), B(Return)}, 0},
+ {"return +127;", 0, 1, 3, {B(LdaSmi8), U8(127), B(Return)}, 0},
+ {"return -128;", 0, 1, 3, {B(LdaSmi8), U8(-128), B(Return)}, 0},
};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
- Handle<BytecodeArray> ba =
- helper.MakeBytecodeForFunctionBody(snippets[i].body);
- CHECK_EQ(ba->frame_size(), snippets[i].frame_size);
- CHECK_EQ(ba->length(), snippets[i].bytecode_length);
- CHECK(!memcmp(ba->GetFirstBytecodeAddress(), snippets[i].bytecode,
- ba->length()));
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
}
}
@@ -90,18 +183,22 @@ TEST(PrimitiveExpressions) {
InitializedHandleScope handle_scope;
BytecodeGeneratorHelper helper;
- ExpectedSnippet snippets[] = {
+ ExpectedSnippet<int> snippets[] = {
{"var x = 0; return x;",
kPointerSize,
+ 1,
6,
{
B(LdaZero), //
B(Star), R(0), //
B(Ldar), R(0), //
B(Return) //
- }},
+ },
+ 0
+ },
{"var x = 0; return x + 3;",
2 * kPointerSize,
+ 1,
12,
{
B(LdaZero), //
@@ -111,19 +208,701 @@ TEST(PrimitiveExpressions) {
B(LdaSmi8), U8(3), //
B(Add), R(1), //
B(Return) //
- }}};
+ },
+ 0
+ }};
- size_t num_snippets = sizeof(snippets) / sizeof(snippets[0]);
- for (size_t i = 0; i < num_snippets; i++) {
- Handle<BytecodeArray> ba =
- helper.MakeBytecodeForFunctionBody(snippets[i].body);
- CHECK_EQ(ba->frame_size(), snippets[i].frame_size);
- CHECK_EQ(ba->length(), snippets[i].bytecode_length);
- CHECK(!memcmp(ba->GetFirstBytecodeAddress(), snippets[i].bytecode,
- ba->length()));
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
}
}
+
+TEST(Parameters) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ ExpectedSnippet<int> snippets[] = {
+ {"function f() { return this; }",
+ 0, 1, 3, {B(Ldar), R(helper.kLastParamIndex), B(Return)}, 0},
+ {"function f(arg1) { return arg1; }",
+ 0, 2, 3, {B(Ldar), R(helper.kLastParamIndex), B(Return)}, 0},
+ {"function f(arg1) { return this; }",
+ 0, 2, 3, {B(Ldar), R(helper.kLastParamIndex - 1), B(Return)}, 0},
+ {"function f(arg1, arg2, arg3, arg4, arg5, arg6, arg7) { return arg4; }",
+ 0, 8, 3, {B(Ldar), R(helper.kLastParamIndex - 3), B(Return)}, 0},
+ {"function f(arg1, arg2, arg3, arg4, arg5, arg6, arg7) { return this; }",
+ 0, 8, 3, {B(Ldar), R(helper.kLastParamIndex - 7), B(Return)}, 0}
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunction(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(IntegerConstants) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ ExpectedSnippet<int> snippets[] = {
+ {"return 12345678;",
+ 0,
+ 1,
+ 3,
+ {
+ B(LdaConstant), U8(0), //
+ B(Return) //
+ },
+ 1,
+ {12345678}},
+ {"var a = 1234; return 5678;",
+ 1 * kPointerSize,
+ 1,
+ 7,
+ {
+ B(LdaConstant), U8(0), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(1), //
+ B(Return) //
+ },
+ 2,
+ {1234, 5678}},
+ {"var a = 1234; return 1234;",
+ 1 * kPointerSize,
+ 1,
+ 7,
+ {
+ B(LdaConstant), U8(0), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(0), //
+ B(Return) //
+ },
+ 1,
+ {1234}}};
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(HeapNumberConstants) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ ExpectedSnippet<double> snippets[] = {
+ {"return 1.2;",
+ 0,
+ 1,
+ 3,
+ {
+ B(LdaConstant), U8(0), //
+ B(Return) //
+ },
+ 1,
+ {1.2}},
+ {"var a = 1.2; return 2.6;",
+ 1 * kPointerSize,
+ 1,
+ 7,
+ {
+ B(LdaConstant), U8(0), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(1), //
+ B(Return) //
+ },
+ 2,
+ {1.2, 2.6}},
+ {"var a = 3.14; return 3.14;",
+ 1 * kPointerSize,
+ 1,
+ 7,
+ {
+ B(LdaConstant), U8(0), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(1), //
+ B(Return) //
+ },
+ 2,
+ {3.14, 3.14}}};
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(StringConstants) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ ExpectedSnippet<const char*> snippets[] = {
+ {"return \"This is a string\";",
+ 0,
+ 1,
+ 3,
+ {
+ B(LdaConstant), U8(0), //
+ B(Return) //
+ },
+ 1,
+ {"This is a string"}},
+ {"var a = \"First string\"; return \"Second string\";",
+ 1 * kPointerSize,
+ 1,
+ 7,
+ {
+ B(LdaConstant), U8(0), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(1), //
+ B(Return) //
+ },
+ 2,
+ {"First string", "Second string"}},
+ {"var a = \"Same string\"; return \"Same string\";",
+ 1 * kPointerSize,
+ 1,
+ 7,
+ {
+ B(LdaConstant), U8(0), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(0), //
+ B(Return) //
+ },
+ 1,
+ {"Same string"}}};
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecodeForFunctionBody(snippets[i].code_snippet);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(PropertyLoads) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ FeedbackVectorSlotKind ic_kinds[] = {i::FeedbackVectorSlotKind::LOAD_IC,
+ i::FeedbackVectorSlotKind::LOAD_IC};
+ StaticFeedbackVectorSpec feedback_spec(0, 2, ic_kinds);
+ Handle<i::TypeFeedbackVector> vector =
+ helper.factory()->NewTypeFeedbackVector(&feedback_spec);
+
+ ExpectedSnippet<const char*> snippets[] = {
+ {"function f(a) { return a.name; }\nf({name : \"test\"})",
+ 1 * kPointerSize,
+ 2,
+ 10,
+ {
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(0), //
+ B(LoadIC), R(0), U8(vector->first_ic_slot_index()), //
+ B(Return) //
+ },
+ 1,
+ {"name"}},
+ {"function f(a) { return a[\"key\"]; }\nf({key : \"test\"})",
+ 1 * kPointerSize,
+ 2,
+ 10,
+ {
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(0), //
+ B(LoadIC), R(0), U8(vector->first_ic_slot_index()), //
+ B(Return) //
+ },
+ 1,
+ {"key"}},
+ {"function f(a) { return a[100]; }\nf({100 : \"test\"})",
+ 1 * kPointerSize,
+ 2,
+ 10,
+ {
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(100), //
+ B(KeyedLoadIC), R(0), U8(vector->first_ic_slot_index()), //
+ B(Return) //
+ },
+ 0},
+ {"function f(a, b) { return a[b]; }\nf({arg : \"test\"}, \"arg\")",
+ 1 * kPointerSize,
+ 3,
+ 10,
+ {
+ B(Ldar), R(helper.kLastParamIndex - 1), //
+ B(Star), R(0), //
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(KeyedLoadIC), R(0), U8(vector->first_ic_slot_index()), //
+ B(Return) //
+ },
+ 0},
+ {"function f(a) { var b = a.name; return a[-124]; }\n"
+ "f({\"-124\" : \"test\", name : 123 })",
+ 2 * kPointerSize,
+ 2,
+ 21,
+ {
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(0), //
+ B(LoadIC), R(1), U8(vector->first_ic_slot_index()), //
+ B(Star), R(0), //
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(1), //
+ B(LdaSmi8), U8(-124), //
+ B(KeyedLoadIC), R(1), U8(vector->first_ic_slot_index() + 2), //
+ B(Return) //
+ },
+ 1,
+ {"name"}}};
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(PropertyStores) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ FeedbackVectorSlotKind ic_kinds[] = {i::FeedbackVectorSlotKind::STORE_IC,
+ i::FeedbackVectorSlotKind::STORE_IC};
+ StaticFeedbackVectorSpec feedback_spec(0, 2, ic_kinds);
+ Handle<i::TypeFeedbackVector> vector =
+ helper.factory()->NewTypeFeedbackVector(&feedback_spec);
+
+ ExpectedSnippet<const char*> snippets[] = {
+ {"function f(a) { a.name = \"val\"; }\nf({name : \"test\"})",
+ 2 * kPointerSize,
+ 2,
+ 16,
+ {
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(1), //
+ B(StoreIC), R(0), R(1), U8(vector->first_ic_slot_index()), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 2,
+ {"name", "val"}},
+ {"function f(a) { a[\"key\"] = \"val\"; }\nf({key : \"test\"})",
+ 2 * kPointerSize,
+ 2,
+ 16,
+ {
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(1), //
+ B(StoreIC), R(0), R(1), U8(vector->first_ic_slot_index()), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 2,
+ {"key", "val"}},
+ {"function f(a) { a[100] = \"val\"; }\nf({100 : \"test\"})",
+ 2 * kPointerSize,
+ 2,
+ 16,
+ {
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(0), //
+ B(LdaSmi8), U8(100), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(0), //
+ B(KeyedStoreIC), R(0), R(1), U8(vector->first_ic_slot_index()), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 1,
+ {"val"}},
+ {"function f(a, b) { a[b] = \"val\"; }\nf({arg : \"test\"}, \"arg\")",
+ 2 * kPointerSize,
+ 3,
+ 16,
+ {
+ B(Ldar), R(helper.kLastParamIndex - 1), //
+ B(Star), R(0), //
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(0), //
+ B(KeyedStoreIC), R(0), R(1), U8(vector->first_ic_slot_index()), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 1,
+ {"val"}},
+ {"function f(a) { a.name = a[-124]; }\n"
+ "f({\"-124\" : \"test\", name : 123 })",
+ 3 * kPointerSize,
+ 2,
+ 23,
+ {
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(0), //
+ B(LdaConstant), U8(0), //
+ B(Star), R(1), //
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(2), //
+ B(LdaSmi8), U8(-124), //
+ B(KeyedLoadIC), R(2), U8(vector->first_ic_slot_index()), //
+ B(StoreIC), R(0), R(1), U8(vector->first_ic_slot_index() + 2), //
+ B(LdaUndefined), //
+ B(Return) //
+ },
+ 1,
+ {"name"}}};
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+#define FUNC_ARG "new (function Obj() { this.func = function() { return; }})()"
+
+
+TEST(PropertyCall) {
+ InitializedHandleScope handle_scope;
+  BytecodeGeneratorHelper helper;
+
+ FeedbackVectorSlotKind ic_kinds[] = {i::FeedbackVectorSlotKind::LOAD_IC,
+ i::FeedbackVectorSlotKind::LOAD_IC};
+ StaticFeedbackVectorSpec feedback_spec(0, 2, ic_kinds);
+ Handle<i::TypeFeedbackVector> vector =
+ helper.factory()->NewTypeFeedbackVector(&feedback_spec);
+
+ ExpectedSnippet<const char*> snippets[] = {
+ {"function f(a) { return a.func(); }\nf(" FUNC_ARG ")",
+ 2 * kPointerSize,
+ 2,
+ 16,
+ {
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(0), //
+ B(LoadIC), R(1), U8(vector->first_ic_slot_index() + 2), //
+ B(Star), R(0), //
+ B(Call), R(0), R(1), U8(0), //
+ B(Return) //
+ },
+ 1,
+ {"func"}},
+ {"function f(a, b, c) { return a.func(b, c); }\nf(" FUNC_ARG ", 1, 2)",
+ 4 * kPointerSize,
+ 4,
+ 24,
+ {
+ B(Ldar), R(helper.kLastParamIndex - 2), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(0), //
+ B(LoadIC), R(1), U8(vector->first_ic_slot_index() + 2), //
+ B(Star), R(0), //
+ B(Ldar), R(helper.kLastParamIndex - 1), //
+ B(Star), R(2), //
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(3), //
+ B(Call), R(0), R(1), U8(2), //
+ B(Return) //
+ },
+ 1,
+ {"func"}},
+ {"function f(a, b) { return a.func(b + b, b); }\nf(" FUNC_ARG ", 1)",
+ 4 * kPointerSize,
+ 3,
+ 30,
+ {
+ B(Ldar), R(helper.kLastParamIndex - 1), //
+ B(Star), R(1), //
+ B(LdaConstant), U8(0), //
+ B(LoadIC), R(1), U8(vector->first_ic_slot_index() + 2), //
+ B(Star), R(0), //
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(2), //
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Add), R(2), //
+ B(Star), R(2), //
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(3), //
+ B(Call), R(0), R(1), U8(2), //
+ B(Return) //
+ },
+ 1,
+ {"func"}}};
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
+TEST(LoadGlobal) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ ExpectedSnippet<const char*> snippets[] = {
+ {"var a = 1;\nfunction f() { return a; }\nf()",
+ 0, 1, 3,
+ {
+ B(LdaGlobal), _,
+ B(Return)
+ },
+ },
+ {"function t() { }\nfunction f() { return t; }\nf()",
+ 0, 1, 3,
+ {
+ B(LdaGlobal), _,
+ B(Return)
+ },
+ },
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecode(snippets[i].code_snippet, "f");
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array, true);
+ }
+}
+
+
+TEST(CallGlobal) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ ExpectedSnippet<const char*> snippets[] = {
+ {"function t() { }\nfunction f() { return t(); }\nf()",
+ 2 * kPointerSize, 1, 12,
+ {
+ B(LdaUndefined),
+ B(Star), R(1),
+ B(LdaGlobal), _,
+ B(Star), R(0),
+ B(Call), R(0), R(1), U8(0),
+ B(Return)
+ },
+ },
+ {"function t(a, b, c) { }\nfunction f() { return t(1, 2, 3); }\nf()",
+ 5 * kPointerSize, 1, 24,
+ {
+ B(LdaUndefined),
+ B(Star), R(1),
+ B(LdaGlobal), _,
+ B(Star), R(0),
+ B(LdaSmi8), U8(1),
+ B(Star), R(2),
+ B(LdaSmi8), U8(2),
+ B(Star), R(3),
+ B(LdaSmi8), U8(3),
+ B(Star), R(4),
+ B(Call), R(0), R(1), U8(3),
+ B(Return)
+ },
+ },
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecode(snippets[i].code_snippet, "f");
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array, true);
+ }
+}
+
+
+TEST(IfConditions) {
+ InitializedHandleScope handle_scope;
+ BytecodeGeneratorHelper helper;
+
+ Handle<Object> unused = helper.factory()->undefined_value();
+
+ ExpectedSnippet<Handle<Object>> snippets[] = {
+ {"function f() { if (0) { return 1; } else { return -1; } } f()",
+ 0,
+ 1,
+ 14,
+ {B(LdaZero), //
+ B(ToBoolean), //
+ B(JumpIfFalse), U8(7), //
+ B(LdaSmi8), U8(1), //
+ B(Return), //
+ B(Jump), U8(5), // TODO(oth): Unreachable jump after return
+ B(LdaSmi8), U8(-1), //
+ B(Return), //
+ B(LdaUndefined), //
+ B(Return)}, //
+ 0,
+ {unused, unused, unused, unused}},
+ {"function f() { if ('lucky') { return 1; } else { return -1; } } f();",
+ 0,
+ 1,
+ 15,
+ {B(LdaConstant), U8(0), //
+ B(ToBoolean), //
+ B(JumpIfFalse), U8(7), //
+ B(LdaSmi8), U8(1), //
+ B(Return), //
+ B(Jump), U8(5), // TODO(oth): Unreachable jump after return
+ B(LdaSmi8), U8(-1), //
+ B(Return), //
+ B(LdaUndefined), //
+ B(Return)}, //
+ 1,
+ {helper.factory()->NewStringFromStaticChars("lucky"), unused, unused,
+ unused}},
+ {"function f() { if (false) { return 1; } else { return -1; } } f();",
+ 0,
+ 1,
+ 13,
+ {B(LdaFalse), //
+ B(JumpIfFalse), U8(7), //
+ B(LdaSmi8), U8(1), //
+ B(Return), //
+ B(Jump), U8(5), // TODO(oth): Unreachable jump after return
+ B(LdaSmi8), U8(-1), //
+ B(Return), //
+ B(LdaUndefined), //
+ B(Return)}, //
+ 0,
+ {unused, unused, unused, unused}},
+ {"function f(a) { if (a <= 0) { return 200; } else { return -200; } }"
+ "f(99);",
+ kPointerSize,
+ 2,
+ 19,
+ {B(Ldar), R(helper.kLastParamIndex), //
+ B(Star), R(0), //
+ B(LdaZero), //
+ B(TestLessThanOrEqual), R(0), //
+ B(JumpIfFalse), U8(7), //
+ B(LdaConstant), U8(0), //
+ B(Return), //
+ B(Jump), U8(5), // TODO(oth): Unreachable jump after return
+ B(LdaConstant), U8(1), //
+ B(Return), //
+ B(LdaUndefined), //
+ B(Return)}, //
+ 2,
+ {helper.factory()->NewNumberFromInt(200),
+ helper.factory()->NewNumberFromInt(-200), unused, unused}},
+ {"function f(a, b) { if (a in b) { return 200; } }"
+ "f('prop', { prop: 'yes'});",
+ kPointerSize,
+ 3,
+ 17,
+ {B(Ldar), R(helper.kLastParamIndex - 1), //
+ B(Star), R(0), //
+ B(Ldar), R(helper.kLastParamIndex), //
+ B(TestIn), R(0), //
+ B(JumpIfFalse), U8(7), //
+ B(LdaConstant), U8(0), //
+ B(Return), //
+ B(Jump), U8(2), // TODO(oth): Unreachable jump after return
+ B(LdaUndefined), //
+ B(Return)}, //
+ 1,
+ {helper.factory()->NewNumberFromInt(200), unused, unused, unused}},
+ {"function f(z) { var a = 0; var b = 0; if (a === 0.01) { "
+#define X "b = a; a = b; "
+ X X X X X X X X X X X X X X X X X X X X X X X X
+#undef X
+ " return 200; } else { return -200; } } f(0.001)",
+ 3 * kPointerSize,
+ 2,
+ 218,
+ {B(LdaZero), //
+ B(Star), R(0), //
+ B(LdaZero), //
+ B(Star), R(1), //
+ B(Ldar), R(0), //
+ B(Star), R(2), //
+ B(LdaConstant), U8(0), //
+ B(TestEqualStrict), R(2), //
+ B(JumpIfFalseConstant), U8(2), //
+#define X B(Ldar), R(0), B(Star), R(1), B(Ldar), R(1), B(Star), R(0),
+ X X X X X X X X X X X X X X X X X X X X X X X X
+#undef X
+ B(LdaConstant),
+ U8(1), //
+ B(Return), //
+ B(Jump), U8(5), // TODO(oth): Unreachable jump after return
+ B(LdaConstant), U8(3), //
+ B(Return), //
+ B(LdaUndefined), //
+ B(Return)}, //
+ 4,
+ {helper.factory()->NewHeapNumber(0.01),
+ helper.factory()->NewNumberFromInt(200),
+ helper.factory()->NewNumberFromInt(199),
+ helper.factory()->NewNumberFromInt(-200)}},
+ {"function f(a, b) {\n"
+ " if (a == b) { return 1; }\n"
+ " if (a === b) { return 1; }\n"
+ " if (a < b) { return 1; }\n"
+ " if (a > b) { return 1; }\n"
+ " if (a <= b) { return 1; }\n"
+ " if (a >= b) { return 1; }\n"
+ " if (a in b) { return 1; }\n"
+ " if (a instanceof b) { return 1; }\n"
+ " /* if (a != b) { return 1; } */" // TODO(oth) Ast visitor yields
+ " /* if (a !== b) { return 1; } */" // UNARY NOT, rather than !=/!==.
+ " return 0;\n"
+ "} f(1, 1);",
+ kPointerSize,
+ 3,
+ 122,
+ {
+#define IF_CONDITION_RETURN(condition) \
+ B(Ldar), R(helper.kLastParamIndex - 1), \
+ B(Star), R(0), \
+ B(Ldar), R(helper.kLastParamIndex), \
+ B(condition), R(0), \
+ B(JumpIfFalse), U8(7), \
+ B(LdaSmi8), U8(1), \
+ B(Return), \
+ B(Jump), U8(2),
+ IF_CONDITION_RETURN(TestEqual) //
+ IF_CONDITION_RETURN(TestEqualStrict) //
+ IF_CONDITION_RETURN(TestLessThan) //
+ IF_CONDITION_RETURN(TestGreaterThan) //
+ IF_CONDITION_RETURN(TestLessThanOrEqual) //
+ IF_CONDITION_RETURN(TestGreaterThanOrEqual) //
+ IF_CONDITION_RETURN(TestIn) //
+ IF_CONDITION_RETURN(TestInstanceOf) //
+#undef IF_CONDITION_RETURN
+ B(LdaZero), //
+ B(Return)}, //
+ 0,
+ {unused, unused, unused, unused}},
+ };
+
+ for (size_t i = 0; i < arraysize(snippets); i++) {
+ Handle<BytecodeArray> bytecode_array =
+ helper.MakeBytecode(snippets[i].code_snippet, helper.kFunctionName);
+ CheckBytecodeArrayEqual(snippets[i], bytecode_array);
+ }
+}
+
+
} // namespace interpreter
} // namespace internal
} // namespace v8
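The B/U8/R/_ macros let these tests spell out expected bytecode byte-for-byte, and CheckBytecodeArrayEqual walks the generated BytecodeArray with a BytecodeArrayIterator, failing with a readable message on the first mismatching bytecode or operand. One more snippet in the same style, as a hedged illustration (not part of the patch):

    // "return 7;" should compile to: load the Smi 7, then return it.
    ExpectedSnippet<int> snippet = {
        "return 7;",                     // code_snippet
        0,                               // frame_size: no locals
        1,                               // parameter_count: just the receiver
        3,                               // bytecode_length in bytes
        {B(LdaSmi8), U8(7), B(Return)},  // expected bytes
        0};                              // constant_count: empty pool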
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index 2302fdc9ac..e238318291 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -14,16 +14,34 @@ namespace v8 {
namespace internal {
namespace interpreter {
+
+static MaybeHandle<Object> CallInterpreter(Isolate* isolate,
+ Handle<JSFunction> function) {
+ return Execution::Call(isolate, function,
+ isolate->factory()->undefined_value(), 0, nullptr);
+}
+
+
+template <class... A>
+static MaybeHandle<Object> CallInterpreter(Isolate* isolate,
+ Handle<JSFunction> function,
+ A... args) {
+ Handle<Object> argv[] = { args... };
+ return Execution::Call(isolate, function,
+ isolate->factory()->undefined_value(), sizeof...(args),
+ argv);
+}
+
+
+template <class... A>
class InterpreterCallable {
public:
InterpreterCallable(Isolate* isolate, Handle<JSFunction> function)
: isolate_(isolate), function_(function) {}
virtual ~InterpreterCallable() {}
- MaybeHandle<Object> operator()() {
- return Execution::Call(isolate_, function_,
- isolate_->factory()->undefined_value(), 0, nullptr,
- false);
+ MaybeHandle<Object> operator()(A... args) {
+ return CallInterpreter(isolate_, function_, args...);
}
private:
@@ -31,30 +49,96 @@ class InterpreterCallable {
Handle<JSFunction> function_;
};
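CallInterpreter is split into a plain zero-argument overload and a variadic template because Handle<Object> argv[] = {args...} would declare an ill-formed zero-length array when the pack is empty. The same overload pattern in isolation, as runnable plain C++ with no V8 types:

    #include <cstddef>
    #include <iostream>

    // Zero-argument case: a non-template function, which overload resolution
    // prefers over the template and which sidesteps the empty
    // array-initializer problem.
    static void Call() { std::cout << "argc=0\n"; }

    template <class... A>
    static void Call(A... args) {
      int argv[] = {args...};  // the pack expands into a local array
      std::cout << "argc=" << sizeof...(args);
      for (std::size_t i = 0; i < sizeof...(args); i++)
        std::cout << ' ' << argv[i];
      std::cout << '\n';
    }

    int main() {
      Call();         // picks the non-template overload
      Call(2, 3, 5);  // prints: argc=3 2 3 5
    }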
+
+static const char* kFunctionName = "f";
+
+
class InterpreterTester {
public:
- InterpreterTester(Isolate* isolate, Handle<BytecodeArray> bytecode)
- : isolate_(isolate), function_(GetBytecodeFunction(isolate, bytecode)) {
+ InterpreterTester(Isolate* isolate, const char* source,
+ MaybeHandle<BytecodeArray> bytecode,
+ MaybeHandle<TypeFeedbackVector> feedback_vector)
+ : isolate_(isolate),
+ source_(source),
+ bytecode_(bytecode),
+ feedback_vector_(feedback_vector) {
+ i::FLAG_vector_stores = true;
i::FLAG_ignition = true;
+ i::FLAG_always_opt = false;
+    // Set the ignition filter flag via SetFlagsFromString to avoid a double
+    // free (or a potential leak with StrDup(), whose ownership is unclear).
+ ScopedVector<char> ignition_filter(64);
+ SNPrintF(ignition_filter, "--ignition-filter=%s", kFunctionName);
+ FlagList::SetFlagsFromString(ignition_filter.start(),
+ ignition_filter.length());
// Ensure handler table is generated.
isolate->interpreter()->Initialize();
}
+
+ InterpreterTester(Isolate* isolate, Handle<BytecodeArray> bytecode,
+ MaybeHandle<TypeFeedbackVector> feedback_vector =
+ MaybeHandle<TypeFeedbackVector>())
+ : InterpreterTester(isolate, nullptr, bytecode, feedback_vector) {}
+
+
+ InterpreterTester(Isolate* isolate, const char* source)
+ : InterpreterTester(isolate, source, MaybeHandle<BytecodeArray>(),
+ MaybeHandle<TypeFeedbackVector>()) {}
+
virtual ~InterpreterTester() {}
- InterpreterCallable GetCallable() {
- return InterpreterCallable(isolate_, function_);
+ template <class... A>
+ InterpreterCallable<A...> GetCallable() {
+ return InterpreterCallable<A...>(isolate_, GetBytecodeFunction<A...>());
+ }
+
+ static Handle<Object> NewObject(const char* script) {
+ return v8::Utils::OpenHandle(*CompileRun(script));
+ }
+
+ static Handle<String> GetName(Isolate* isolate, const char* name) {
+ Handle<String> result = isolate->factory()->NewStringFromAsciiChecked(name);
+ return isolate->factory()->string_table()->LookupString(isolate, result);
+ }
+
+ static std::string function_name() {
+ return std::string(kFunctionName);
}
private:
Isolate* isolate_;
- Handle<JSFunction> function_;
+ const char* source_;
+ MaybeHandle<BytecodeArray> bytecode_;
+ MaybeHandle<TypeFeedbackVector> feedback_vector_;
+
+ template <class... A>
+ Handle<JSFunction> GetBytecodeFunction() {
+ Handle<JSFunction> function;
+ if (source_) {
+ CompileRun(source_);
+ Local<Function> api_function =
+ Local<Function>::Cast(CcTest::global()->Get(v8_str(kFunctionName)));
+ function = v8::Utils::OpenHandle(*api_function);
+ } else {
+ int arg_count = sizeof...(A);
+ std::string source("(function " + function_name() + "(");
+ for (int i = 0; i < arg_count; i++) {
+ source += i == 0 ? "a" : ", a";
+ }
+ source += "){})";
+ function = v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(CompileRun(source.c_str())));
+ function->ReplaceCode(
+ *isolate_->builtins()->InterpreterEntryTrampoline());
+ }
- static Handle<JSFunction> GetBytecodeFunction(
- Isolate* isolate, Handle<BytecodeArray> bytecode_array) {
- Handle<JSFunction> function = v8::Utils::OpenHandle(
- *v8::Handle<v8::Function>::Cast(CompileRun("(function(){})")));
- function->ReplaceCode(*isolate->builtins()->InterpreterEntryTrampoline());
- function->shared()->set_function_data(*bytecode_array);
+ if (!bytecode_.is_null()) {
+ function->shared()->set_function_data(*bytecode_.ToHandleChecked());
+ }
+ if (!feedback_vector_.is_null()) {
+ function->shared()->set_feedback_vector(
+ *feedback_vector_.ToHandleChecked());
+ }
return function;
}
@@ -67,131 +151,188 @@ class InterpreterTester {
using v8::internal::BytecodeArray;
using v8::internal::Handle;
+using v8::internal::LanguageMode;
using v8::internal::Object;
+using v8::internal::Runtime;
using v8::internal::Smi;
+using v8::internal::Token;
using namespace v8::internal::interpreter;
-TEST(TestInterpreterReturn) {
- InitializedHandleScope handles;
+TEST(InterpreterReturn) {
+ HandleAndZoneScope handles;
Handle<Object> undefined_value =
handles.main_isolate()->factory()->undefined_value();
- BytecodeArrayBuilder builder(handles.main_isolate());
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
builder.set_locals_count(0);
+ builder.set_parameter_count(1);
builder.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
InterpreterTester tester(handles.main_isolate(), bytecode_array);
- InterpreterCallable callable(tester.GetCallable());
+ auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(undefined_value));
}
-TEST(TestInterpreterLoadUndefined) {
- InitializedHandleScope handles;
+TEST(InterpreterLoadUndefined) {
+ HandleAndZoneScope handles;
Handle<Object> undefined_value =
handles.main_isolate()->factory()->undefined_value();
- BytecodeArrayBuilder builder(handles.main_isolate());
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
builder.set_locals_count(0);
+ builder.set_parameter_count(1);
builder.LoadUndefined().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
InterpreterTester tester(handles.main_isolate(), bytecode_array);
- InterpreterCallable callable(tester.GetCallable());
+ auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(undefined_value));
}
-TEST(TestInterpreterLoadNull) {
- InitializedHandleScope handles;
+TEST(InterpreterLoadNull) {
+ HandleAndZoneScope handles;
Handle<Object> null_value = handles.main_isolate()->factory()->null_value();
- BytecodeArrayBuilder builder(handles.main_isolate());
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
builder.set_locals_count(0);
+ builder.set_parameter_count(1);
builder.LoadNull().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
InterpreterTester tester(handles.main_isolate(), bytecode_array);
- InterpreterCallable callable(tester.GetCallable());
+ auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(null_value));
}
-TEST(TestInterpreterLoadTheHole) {
- InitializedHandleScope handles;
+TEST(InterpreterLoadTheHole) {
+ HandleAndZoneScope handles;
Handle<Object> the_hole_value =
handles.main_isolate()->factory()->the_hole_value();
- BytecodeArrayBuilder builder(handles.main_isolate());
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
builder.set_locals_count(0);
+ builder.set_parameter_count(1);
builder.LoadTheHole().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
InterpreterTester tester(handles.main_isolate(), bytecode_array);
- InterpreterCallable callable(tester.GetCallable());
+ auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(the_hole_value));
}
-TEST(TestInterpreterLoadTrue) {
- InitializedHandleScope handles;
+TEST(InterpreterLoadTrue) {
+ HandleAndZoneScope handles;
Handle<Object> true_value = handles.main_isolate()->factory()->true_value();
- BytecodeArrayBuilder builder(handles.main_isolate());
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
builder.set_locals_count(0);
+ builder.set_parameter_count(1);
builder.LoadTrue().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
InterpreterTester tester(handles.main_isolate(), bytecode_array);
- InterpreterCallable callable(tester.GetCallable());
+ auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(true_value));
}
-TEST(TestInterpreterLoadFalse) {
- InitializedHandleScope handles;
+TEST(InterpreterLoadFalse) {
+ HandleAndZoneScope handles;
Handle<Object> false_value = handles.main_isolate()->factory()->false_value();
- BytecodeArrayBuilder builder(handles.main_isolate());
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
builder.set_locals_count(0);
+ builder.set_parameter_count(1);
builder.LoadFalse().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
InterpreterTester tester(handles.main_isolate(), bytecode_array);
- InterpreterCallable callable(tester.GetCallable());
+ auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(false_value));
}
-TEST(TestInterpreterLoadLiteral) {
- InitializedHandleScope handles;
+TEST(InterpreterLoadLiteral) {
+ HandleAndZoneScope handles;
+ i::Factory* factory = handles.main_isolate()->factory();
+
+ // Small Smis.
for (int i = -128; i < 128; i++) {
- BytecodeArrayBuilder builder(handles.main_isolate());
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
builder.set_locals_count(0);
+ builder.set_parameter_count(1);
builder.LoadLiteral(Smi::FromInt(i)).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
InterpreterTester tester(handles.main_isolate(), bytecode_array);
- InterpreterCallable callable(tester.GetCallable());
+ auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(i));
}
+
+ // Large Smis.
+ {
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(0);
+ builder.set_parameter_count(1);
+ builder.LoadLiteral(Smi::FromInt(0x12345678)).Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(0x12345678));
+ }
+
+ // Heap numbers.
+ {
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(0);
+ builder.set_parameter_count(1);
+ builder.LoadLiteral(factory->NewHeapNumber(-2.1e19)).Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK_EQ(i::HeapNumber::cast(*return_val)->value(), -2.1e19);
+ }
+
+ // Strings.
+ {
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(0);
+ builder.set_parameter_count(1);
+ Handle<i::String> string = factory->NewStringFromAsciiChecked("String");
+ builder.LoadLiteral(string).Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK(i::String::cast(*return_val)->Equals(*string));
+ }
}
-TEST(TestInterpreterLoadStoreRegisters) {
- InitializedHandleScope handles;
+TEST(InterpreterLoadStoreRegisters) {
+ HandleAndZoneScope handles;
Handle<Object> true_value = handles.main_isolate()->factory()->true_value();
for (int i = 0; i <= Register::kMaxRegisterIndex; i++) {
- BytecodeArrayBuilder builder(handles.main_isolate());
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
builder.set_locals_count(i + 1);
+ builder.set_parameter_count(1);
Register reg(i);
builder.LoadTrue()
.StoreAccumulatorInRegister(reg)
@@ -201,8 +342,945 @@ TEST(TestInterpreterLoadStoreRegisters) {
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
InterpreterTester tester(handles.main_isolate(), bytecode_array);
- InterpreterCallable callable(tester.GetCallable());
+ auto callable = tester.GetCallable<>();
Handle<Object> return_val = callable().ToHandleChecked();
CHECK(return_val.is_identical_to(true_value));
}
}
+
+
+static const Token::Value kArithmeticOperators[] = {
+ Token::Value::ADD, Token::Value::SUB, Token::Value::MUL, Token::Value::DIV,
+ Token::Value::MOD};
+
+
+static double BinaryOpC(Token::Value op, double lhs, double rhs) {
+ switch (op) {
+ case Token::Value::ADD:
+ return lhs + rhs;
+ case Token::Value::SUB:
+ return lhs - rhs;
+ case Token::Value::MUL:
+ return lhs * rhs;
+ case Token::Value::DIV:
+ return lhs / rhs;
+ case Token::Value::MOD:
+ return std::fmod(lhs, rhs);
+ default:
+ UNREACHABLE();
+ return std::numeric_limits<double>::min();
+ }
+}
+
+
+TEST(InterpreterBinaryOpsSmi) {
+ int lhs_inputs[] = {3266, 1024, 0, -17, -18000};
+ int rhs_inputs[] = {3266, 5, 4, 3, 2, 1, -1, -2};
+ for (size_t l = 0; l < arraysize(lhs_inputs); l++) {
+ for (size_t r = 0; r < arraysize(rhs_inputs); r++) {
+ for (size_t o = 0; o < arraysize(kArithmeticOperators); o++) {
+ HandleAndZoneScope handles;
+ i::Factory* factory = handles.main_isolate()->factory();
+ BytecodeArrayBuilder builder(handles.main_isolate(),
+ handles.main_zone());
+ builder.set_locals_count(1);
+ builder.set_parameter_count(1);
+ Register reg(0);
+ int lhs = lhs_inputs[l];
+ int rhs = rhs_inputs[r];
+ builder.LoadLiteral(Smi::FromInt(lhs))
+ .StoreAccumulatorInRegister(reg)
+ .LoadLiteral(Smi::FromInt(rhs))
+ .BinaryOperation(kArithmeticOperators[o], reg)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ Handle<Object> expected_value =
+ factory->NewNumber(BinaryOpC(kArithmeticOperators[o], lhs, rhs));
+ CHECK(return_value->SameValue(*expected_value));
+ }
+ }
+ }
+}
+
+
+TEST(InterpreterBinaryOpsHeapNumber) {
+ double lhs_inputs[] = {3266.101, 1024.12, 0.01, -17.99, -18000.833, 9.1e17};
+ double rhs_inputs[] = {3266.101, 5.999, 4.778, 3.331, 2.643,
+ 1.1, -1.8, -2.9, 8.3e-27};
+ for (size_t l = 0; l < arraysize(lhs_inputs); l++) {
+ for (size_t r = 0; r < arraysize(rhs_inputs); r++) {
+ for (size_t o = 0; o < arraysize(kArithmeticOperators); o++) {
+ HandleAndZoneScope handles;
+ i::Factory* factory = handles.main_isolate()->factory();
+ BytecodeArrayBuilder builder(handles.main_isolate(),
+ handles.main_zone());
+ builder.set_locals_count(1);
+ builder.set_parameter_count(1);
+ Register reg(0);
+ double lhs = lhs_inputs[l];
+ double rhs = rhs_inputs[r];
+ builder.LoadLiteral(factory->NewNumber(lhs))
+ .StoreAccumulatorInRegister(reg)
+ .LoadLiteral(factory->NewNumber(rhs))
+ .BinaryOperation(kArithmeticOperators[o], reg)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ Handle<Object> expected_value =
+ factory->NewNumber(BinaryOpC(kArithmeticOperators[o], lhs, rhs));
+ CHECK(return_value->SameValue(*expected_value));
+ }
+ }
+ }
+}
+
+
+TEST(InterpreterStringAdd) {
+ HandleAndZoneScope handles;
+ i::Factory* factory = handles.main_isolate()->factory();
+
+ struct TestCase {
+ Handle<Object> lhs;
+ Handle<Object> rhs;
+ Handle<Object> expected_value;
+ } test_cases[] = {
+ {factory->NewStringFromStaticChars("a"),
+ factory->NewStringFromStaticChars("b"),
+ factory->NewStringFromStaticChars("ab")},
+ {factory->NewStringFromStaticChars("aaaaaa"),
+ factory->NewStringFromStaticChars("b"),
+ factory->NewStringFromStaticChars("aaaaaab")},
+ {factory->NewStringFromStaticChars("aaa"),
+ factory->NewStringFromStaticChars("bbbbb"),
+ factory->NewStringFromStaticChars("aaabbbbb")},
+ {factory->NewStringFromStaticChars(""),
+ factory->NewStringFromStaticChars("b"),
+ factory->NewStringFromStaticChars("b")},
+ {factory->NewStringFromStaticChars("a"),
+ factory->NewStringFromStaticChars(""),
+ factory->NewStringFromStaticChars("a")},
+ {factory->NewStringFromStaticChars("1.11"), factory->NewHeapNumber(2.5),
+ factory->NewStringFromStaticChars("1.112.5")},
+ {factory->NewStringFromStaticChars("-1.11"), factory->NewHeapNumber(2.56),
+ factory->NewStringFromStaticChars("-1.112.56")},
+ {factory->NewStringFromStaticChars(""), factory->NewHeapNumber(2.5),
+ factory->NewStringFromStaticChars("2.5")},
+ };
+
+ for (size_t i = 0; i < arraysize(test_cases); i++) {
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(1);
+ builder.set_parameter_count(1);
+ Register reg(0);
+ builder.LoadLiteral(test_cases[i].lhs)
+ .StoreAccumulatorInRegister(reg)
+ .LoadLiteral(test_cases[i].rhs)
+ .BinaryOperation(Token::Value::ADD, reg)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->SameValue(*test_cases[i].expected_value));
+ }
+}
+
+
+TEST(InterpreterParameter1) {
+ HandleAndZoneScope handles;
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(0);
+ builder.set_parameter_count(1);
+ builder.LoadAccumulatorWithRegister(builder.Parameter(0)).Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<Handle<Object>>();
+
+ // Check for heap objects.
+ Handle<Object> true_value = handles.main_isolate()->factory()->true_value();
+ Handle<Object> return_val = callable(true_value).ToHandleChecked();
+ CHECK(return_val.is_identical_to(true_value));
+
+ // Check for Smis.
+ return_val = callable(Handle<Smi>(Smi::FromInt(3), handles.main_isolate()))
+ .ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(3));
+}
+
+
+TEST(InterpreterParameter8) {
+ HandleAndZoneScope handles;
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(0);
+ builder.set_parameter_count(8);
+ builder.LoadAccumulatorWithRegister(builder.Parameter(0))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(1))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(2))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(3))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(4))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(5))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(6))
+ .BinaryOperation(Token::Value::ADD, builder.Parameter(7))
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ typedef Handle<Object> H;
+ auto callable = tester.GetCallable<H, H, H, H, H, H, H, H>();
+
+ Handle<Smi> arg1 = Handle<Smi>(Smi::FromInt(1), handles.main_isolate());
+ Handle<Smi> arg2 = Handle<Smi>(Smi::FromInt(2), handles.main_isolate());
+ Handle<Smi> arg3 = Handle<Smi>(Smi::FromInt(3), handles.main_isolate());
+ Handle<Smi> arg4 = Handle<Smi>(Smi::FromInt(4), handles.main_isolate());
+ Handle<Smi> arg5 = Handle<Smi>(Smi::FromInt(5), handles.main_isolate());
+ Handle<Smi> arg6 = Handle<Smi>(Smi::FromInt(6), handles.main_isolate());
+ Handle<Smi> arg7 = Handle<Smi>(Smi::FromInt(7), handles.main_isolate());
+ Handle<Smi> arg8 = Handle<Smi>(Smi::FromInt(8), handles.main_isolate());
+ // Check for Smis.
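+ // The bytecode sums all eight parameters: 1 + 2 + ... + 8 = 36.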
+ Handle<Object> return_val =
+ callable(arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8)
+ .ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(36));
+}
+
+
+TEST(InterpreterLoadGlobal) {
+ HandleAndZoneScope handles;
+
+ // Test loading a global.
+ std::string source(
+ "var global = 321;\n"
+ "function " + InterpreterTester::function_name() + "() {\n"
+ " return global;\n"
+ "}");
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(321));
+}
+
+
+TEST(InterpreterCallGlobal) {
+ HandleAndZoneScope handles;
+
+ // Test calling a global function.
+ std::string source(
+ "function g_add(a, b) { return a + b; }\n"
+ "function " + InterpreterTester::function_name() + "() {\n"
+ " return g_add(5, 10);\n"
+ "}");
+ InterpreterTester tester(handles.main_isolate(), source.c_str());
+ auto callable = tester.GetCallable<>();
+
+ Handle<Object> return_val = callable().ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(15));
+}
+
+
+TEST(InterpreterLoadNamedProperty) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ i::FeedbackVectorSlotKind ic_kinds[] = {i::FeedbackVectorSlotKind::LOAD_IC};
+ i::StaticFeedbackVectorSpec feedback_spec(0, 1, ic_kinds);
+ Handle<i::TypeFeedbackVector> vector =
+ factory->NewTypeFeedbackVector(&feedback_spec);
+
+ Handle<i::String> name = factory->NewStringFromAsciiChecked("val");
+ name = factory->string_table()->LookupString(isolate, name);
+
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(0);
+ builder.set_parameter_count(1);
+ builder.LoadLiteral(name)
+ .LoadNamedProperty(builder.Parameter(0), vector->first_ic_slot_index(),
+ i::SLOPPY)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array, vector);
+ auto callable = tester.GetCallable<Handle<Object>>();
+
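+ // The calls below walk the feedback slot through the usual IC states:
+ // an initial miss, monomorphic caching on the second call, polymorphic
+ // caching once a second map is seen, and finally the megamorphic stub
+ // cache after several distinct maps (the exact transition thresholds are
+ // V8-internal).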
+ Handle<Object> object = InterpreterTester::NewObject("({ val : 123 })");
+ // Test IC miss.
+ Handle<Object> return_val = callable(object).ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(123));
+
+ // Test transition to monomorphic IC.
+ return_val = callable(object).ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(123));
+
+ // Test transition to polymorphic IC.
+ Handle<Object> object2 =
+ InterpreterTester::NewObject("({ val : 456, other : 123 })");
+ return_val = callable(object2).ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(456));
+
+ // Test transition to megamorphic IC.
+ Handle<Object> object3 =
+ InterpreterTester::NewObject("({ val : 789, val2 : 123 })");
+ callable(object3).ToHandleChecked();
+ Handle<Object> object4 =
+ InterpreterTester::NewObject("({ val : 789, val3 : 123 })");
+ callable(object4).ToHandleChecked();
+ Handle<Object> object5 =
+ InterpreterTester::NewObject("({ val : 789, val4 : 123 })");
+ return_val = callable(object5).ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(789));
+}
+
+
+TEST(InterpreterLoadKeyedProperty) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ i::FeedbackVectorSlotKind ic_kinds[] = {
+ i::FeedbackVectorSlotKind::KEYED_LOAD_IC};
+ i::StaticFeedbackVectorSpec feedback_spec(0, 1, ic_kinds);
+ Handle<i::TypeFeedbackVector> vector =
+ factory->NewTypeFeedbackVector(&feedback_spec);
+
+ Handle<i::String> key = factory->NewStringFromAsciiChecked("key");
+ key = factory->string_table()->LookupString(isolate, key);
+
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(1);
+ builder.set_parameter_count(1);
+ builder.LoadLiteral(key)
+ .LoadKeyedProperty(builder.Parameter(0), vector->first_ic_slot_index(),
+ i::SLOPPY)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array, vector);
+ auto callable = tester.GetCallable<Handle<Object>>();
+
+ Handle<Object> object = InterpreterTester::NewObject("({ key : 123 })");
+ // Test IC miss.
+ Handle<Object> return_val = callable(object).ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(123));
+
+ // Test transition to monomorphic IC.
+ return_val = callable(object).ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(123));
+
+ // Test transition to megamorphic IC.
+ Handle<Object> object3 =
+ InterpreterTester::NewObject("({ key : 789, val2 : 123 })");
+ return_val = callable(object3).ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(789));
+}
+
+
+TEST(InterpreterStoreNamedProperty) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ i::FeedbackVectorSlotKind ic_kinds[] = {i::FeedbackVectorSlotKind::STORE_IC};
+ i::StaticFeedbackVectorSpec feedback_spec(0, 1, ic_kinds);
+ Handle<i::TypeFeedbackVector> vector =
+ factory->NewTypeFeedbackVector(&feedback_spec);
+
+ Handle<i::String> name = factory->NewStringFromAsciiChecked("val");
+ name = factory->string_table()->LookupString(isolate, name);
+
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(1);
+ builder.set_parameter_count(1);
+ builder.LoadLiteral(name)
+ .StoreAccumulatorInRegister(Register(0))
+ .LoadLiteral(Smi::FromInt(999))
+ .StoreNamedProperty(builder.Parameter(0), Register(0),
+ vector->first_ic_slot_index(), i::SLOPPY)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(isolate, bytecode_array, vector);
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> object = InterpreterTester::NewObject("({ val : 123 })");
+ // Test IC miss.
+ Handle<Object> result;
+ callable(object).ToHandleChecked();
+ CHECK(Runtime::GetObjectProperty(isolate, object, name).ToHandle(&result));
+ CHECK_EQ(Smi::cast(*result), Smi::FromInt(999));
+
+ // Test transition to monomorphic IC.
+ callable(object).ToHandleChecked();
+ CHECK(Runtime::GetObjectProperty(isolate, object, name).ToHandle(&result));
+ CHECK_EQ(Smi::cast(*result), Smi::FromInt(999));
+
+ // Test transition to polymorphic IC.
+ Handle<Object> object2 =
+ InterpreterTester::NewObject("({ val : 456, other : 123 })");
+ callable(object2).ToHandleChecked();
+ CHECK(Runtime::GetObjectProperty(isolate, object2, name).ToHandle(&result));
+ CHECK_EQ(Smi::cast(*result), Smi::FromInt(999));
+
+ // Test transition to megamorphic IC.
+ Handle<Object> object3 =
+ InterpreterTester::NewObject("({ val : 789, val2 : 123 })");
+ callable(object3).ToHandleChecked();
+ Handle<Object> object4 =
+ InterpreterTester::NewObject("({ val : 789, val3 : 123 })");
+ callable(object4).ToHandleChecked();
+ Handle<Object> object5 =
+ InterpreterTester::NewObject("({ val : 789, val4 : 123 })");
+ callable(object5).ToHandleChecked();
+ CHECK(Runtime::GetObjectProperty(isolate, object5, name).ToHandle(&result));
+ CHECK_EQ(Smi::cast(*result), Smi::FromInt(999));
+}
+
+
+TEST(InterpreterStoreKeyedProperty) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ i::FeedbackVectorSlotKind ic_kinds[] = {
+ i::FeedbackVectorSlotKind::KEYED_STORE_IC};
+ i::StaticFeedbackVectorSpec feedback_spec(0, 1, ic_kinds);
+ Handle<i::TypeFeedbackVector> vector =
+ factory->NewTypeFeedbackVector(&feedback_spec);
+
+ Handle<i::String> name = factory->NewStringFromAsciiChecked("val");
+ name = factory->string_table()->LookupString(isolate, name);
+
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(1);
+ builder.set_parameter_count(1);
+ builder.LoadLiteral(name)
+ .StoreAccumulatorInRegister(Register(0))
+ .LoadLiteral(Smi::FromInt(999))
+ .StoreKeyedProperty(builder.Parameter(0), Register(0),
+ vector->first_ic_slot_index(), i::SLOPPY)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(isolate, bytecode_array, vector);
+ auto callable = tester.GetCallable<Handle<Object>>();
+ Handle<Object> object = InterpreterTester::NewObject("({ val : 123 })");
+ // Test IC miss.
+ Handle<Object> result;
+ callable(object).ToHandleChecked();
+ CHECK(Runtime::GetObjectProperty(isolate, object, name).ToHandle(&result));
+ CHECK_EQ(Smi::cast(*result), Smi::FromInt(999));
+
+ // Test transition to monomorphic IC.
+ callable(object).ToHandleChecked();
+ CHECK(Runtime::GetObjectProperty(isolate, object, name).ToHandle(&result));
+ CHECK_EQ(Smi::cast(*result), Smi::FromInt(999));
+
+ // Test transition to megamorphic IC.
+ Handle<Object> object2 =
+ InterpreterTester::NewObject("({ val : 456, other : 123 })");
+ callable(object2).ToHandleChecked();
+ CHECK(Runtime::GetObjectProperty(isolate, object2, name).ToHandle(&result));
+ CHECK_EQ(Smi::cast(*result), Smi::FromInt(999));
+}
+
+
+TEST(InterpreterCall) {
+ HandleAndZoneScope handles;
+ i::Isolate* isolate = handles.main_isolate();
+ i::Factory* factory = isolate->factory();
+
+ i::FeedbackVectorSlotKind ic_kinds[] = {i::FeedbackVectorSlotKind::LOAD_IC};
+ i::StaticFeedbackVectorSpec feedback_spec(0, 1, ic_kinds);
+ Handle<i::TypeFeedbackVector> vector =
+ factory->NewTypeFeedbackVector(&feedback_spec);
+
+ Handle<i::String> name = factory->NewStringFromAsciiChecked("func");
+ name = factory->string_table()->LookupString(isolate, name);
+
+ // Check with no args.
+ {
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(1);
+ builder.set_parameter_count(1);
+ builder.LoadLiteral(name)
+ .LoadNamedProperty(builder.Parameter(0), vector->first_ic_slot_index(),
+ i::SLOPPY)
+ .StoreAccumulatorInRegister(Register(0))
+ .Call(Register(0), builder.Parameter(0), 0)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array, vector);
+ auto callable = tester.GetCallable<Handle<Object>>();
+
+ Handle<Object> object = InterpreterTester::NewObject(
+ "new (function Obj() { this.func = function() { return 0x265; }})()");
+ Handle<Object> return_val = callable(object).ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(0x265));
+ }
+
+ // Check that receiver is passed properly.
+ {
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(1);
+ builder.set_parameter_count(1);
+ builder.LoadLiteral(name)
+ .LoadNamedProperty(builder.Parameter(0), vector->first_ic_slot_index(),
+ i::SLOPPY)
+ .StoreAccumulatorInRegister(Register(0))
+ .Call(Register(0), builder.Parameter(0), 0)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array, vector);
+ auto callable = tester.GetCallable<Handle<Object>>();
+
+ Handle<Object> object = InterpreterTester::NewObject(
+ "new (function Obj() {"
+ " this.val = 1234;"
+ " this.func = function() { return this.val; };"
+ "})()");
+ Handle<Object> return_val = callable(object).ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_val), Smi::FromInt(1234));
+ }
+
+ // Check with two parameters (+ receiver).
+ {
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(4);
+ builder.set_parameter_count(1);
+ builder.LoadLiteral(name)
+ .LoadNamedProperty(builder.Parameter(0), vector->first_ic_slot_index(),
+ i::SLOPPY)
+ .StoreAccumulatorInRegister(Register(0))
+ .LoadAccumulatorWithRegister(builder.Parameter(0))
+ .StoreAccumulatorInRegister(Register(1))
+ .LoadLiteral(Smi::FromInt(51))
+ .StoreAccumulatorInRegister(Register(2))
+ .LoadLiteral(Smi::FromInt(11))
+ .StoreAccumulatorInRegister(Register(3))
+ .Call(Register(0), Register(1), 2)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array, vector);
+ auto callable = tester.GetCallable<Handle<Object>>();
+
+ Handle<Object> object = InterpreterTester::NewObject(
+ "new (function Obj() { "
+ " this.func = function(a, b) { return a - b; }"
+ "})()");
+ Handle<Object> return_val = callable(object).ToHandleChecked();
+ CHECK(return_val->SameValue(Smi::FromInt(40)));
+ }
+
+ // Check with 10 parameters (+ receiver).
+ {
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(12);
+ builder.set_parameter_count(1);
+ builder.LoadLiteral(name)
+ .LoadNamedProperty(builder.Parameter(0), vector->first_ic_slot_index(),
+ i::SLOPPY)
+ .StoreAccumulatorInRegister(Register(0))
+ .LoadAccumulatorWithRegister(builder.Parameter(0))
+ .StoreAccumulatorInRegister(Register(1))
+ .LoadLiteral(factory->NewStringFromAsciiChecked("a"))
+ .StoreAccumulatorInRegister(Register(2))
+ .LoadLiteral(factory->NewStringFromAsciiChecked("b"))
+ .StoreAccumulatorInRegister(Register(3))
+ .LoadLiteral(factory->NewStringFromAsciiChecked("c"))
+ .StoreAccumulatorInRegister(Register(4))
+ .LoadLiteral(factory->NewStringFromAsciiChecked("d"))
+ .StoreAccumulatorInRegister(Register(5))
+ .LoadLiteral(factory->NewStringFromAsciiChecked("e"))
+ .StoreAccumulatorInRegister(Register(6))
+ .LoadLiteral(factory->NewStringFromAsciiChecked("f"))
+ .StoreAccumulatorInRegister(Register(7))
+ .LoadLiteral(factory->NewStringFromAsciiChecked("g"))
+ .StoreAccumulatorInRegister(Register(8))
+ .LoadLiteral(factory->NewStringFromAsciiChecked("h"))
+ .StoreAccumulatorInRegister(Register(9))
+ .LoadLiteral(factory->NewStringFromAsciiChecked("i"))
+ .StoreAccumulatorInRegister(Register(10))
+ .LoadLiteral(factory->NewStringFromAsciiChecked("j"))
+ .StoreAccumulatorInRegister(Register(11))
+ .Call(Register(0), Register(1), 10)
+ .Return();
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+
+ InterpreterTester tester(handles.main_isolate(), bytecode_array, vector);
+ auto callable = tester.GetCallable<Handle<Object>>();
+
+ Handle<Object> object = InterpreterTester::NewObject(
+ "new (function Obj() { "
+ " this.prefix = \"prefix_\";"
+ " this.func = function(a, b, c, d, e, f, g, h, i, j) {"
+ " return this.prefix + a + b + c + d + e + f + g + h + i + j;"
+ " }"
+ "})()");
+ Handle<Object> return_val = callable(object).ToHandleChecked();
+ Handle<i::String> expected =
+ factory->NewStringFromAsciiChecked("prefix_abcdefghij");
+ CHECK(i::String::cast(*return_val)->Equals(*expected));
+ }
+}
+
+
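+// Emits bytecode that stores |value| into |reg| without clobbering the
+// accumulator, which is parked in |scratch| and restored afterwards.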
+static BytecodeArrayBuilder& SetRegister(BytecodeArrayBuilder& builder,
+ Register reg, int value,
+ Register scratch) {
+ return builder.StoreAccumulatorInRegister(scratch)
+ .LoadLiteral(Smi::FromInt(value))
+ .StoreAccumulatorInRegister(reg)
+ .LoadAccumulatorWithRegister(scratch);
+}
+
+
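+// Emits bytecode that adds |value| to |reg|, likewise preserving the
+// accumulator via |scratch|.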
+static BytecodeArrayBuilder& IncrementRegister(BytecodeArrayBuilder& builder,
+ Register reg, int value,
+ Register scratch) {
+ return builder.StoreAccumulatorInRegister(scratch)
+ .LoadLiteral(Smi::FromInt(value))
+ .BinaryOperation(Token::Value::ADD, reg)
+ .StoreAccumulatorInRegister(reg)
+ .LoadAccumulatorWithRegister(scratch);
+}
+
+
+TEST(InterpreterJumps) {
+ HandleAndZoneScope handles;
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(2);
+ builder.set_parameter_count(0);
+ Register reg(0), scratch(1);
+ BytecodeLabel label[3];
+
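+ // A correct run takes only the jumps: the SetRegister poison values
+ // (1024/2048/4096) are skipped, and the increments that are reached sum
+ // to 0 + 2 + 1 + 4 = 7.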
+ builder.LoadLiteral(Smi::FromInt(0))
+ .StoreAccumulatorInRegister(reg)
+ .Jump(&label[1]);
+ SetRegister(builder, reg, 1024, scratch).Bind(&label[0]);
+ IncrementRegister(builder, reg, 1, scratch).Jump(&label[2]);
+ SetRegister(builder, reg, 2048, scratch).Bind(&label[1]);
+ IncrementRegister(builder, reg, 2, scratch).Jump(&label[0]);
+ SetRegister(builder, reg, 4096, scratch).Bind(&label[2]);
+ IncrementRegister(builder, reg, 4, scratch)
+ .LoadAccumulatorWithRegister(reg)
+ .Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_value)->value(), 7);
+}
+
+
+TEST(InterpreterConditionalJumps) {
+ HandleAndZoneScope handles;
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ builder.set_locals_count(2);
+ builder.set_parameter_count(0);
+ Register reg(0), scratch(1);
+ BytecodeLabel label[2];
+ BytecodeLabel done, done1;
+
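+ // Only the increments behind branches that fall through or are correctly
+ // taken execute: 1 + 2 + 4 = 7; the 1024 and 2048 additions must be
+ // jumped over.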
+ builder.LoadLiteral(Smi::FromInt(0))
+ .StoreAccumulatorInRegister(reg)
+ .LoadFalse()
+ .JumpIfFalse(&label[0]);
+ IncrementRegister(builder, reg, 1024, scratch)
+ .Bind(&label[0])
+ .LoadTrue()
+ .JumpIfFalse(&done);
+ IncrementRegister(builder, reg, 1, scratch).LoadTrue().JumpIfTrue(&label[1]);
+ IncrementRegister(builder, reg, 2048, scratch).Bind(&label[1]);
+ IncrementRegister(builder, reg, 2, scratch).LoadFalse().JumpIfTrue(&done1);
+ IncrementRegister(builder, reg, 4, scratch)
+ .LoadAccumulatorWithRegister(reg)
+ .Bind(&done)
+ .Bind(&done1)
+ .Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK_EQ(Smi::cast(*return_value)->value(), 7);
+}
+
+
+static const Token::Value kComparisonTypes[] = {
+ Token::Value::EQ, Token::Value::NE, Token::Value::EQ_STRICT,
+ Token::Value::NE_STRICT, Token::Value::LT, Token::Value::LTE,
+ Token::Value::GT, Token::Value::GTE};
+
+
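+// Computes the expected comparison result in C++. For the strict variants, a
+// type mismatch between the operands forces inequality regardless of value.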
+template <typename T>
+bool CompareC(Token::Value op, T lhs, T rhs, bool types_differed = false) {
+ switch (op) {
+ case Token::Value::EQ:
+ return lhs == rhs;
+ case Token::Value::NE:
+ return lhs != rhs;
+ case Token::Value::EQ_STRICT:
+ return (lhs == rhs) && !types_differed;
+ case Token::Value::NE_STRICT:
+ return (lhs != rhs) || types_differed;
+ case Token::Value::LT:
+ return lhs < rhs;
+ case Token::Value::LTE:
+ return lhs <= rhs;
+ case Token::Value::GT:
+ return lhs > rhs;
+ case Token::Value::GTE:
+ return lhs >= rhs;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+}
+
+
+TEST(InterpreterSmiComparisons) {
+ // NB: These constants cover the 31-bit Smi range.
+ int inputs[] = {v8::internal::kMinInt / 2,
+ v8::internal::kMinInt / 4,
+ -108733832,
+ -999,
+ -42,
+ -2,
+ -1,
+ 0,
+ +1,
+ +2,
+ 42,
+ 12345678,
+ v8::internal::kMaxInt / 4,
+ v8::internal::kMaxInt / 2};
+
+ for (size_t c = 0; c < arraysize(kComparisonTypes); c++) {
+ Token::Value comparison = kComparisonTypes[c];
+ for (size_t i = 0; i < arraysize(inputs); i++) {
+ for (size_t j = 0; j < arraysize(inputs); j++) {
+ HandleAndZoneScope handles;
+ BytecodeArrayBuilder builder(handles.main_isolate(),
+ handles.main_zone());
+ Register r0(0);
+ builder.set_locals_count(1);
+ builder.set_parameter_count(0);
+ builder.LoadLiteral(Smi::FromInt(inputs[i]))
+ .StoreAccumulatorInRegister(r0)
+ .LoadLiteral(Smi::FromInt(inputs[j]))
+ .CompareOperation(comparison, r0, LanguageMode::SLOPPY)
+ .Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->IsBoolean());
+ CHECK_EQ(return_value->BooleanValue(),
+ CompareC(comparison, inputs[i], inputs[j]));
+ }
+ }
+ }
+}
+
+
+TEST(InterpreterHeapNumberComparisons) {
+ double inputs[] = {std::numeric_limits<double>::min(),
+ std::numeric_limits<double>::max(),
+ -0.001,
+ 0.01,
+ 0.1000001,
+ 1e99,
+ -1e-99};
+ for (size_t c = 0; c < arraysize(kComparisonTypes); c++) {
+ Token::Value comparison = kComparisonTypes[c];
+ for (size_t i = 0; i < arraysize(inputs); i++) {
+ for (size_t j = 0; j < arraysize(inputs); j++) {
+ HandleAndZoneScope handles;
+ i::Factory* factory = handles.main_isolate()->factory();
+ BytecodeArrayBuilder builder(handles.main_isolate(),
+ handles.main_zone());
+ Register r0(0);
+ builder.set_locals_count(1);
+ builder.set_parameter_count(0);
+ builder.LoadLiteral(factory->NewHeapNumber(inputs[i]))
+ .StoreAccumulatorInRegister(r0)
+ .LoadLiteral(factory->NewHeapNumber(inputs[j]))
+ .CompareOperation(comparison, r0, LanguageMode::SLOPPY)
+ .Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->IsBoolean());
+ CHECK_EQ(return_value->BooleanValue(),
+ CompareC(comparison, inputs[i], inputs[j]));
+ }
+ }
+ }
+}
+
+
+TEST(InterpreterStringComparisons) {
+ std::string inputs[] = {"A", "abc", "z", "", "Foo!", "Foo"};
+
+ for (size_t c = 0; c < arraysize(kComparisonTypes); c++) {
+ Token::Value comparison = kComparisonTypes[c];
+ for (size_t i = 0; i < arraysize(inputs); i++) {
+ for (size_t j = 0; j < arraysize(inputs); j++) {
+ const char* lhs = inputs[i].c_str();
+ const char* rhs = inputs[j].c_str();
+ HandleAndZoneScope handles;
+ i::Factory* factory = handles.main_isolate()->factory();
+ BytecodeArrayBuilder builder(handles.main_isolate(),
+ handles.main_zone());
+ Register r0(0);
+ builder.set_locals_count(1);
+ builder.set_parameter_count(0);
+ builder.LoadLiteral(factory->NewStringFromAsciiChecked(lhs))
+ .StoreAccumulatorInRegister(r0)
+ .LoadLiteral(factory->NewStringFromAsciiChecked(rhs))
+ .CompareOperation(comparison, r0, LanguageMode::SLOPPY)
+ .Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->IsBoolean());
+ CHECK_EQ(return_value->BooleanValue(),
+ CompareC(comparison, inputs[i], inputs[j]));
+ }
+ }
+ }
+}
+
+
+TEST(InterpreterMixedComparisons) {
+ // This test compares a HeapNumber with a String that is convertible to a
+ // HeapNumber, so the comparison is between numeric values except for the
+ // strict comparisons, where no conversion is performed and the operands'
+ // differing types decide the result.
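+ // For example, "2.01" == 2.01 is true, while "2.01" === 2.01 is false.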
+ const char* inputs[] = {"-1.77", "-40.333", "0.01", "55.77e5", "2.01"};
+
+ i::UnicodeCache unicode_cache;
+
+ for (size_t c = 0; c < arraysize(kComparisonTypes); c++) {
+ Token::Value comparison = kComparisonTypes[c];
+ for (size_t i = 0; i < arraysize(inputs); i++) {
+ for (size_t j = 0; j < arraysize(inputs); j++) {
+ for (int pass = 0; pass < 2; pass++) {
+ const char* lhs_cstr = inputs[i];
+ const char* rhs_cstr = inputs[j];
+ double lhs = StringToDouble(&unicode_cache, lhs_cstr,
+ i::ConversionFlags::NO_FLAGS);
+ double rhs = StringToDouble(&unicode_cache, rhs_cstr,
+ i::ConversionFlags::NO_FLAGS);
+ HandleAndZoneScope handles;
+ i::Factory* factory = handles.main_isolate()->factory();
+ BytecodeArrayBuilder builder(handles.main_isolate(),
+ handles.main_zone());
+ Register r0(0);
+ builder.set_locals_count(1);
+ builder.set_parameter_count(0);
+ if (pass == 0) {
+ // Comparison with HeapNumber on the lhs and String on the rhs.
+ builder.LoadLiteral(factory->NewNumber(lhs))
+ .StoreAccumulatorInRegister(r0)
+ .LoadLiteral(factory->NewStringFromAsciiChecked(rhs_cstr))
+ .CompareOperation(comparison, r0, LanguageMode::SLOPPY)
+ .Return();
+ } else {
+ // Comparison with HeapNumber on the rhs and String on the lhs.
+ builder.LoadLiteral(factory->NewStringFromAsciiChecked(lhs_cstr))
+ .StoreAccumulatorInRegister(r0)
+ .LoadLiteral(factory->NewNumber(rhs))
+ .CompareOperation(comparison, r0, LanguageMode::SLOPPY)
+ .Return();
+ }
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->IsBoolean());
+ CHECK_EQ(return_value->BooleanValue(),
+ CompareC(comparison, lhs, rhs, true));
+ }
+ }
+ }
+ }
+}
+
+
+TEST(InterpreterInstanceOf) {
+ HandleAndZoneScope handles;
+ i::Factory* factory = handles.main_isolate()->factory();
+ Handle<i::String> name = factory->NewStringFromAsciiChecked("cons");
+ Handle<i::JSFunction> func = factory->NewFunction(name);
+ Handle<i::JSObject> instance = factory->NewJSObject(func);
+ Handle<i::Object> other = factory->NewNumber(3.3333);
+ Handle<i::Object> cases[] = {Handle<i::Object>::cast(instance), other};
+ for (size_t i = 0; i < arraysize(cases); i++) {
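+ // Only the object constructed from |func| is an instance of it.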
+ bool expected_value = (i == 0);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ Register r0(0);
+ builder.set_locals_count(1);
+ builder.set_parameter_count(0);
+ builder.LoadLiteral(cases[i]);
+ builder.StoreAccumulatorInRegister(r0)
+ .LoadLiteral(func)
+ .CompareOperation(Token::Value::INSTANCEOF, r0, LanguageMode::SLOPPY)
+ .Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->IsBoolean());
+ CHECK_EQ(return_value->BooleanValue(), expected_value);
+ }
+}
+
+
+TEST(InterpreterTestIn) {
+ HandleAndZoneScope handles;
+ i::Factory* factory = handles.main_isolate()->factory();
+ // Allocate an array.
+ Handle<i::JSArray> array =
+ factory->NewJSArray(i::ElementsKind::FAST_SMI_ELEMENTS);
+ // Check for these properties on the array object.
+ const char* properties[] = {"length", "fuzzle", "x", "0"};
+ for (size_t i = 0; i < arraysize(properties); i++) {
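+ // A freshly allocated empty array only has "length", so only
+ // properties[0] is expected to be present.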
+ bool expected_value = (i == 0);
+ BytecodeArrayBuilder builder(handles.main_isolate(), handles.main_zone());
+ Register r0(0);
+ builder.set_locals_count(1);
+ builder.set_parameter_count(0);
+ builder.LoadLiteral(factory->NewStringFromAsciiChecked(properties[i]))
+ .StoreAccumulatorInRegister(r0)
+ .LoadLiteral(Handle<Object>::cast(array))
+ .CompareOperation(Token::Value::IN, r0, LanguageMode::SLOPPY)
+ .Return();
+
+ Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray();
+ InterpreterTester tester(handles.main_isolate(), bytecode_array);
+ auto callable = tester.GetCallable<>();
+ Handle<Object> return_value = callable().ToHandleChecked();
+ CHECK(return_value->IsBoolean());
+ CHECK_EQ(return_value->BooleanValue(), expected_value);
+ }
+}
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc
index 74388d1785..b9d0f61ef8 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/test-alloc.cc
@@ -30,26 +30,16 @@
#include "src/accessors.h"
#include "src/api.h"
+#include "test/cctest/heap-tester.h"
using namespace v8::internal;
-static AllocationResult AllocateAfterFailures() {
- static int attempts = 0;
-
- // The first 4 times we simulate a full heap, by returning retry.
- if (++attempts < 4) return AllocationResult::Retry();
-
- // Expose some private stuff on Heap.
- TestHeap* heap = CcTest::test_heap();
-
- // Now that we have returned 'retry' 4 times, we are in a last-chance
- // scenario, with always_allocate. See CALL_AND_RETRY. Test that all
- // allocations succeed.
+AllocationResult v8::internal::HeapTester::AllocateAfterFailures() {
+ Heap* heap = CcTest::heap();
// New space.
- SimulateFullSpace(heap->new_space());
heap->AllocateByteArray(100).ToObjectChecked();
heap->AllocateFixedArray(100, NOT_TENURED).ToObjectChecked();
@@ -97,16 +87,21 @@ static AllocationResult AllocateAfterFailures() {
}
-static Handle<Object> Test() {
- CALL_HEAP_FUNCTION(CcTest::i_isolate(), AllocateAfterFailures(), Object);
+Handle<Object> v8::internal::HeapTester::TestAllocateAfterFailures() {
+ // Similar to what the CALL_AND_RETRY macro does in the last-resort case, we
+ // wrap the allocator call in an AlwaysAllocateScope. Test that all
+ // allocations succeed immediately without any retry.
+ CcTest::heap()->CollectAllAvailableGarbage("panic");
+ AlwaysAllocateScope scope(CcTest::i_isolate());
+ return handle(AllocateAfterFailures().ToObjectChecked(), CcTest::i_isolate());
}
-TEST(StressHandles) {
+HEAP_TEST(StressHandles) {
v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Context> env = v8::Context::New(CcTest::isolate());
env->Enter();
- Handle<Object> o = Test();
+ Handle<Object> o = TestAllocateAfterFailures();
CHECK(o->IsTrue());
env->Exit();
}
@@ -117,7 +112,8 @@ void TestGetter(
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
- info.GetReturnValue().Set(v8::Utils::ToLocal(Test()));
+ info.GetReturnValue().Set(v8::Utils::ToLocal(
+ v8::internal::HeapTester::TestAllocateAfterFailures()));
}
@@ -222,9 +218,12 @@ TEST(CodeRange) {
(Page::kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
size_t allocated = 0;
- Address base = code_range.AllocateRawMemory(requested,
- requested,
- &allocated);
+
+ // The request size has to be at least 2 code guard pages larger than the
+ // actual commit size.
+ Address base = code_range.AllocateRawMemory(
+ requested, requested - (2 * MemoryAllocator::CodePageGuardSize()),
+ &allocated);
CHECK(base != NULL);
blocks.Add(::Block(base, static_cast<int>(allocated)));
current_allocated += static_cast<int>(allocated);
@@ -241,6 +240,4 @@ TEST(CodeRange) {
}
}
}
-
- code_range.TearDown();
}
diff --git a/deps/v8/test/cctest/test-api-accessors.cc b/deps/v8/test/cctest/test-api-accessors.cc
new file mode 100644
index 0000000000..f6d1ef0718
--- /dev/null
+++ b/deps/v8/test/cctest/test-api-accessors.cc
@@ -0,0 +1,93 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(jochen): Remove this after the setting is turned on globally.
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
+#include "test/cctest/cctest.h"
+
+#include "include/v8.h"
+
+
+#ifdef V8_JS_ACCESSORS
+static void CppAccessor(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(42);
+}
+
+static const char* JsAccessor =
+ "function firstChildJS(value) { return 41; }; firstChildJS";
+
+TEST(JavascriptAccessors) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ LocalContext env;
+
+ // We emulate embedder-created DOM Node instances. Specifically:
+ // - 'parent': FunctionTemplate ~= DOM Node superclass
+ // - 'child': FunctionTemplate ~= a specific DOM node type, like a <div />
+ //
+ // We'll install both a C++-based and a JS-based accessor on the parent,
+ // and expect both to be callable on the child.
+
+ // Set up the parent template (=~ DOM Node w/ accessors).
+ v8::Local<v8::FunctionTemplate> parent = v8::FunctionTemplate::New(isolate);
+ {
+ auto signature = v8::Signature::New(isolate, parent);
+
+ // cpp accessor as "firstChild":
+ parent->PrototypeTemplate()->SetAccessorProperty(
+ v8_str("firstChild"),
+ v8::FunctionTemplate::New(isolate, CppAccessor, v8::Local<v8::Value>(),
+ signature));
+
+ // JS accessor as "firstChildJS":
+ auto js_accessor = v8::Local<v8::Function>::Cast(CompileRun(JsAccessor));
+ parent->PrototypeTemplate()->SetAccessorProperty(v8_str("firstChildJS"),
+ js_accessor);
+ }
+
+ // Set up the child object (=~ a specific DOM node, e.g. a <div>).
+ // Also expose its constructor on the global object, so the test code below
+ // can create instances.
+ v8::Local<v8::FunctionTemplate> child = v8::FunctionTemplate::New(isolate);
+ child->Inherit(parent);
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("Node"),
+ child->GetFunction(env.local()).ToLocalChecked())
+ .IsJust());
+
+ // Setup done; let's test it:
+
+ // The simple case: Run it once.
+ ExpectInt32("var n = new Node(); n.firstChild", 42);
+ ExpectInt32("var n = new Node(); n.firstChildJS", 41);
+
+ // Run them in a loop. This will likely trigger the optimizing compiler:
+ ExpectInt32(
+ "var m = new Node(); "
+ "var sum = 0; "
+ "for (var i = 0; i < 3; ++i) { "
+ " sum += m.firstChild; "
+ " sum += m.firstChildJS; "
+ "}; "
+ "sum;",
+ 3 * (42 + 41));
+
+ // Obtain the accessor and call it via apply on the Node:
+ ExpectInt32(
+ "var n = new Node(); "
+ "var g = Object.getOwnPropertyDescriptor("
+ " n.__proto__.__proto__, 'firstChild')['get']; "
+ "g.apply(n);",
+ 42);
+ ExpectInt32(
+ "var n = new Node(); "
+ "var g = Object.getOwnPropertyDescriptor("
+ " n.__proto__.__proto__, 'firstChildJS')['get']; "
+ "g.apply(n);",
+ 41);
+
+ // TODO(vogelheim): Verify compatible receiver check works.
+}
+#endif // V8_JS_ACCESSORS
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index e0f9d30b7c..ab44af7f93 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -50,6 +50,7 @@
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "src/vm-state.h"
+#include "test/cctest/heap-tester.h"
static const bool kLogThreading = false;
@@ -6686,7 +6687,11 @@ static void ResetUseValueAndSetFlag(
}
-static void ResetWeakHandle(bool global_gc) {
+void v8::internal::HeapTester::ResetWeakHandle(bool global_gc) {
+ using v8::Context;
+ using v8::Local;
+ using v8::Object;
+
v8::Isolate* iso = CcTest::isolate();
v8::HandleScope scope(iso);
v8::Handle<Context> context = Context::New(iso);
@@ -6701,8 +6706,7 @@ static void ResetWeakHandle(bool global_gc) {
object_a.handle.Reset(iso, a);
object_b.handle.Reset(iso, b);
if (global_gc) {
- CcTest::heap()->CollectAllGarbage(
- TestHeap::Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
} else {
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
}
@@ -6720,8 +6724,7 @@ static void ResetWeakHandle(bool global_gc) {
CHECK(object_b.handle.IsIndependent());
}
if (global_gc) {
- CcTest::heap()->CollectAllGarbage(
- TestHeap::Heap::kAbortIncrementalMarkingMask);
+ CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
} else {
CcTest::heap()->CollectGarbage(i::NEW_SPACE);
}
@@ -6730,9 +6733,9 @@ static void ResetWeakHandle(bool global_gc) {
}
-THREADED_TEST(ResetWeakHandle) {
- ResetWeakHandle(false);
- ResetWeakHandle(true);
+THREADED_HEAP_TEST(ResetWeakHandle) {
+ v8::internal::HeapTester::ResetWeakHandle(false);
+ v8::internal::HeapTester::ResetWeakHandle(true);
}
@@ -8789,60 +8792,6 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
}
-TEST(SuperAccessControl) {
- i::FLAG_allow_natives_syntax = true;
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> obj_template =
- v8::ObjectTemplate::New(isolate);
- obj_template->SetAccessCheckCallbacks(AccessAlwaysBlocked, NULL);
- LocalContext env;
- env->Global()->Set(v8_str("prohibited"), obj_template->NewInstance());
-
- {
- v8::TryCatch try_catch(isolate);
- CompileRun(
- "var f = { m() { return super.hasOwnProperty; } }.m;"
- "var m = %ToMethod(f, prohibited);"
- "m();");
- CHECK(try_catch.HasCaught());
- }
-
- {
- v8::TryCatch try_catch(isolate);
- CompileRun(
- "var f = {m() { return super[42]; } }.m;"
- "var m = %ToMethod(f, prohibited);"
- "m();");
- CHECK(try_catch.HasCaught());
- }
-
- {
- v8::TryCatch try_catch(isolate);
- CompileRun(
- "var f = {m() { super.hasOwnProperty = function () {}; } }.m;"
- "var m = %ToMethod(f, prohibited);"
- "m();");
- CHECK(try_catch.HasCaught());
- }
-
- {
- v8::TryCatch try_catch(isolate);
- CompileRun(
- "Object.defineProperty(Object.prototype, 'x', { set : function(){}});"
- "var f = {"
- " m() { "
- " 'use strict';"
- " super.x = function () {};"
- " }"
- "}.m;"
- "var m = %ToMethod(f, prohibited);"
- "m();");
- CHECK(try_catch.HasCaught());
- }
-}
-
-
TEST(Regress470113) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
@@ -9792,8 +9741,8 @@ THREADED_TEST(ConstructorForObject) {
value = instance->CallAsConstructor(1, args);
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value2(try_catch.Exception());
- CHECK_EQ(
- 0, strcmp("TypeError: #<Object> is not a function", *exception_value2));
+ CHECK_EQ(0,
+ strcmp("TypeError: object is not a function", *exception_value2));
try_catch.Reset();
}
@@ -10162,8 +10111,8 @@ THREADED_TEST(CallAsFunction) {
CHECK(value.IsEmpty());
CHECK(try_catch.HasCaught());
String::Utf8Value exception_value2(try_catch.Exception());
- CHECK_EQ(0, strcmp("TypeError: [object Object] is not a function",
- *exception_value2));
+ CHECK_EQ(0,
+ strcmp("TypeError: object is not a function", *exception_value2));
try_catch.Reset();
}
@@ -14242,32 +14191,6 @@ THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
}
-THREADED_TEST(SkipArrayBufferDuringScavenge) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope handle_scope(isolate);
-
- // Make sure the pointer looks like a heap object
- Local<v8::Object> tmp = v8::Object::New(isolate);
- uint8_t* store_ptr =
- reinterpret_cast<uint8_t*>(*reinterpret_cast<uintptr_t*>(*tmp));
-
- // Make `store_ptr` point to from space
- CcTest::heap()->CollectGarbage(i::NEW_SPACE);
-
- // Create ArrayBuffer with pointer-that-cannot-be-visited in the backing store
- Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(isolate, store_ptr, 8);
-
- // Should not crash,
- // i.e. backing store pointer should not be treated as a heap object pointer
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
-
- // Use `ab` to silence compiler warning
- CHECK_EQ(ab->GetContents().Data(), store_ptr);
-}
-
-
THREADED_TEST(SharedUint8Array) {
i::FLAG_harmony_sharedarraybuffer = true;
TypedArrayTestHelper<uint8_t, v8::Uint8Array, i::FixedUint8Array,
@@ -15861,7 +15784,8 @@ THREADED_TEST(QuietSignalingNaNs) {
uint64_t stored_bits = DoubleToBits(stored_number);
// Check if quiet nan (bits 51..62 all set).
#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
- !defined(_MIPS_ARCH_MIPS64R6) && !defined(USE_SIMULATOR)
+ !defined(_MIPS_ARCH_MIPS64R6) && !defined(_MIPS_ARCH_MIPS32R6) && \
+ !defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754.
CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
@@ -15882,7 +15806,8 @@ THREADED_TEST(QuietSignalingNaNs) {
uint64_t stored_bits = DoubleToBits(stored_date);
// Check if quiet nan (bits 51..62 all set).
#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
- !defined(_MIPS_ARCH_MIPS64R6) && !defined(USE_SIMULATOR)
+ !defined(_MIPS_ARCH_MIPS64R6) && !defined(_MIPS_ARCH_MIPS32R6) && \
+ !defined(USE_SIMULATOR)
// Most significant fraction bit for quiet nan is set to 0
// on MIPS architecture. Allowed by IEEE-754.
CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
@@ -16115,8 +16040,7 @@ THREADED_TEST(FunctionGetDisplayName) {
"}};"
"var g = function() {"
" arguments.callee.displayName = 'set_in_runtime';"
- "}; g();"
- ;
+ "}; g();";
v8::ScriptOrigin origin =
v8::ScriptOrigin(v8::String::NewFromUtf8(env->GetIsolate(), "test"));
v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), code), &origin)
@@ -17932,7 +17856,7 @@ TEST(SetErrorMessageForCodeGenFromStrings) {
v8::HandleScope scope(context->GetIsolate());
TryCatch try_catch(context->GetIsolate());
- Handle<String> message = v8_str("Message") ;
+ Handle<String> message = v8_str("Message");
Handle<String> expected_message = v8_str("Uncaught EvalError: Message");
V8::SetAllowCodeGenerationFromStringsCallback(&CodeGenerationDisallowed);
context->AllowCodeGenerationFromStrings(false);
@@ -19296,7 +19220,6 @@ void CheckCorrectThrow(const char* script) {
TEST(AccessCheckThrows) {
i::FLAG_allow_natives_syntax = true;
- i::FLAG_turbo_try_catch = true;
v8::V8::Initialize();
v8::V8::SetFailedAccessCheckCallbackFunction(&FailedAccessCheckThrows);
v8::Isolate* isolate = CcTest::isolate();
@@ -19346,8 +19269,7 @@ TEST(AccessCheckThrows) {
CheckCorrectThrow("%DeleteProperty_Sloppy(other, '1')");
CheckCorrectThrow("%DeleteProperty_Strict(other, '1')");
CheckCorrectThrow("%HasOwnProperty(other, 'x')");
- CheckCorrectThrow("%HasProperty(other, 'x')");
- CheckCorrectThrow("%HasElement(other, 1)");
+ CheckCorrectThrow("%HasProperty('x', other)");
CheckCorrectThrow("%IsPropertyEnumerable(other, 'x')");
// PROPERTY_ATTRIBUTES_NONE = 0
CheckCorrectThrow("%DefineAccessorPropertyUnchecked("
@@ -21664,14 +21586,14 @@ TEST(StrongObjectDelete) {
}
-static void ExtrasExportsTestRuntimeFunction(
+static void ExtrasBindingTestRuntimeFunction(
const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK_EQ(3, args[0]->Int32Value());
args.GetReturnValue().Set(v8_num(7));
}
-TEST(ExtrasExportsObject) {
+TEST(ExtrasBindingObject) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
LocalContext env;
@@ -21687,7 +21609,7 @@ TEST(ExtrasExportsObject) {
CHECK_EQ(5, result->Int32Value());
v8::Handle<v8::FunctionTemplate> runtimeFunction =
- v8::FunctionTemplate::New(isolate, ExtrasExportsTestRuntimeFunction);
+ v8::FunctionTemplate::New(isolate, ExtrasBindingTestRuntimeFunction);
binding->Set(v8_str("runtime"), runtimeFunction->GetFunction());
func =
binding->Get(v8_str("testExtraShouldCallToRuntime")).As<v8::Function>();
@@ -21696,6 +21618,72 @@ TEST(ExtrasExportsObject) {
}
+TEST(ExperimentalExtras) {
+ i::FLAG_experimental_extras = true;
+
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext env;
+
+ // standalone.gypi ensures we include the test-experimental-extra.js file,
+ // which should export the tested functions.
+ v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
+
+ auto func = binding->Get(v8_str("testExperimentalExtraShouldReturnTen"))
+ .As<v8::Function>();
+ auto undefined = v8::Undefined(isolate);
+ auto result = func->Call(undefined, 0, {}).As<v8::Number>();
+ CHECK_EQ(10, result->Int32Value());
+
+ v8::Handle<v8::FunctionTemplate> runtimeFunction =
+ v8::FunctionTemplate::New(isolate, ExtrasBindingTestRuntimeFunction);
+ binding->Set(v8_str("runtime"), runtimeFunction->GetFunction());
+ func = binding->Get(v8_str("testExperimentalExtraShouldCallToRuntime"))
+ .As<v8::Function>();
+ result = func->Call(undefined, 0, {}).As<v8::Number>();
+ CHECK_EQ(7, result->Int32Value());
+}
+
+
+TEST(ExtrasUtilsObject) {
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope handle_scope(isolate);
+
+ LocalContext env;
+ v8::Local<v8::Object> binding = env->GetExtrasBindingObject();
+
+ auto func = binding->Get(v8_str("testExtraCanUseUtils")).As<v8::Function>();
+ auto undefined = v8::Undefined(isolate);
+ auto result = func->Call(undefined, 0, {}).As<v8::Object>();
+
+ auto private_symbol = result->Get(v8_str("privateSymbol")).As<v8::Symbol>();
+ i::Handle<i::Symbol> ips = v8::Utils::OpenHandle(*private_symbol);
+ CHECK_EQ(true, ips->IsPrivate());
+
+ CompileRun("var result = 0; function store(x) { result = x; }");
+ auto store = CompileRun("store").As<v8::Function>();
+
+ auto fulfilled_promise =
+ result->Get(v8_str("fulfilledPromise")).As<v8::Promise>();
+ fulfilled_promise->Then(store);
+ isolate->RunMicrotasks();
+ CHECK_EQ(1, CompileRun("result")->Int32Value());
+
+ auto fulfilled_promise_2 =
+ result->Get(v8_str("fulfilledPromise2")).As<v8::Promise>();
+ fulfilled_promise_2->Then(store);
+ isolate->RunMicrotasks();
+ CHECK_EQ(2, CompileRun("result")->Int32Value());
+
+ auto rejected_promise =
+ result->Get(v8_str("rejectedPromise")).As<v8::Promise>();
+ rejected_promise->Catch(store);
+ isolate->RunMicrotasks();
+ CHECK_EQ(3, CompileRun("result")->Int32Value());
+}
+
+
TEST(Map) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
@@ -21851,7 +21839,6 @@ class FutexInterruptionThread : public v8::base::Thread {
TEST(FutexInterruption) {
i::FLAG_harmony_sharedarraybuffer = true;
- i::FLAG_harmony_atomics = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
LocalContext env;
@@ -21866,6 +21853,7 @@ TEST(FutexInterruption) {
"var i32a = new Int32Array(ab);"
"Atomics.futexWait(i32a, 0, 0);");
CHECK(try_catch.HasTerminated());
+ timeout_thread.Join();
}
@@ -21874,34 +21862,4 @@ TEST(EstimatedContextSize) {
v8::HandleScope scope(isolate);
LocalContext env;
CHECK(50000 < env->EstimatedSize());
-
-
-static int nb_uncaught_exception_callback_calls = 0;
-
-
-bool NoAbortOnUncaughtException(v8::Isolate* isolate) {
- ++nb_uncaught_exception_callback_calls;
- return false;
-}
-
-
-TEST(AbortOnUncaughtExceptionNoAbort) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template =
- v8::ObjectTemplate::New(isolate);
- LocalContext env(NULL, global_template);
-
- i::FLAG_abort_on_uncaught_exception = true;
- isolate->SetAbortOnUncaughtExceptionCallback(NoAbortOnUncaughtException);
-
- CompileRun("function boom() { throw new Error(\"boom\") }");
-
- v8::Local<v8::Object> global_object = env->Global();
- v8::Local<v8::Function> foo =
- v8::Local<v8::Function>::Cast(global_object->Get(v8_str("boom")));
-
- foo->Call(global_object, 0, NULL);
-
- CHECK_EQ(1, nb_uncaught_exception_callback_calls);
}
diff --git a/deps/v8/test/cctest/test-api.h b/deps/v8/test/cctest/test-api.h
index 17e0711af5..379fe9c9c2 100644
--- a/deps/v8/test/cctest/test-api.h
+++ b/deps/v8/test/cctest/test-api.h
@@ -4,8 +4,8 @@
#include "src/v8.h"
-#include "src/cpu-profiler.h"
#include "src/isolate.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/vm-state.h"
#include "test/cctest/cctest.h"
@@ -20,7 +20,7 @@ static void CheckReturnValue(const T& t, i::Address callback) {
bool is_runtime = (*o)->IsTheHole();
rv.Set(true);
CHECK(!(*o)->IsTheHole() && !(*o)->IsUndefined());
- rv.Set(v8::Handle<v8::Object>());
+ rv.Set(v8::Local<v8::Object>());
CHECK((*o)->IsTheHole() || (*o)->IsUndefined());
CHECK_EQ(is_runtime, (*o)->IsTheHole());
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(t.GetIsolate());
diff --git a/deps/v8/test/cctest/test-asm-validator.cc b/deps/v8/test/cctest/test-asm-validator.cc
new file mode 100644
index 0000000000..0cc71ec2f5
--- /dev/null
+++ b/deps/v8/test/cctest/test-asm-validator.cc
@@ -0,0 +1,915 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/ast-expression-visitor.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+#include "src/typing-asm.h"
+#include "src/zone-type-cache.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/expression-type-collector.h"
+#include "test/cctest/expression-type-collector-macros.h"
+
+// Macros for function types.
+#define FUNC_V_TYPE Bounds(Type::Function(Type::Undefined(), zone))
+#define FUNC_I_TYPE Bounds(Type::Function(cache.kInt32, zone))
+#define FUNC_F_TYPE Bounds(Type::Function(cache.kFloat32, zone))
+#define FUNC_D_TYPE Bounds(Type::Function(cache.kFloat64, zone))
+#define FUNC_D2D_TYPE \
+ Bounds(Type::Function(cache.kFloat64, cache.kFloat64, zone))
+#define FUNC_N2F_TYPE \
+ Bounds(Type::Function(cache.kFloat32, Type::Number(), zone))
+#define FUNC_I2I_TYPE Bounds(Type::Function(cache.kInt32, cache.kInt32, zone))
+#define FUNC_II2D_TYPE \
+ Bounds(Type::Function(cache.kFloat64, cache.kInt32, cache.kInt32, zone))
+#define FUNC_II2I_TYPE \
+ Bounds(Type::Function(cache.kInt32, cache.kInt32, cache.kInt32, zone))
+#define FUNC_DD2D_TYPE \
+ Bounds(Type::Function(cache.kFloat64, cache.kFloat64, cache.kFloat64, zone))
+#define FUNC_N2N_TYPE \
+ Bounds(Type::Function(Type::Number(), Type::Number(), zone))
+
+// Macros for array types.
+#define FLOAT64_ARRAY_TYPE Bounds(Type::Array(cache.kFloat64, zone))
+#define FUNC_I2I_ARRAY_TYPE \
+ Bounds(Type::Array(Type::Function(cache.kInt32, cache.kInt32, zone), zone))
+
+using namespace v8::internal;
+
+namespace {
+
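+// Parses |source|, runs the asm.js typer on the first declared function and,
+// on success, records the type bounds of every expression in |types|;
+// otherwise returns the typer's error message.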
+std::string Validate(Zone* zone, const char* source,
+ ZoneVector<ExpressionTypeEntry>* types) {
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+
+ i::Handle<i::String> source_code =
+ factory->NewStringFromUtf8(i::CStrVector(source)).ToHandleChecked();
+
+ i::Handle<i::Script> script = factory->NewScript(source_code);
+
+ i::ParseInfo info(zone, script);
+ i::Parser parser(&info);
+ parser.set_allow_harmony_arrow_functions(true);
+ parser.set_allow_harmony_sloppy(true);
+ info.set_global();
+ info.set_lazy(false);
+ info.set_allow_lazy_parsing(false);
+ info.set_toplevel(true);
+
+ CHECK(i::Compiler::ParseAndAnalyze(&info));
+
+ FunctionLiteral* root =
+ info.scope()->declarations()->at(0)->AsFunctionDeclaration()->fun();
+ AsmTyper typer(isolate, zone, *script, root);
+ if (typer.Validate()) {
+ ExpressionTypeCollector(isolate, zone, root, types).Run();
+ return "";
+ } else {
+ return typer.error_message();
+ }
+}
+}  // namespace
+
+
+TEST(ValidateMinimum) {
+ const char test_function[] =
+ "function GeometricMean(stdlib, foreign, buffer) {\n"
+ " \"use asm\";\n"
+ "\n"
+ " var exp = stdlib.Math.exp;\n"
+ " var log = stdlib.Math.log;\n"
+ " var values = new stdlib.Float64Array(buffer);\n"
+ "\n"
+ " function logSum(start, end) {\n"
+ " start = start|0;\n"
+ " end = end|0;\n"
+ "\n"
+ " var sum = 0.0, p = 0, q = 0;\n"
+ "\n"
+ " // asm.js forces byte addressing of the heap by requiring shifting "
+ "by 3\n"
+ " for (p = start << 3, q = end << 3; (p|0) < (q|0); p = (p + 8)|0) {\n"
+ " sum = sum + +log(values[p>>3]);\n"
+ " }\n"
+ "\n"
+ " return +sum;\n"
+ " }\n"
+ "\n"
+ " function geometricMean(start, end) {\n"
+ " start = start|0;\n"
+ " end = end|0;\n"
+ "\n"
+ " return +exp(+logSum(start, end) / +((end - start)|0));\n"
+ " }\n"
+ "\n"
+ " return { geometricMean: geometricMean };\n"
+ "}\n";
+
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ("", Validate(zone, test_function, &types));
+ ZoneTypeCache cache;
+
+ CHECK_TYPES_BEGIN {
+ // Module.
+ CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
+ // function logSum
+ CHECK_EXPR(FunctionLiteral, FUNC_II2D_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(start, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(start, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(end, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(end, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kFloat64)) {
+ CHECK_VAR(sum, Bounds(cache.kFloat64));
+ CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(p, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(q, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ // for (p = start << 3, q = end << 3;
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(p, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(start, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(q, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(end, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ }
+ // (p|0) < (q|0);
+ CHECK_EXPR(CompareOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(p, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(q, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+      // p = (p + 8)|0
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(p, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(p, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ // sum = sum + +log(values[p>>3]);
+ CHECK_EXPR(Assignment, Bounds(cache.kFloat64)) {
+ CHECK_VAR(sum, Bounds(cache.kFloat64));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
+ CHECK_VAR(sum, Bounds(cache.kFloat64));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
+ CHECK_EXPR(Call, Bounds(cache.kFloat64)) {
+ CHECK_VAR(log, FUNC_D2D_TYPE);
+ CHECK_EXPR(Property, Bounds(cache.kFloat64)) {
+ CHECK_VAR(values, FLOAT64_ARRAY_TYPE);
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(p, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ }
+ }
+ }
+ // return +sum;
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
+ CHECK_VAR(sum, Bounds(cache.kFloat64));
+ CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ }
+ }
+ // function geometricMean
+ CHECK_EXPR(FunctionLiteral, FUNC_II2D_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(start, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(start, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(end, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(end, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ // return +exp(+logSum(start, end) / +((end - start)|0));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
+ CHECK_EXPR(Call, Bounds(cache.kFloat64)) {
+ CHECK_VAR(exp, FUNC_D2D_TYPE);
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
+ CHECK_EXPR(Call, Bounds(cache.kFloat64)) {
+ CHECK_VAR(logSum, FUNC_II2D_TYPE);
+ CHECK_VAR(start, Bounds(cache.kInt32));
+ CHECK_VAR(end, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(end, Bounds(cache.kInt32));
+ CHECK_VAR(start, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ }
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ }
+ }
+ // "use asm";
+ CHECK_EXPR(Literal, Bounds(Type::String()));
+ // var exp = stdlib.Math.exp;
+ CHECK_EXPR(Assignment, FUNC_D2D_TYPE) {
+ CHECK_VAR(exp, FUNC_D2D_TYPE);
+ CHECK_EXPR(Property, FUNC_D2D_TYPE) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(stdlib, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ // var log = stdlib.Math.log;
+ CHECK_EXPR(Assignment, FUNC_D2D_TYPE) {
+ CHECK_VAR(log, FUNC_D2D_TYPE);
+ CHECK_EXPR(Property, FUNC_D2D_TYPE) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(stdlib, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ // var values = new stdlib.Float64Array(buffer);
+ CHECK_EXPR(Assignment, FLOAT64_ARRAY_TYPE) {
+ CHECK_VAR(values, FLOAT64_ARRAY_TYPE);
+ CHECK_EXPR(CallNew, FLOAT64_ARRAY_TYPE) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(stdlib, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_VAR(buffer, Bounds::Unbounded());
+ }
+ }
+ // return { geometricMean: geometricMean };
+ CHECK_EXPR(ObjectLiteral, Bounds::Unbounded()) {
+ CHECK_VAR(geometricMean, FUNC_II2D_TYPE);
+ }
+ }
+ }
+ CHECK_TYPES_END
+}
+
+
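+// Boilerplate for the tests below: HARNESS_PREAMBLE() opens a module
+// that imports the whole asm.js stdlib (HARNESS_STDLIB) and one view of
+// each heap type (HARNESS_HEAP); HARNESS_POSTAMBLE() exports a function
+// named foo, which each test body is expected to define.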
+#define HARNESS_STDLIB() \
+ "var Infinity = stdlib.Infinity;\n" \
+ "var NaN = stdlib.NaN;\n" \
+ "var acos = stdlib.Math.acos;\n" \
+ "var asin = stdlib.Math.asin;\n" \
+ "var atan = stdlib.Math.atan;\n" \
+ "var cos = stdlib.Math.cos;\n" \
+ "var sin = stdlib.Math.sin;\n" \
+ "var tan = stdlib.Math.tan;\n" \
+ "var exp = stdlib.Math.exp;\n" \
+ "var log = stdlib.Math.log;\n" \
+ "var ceil = stdlib.Math.ceil;\n" \
+ "var floor = stdlib.Math.floor;\n" \
+ "var sqrt = stdlib.Math.sqrt;\n" \
+ "var min = stdlib.Math.min;\n" \
+ "var max = stdlib.Math.max;\n" \
+ "var atan2 = stdlib.Math.atan2;\n" \
+ "var pow = stdlib.Math.pow;\n" \
+ "var abs = stdlib.Math.abs;\n" \
+ "var imul = stdlib.Math.imul;\n" \
+ "var fround = stdlib.Math.fround;\n" \
+ "var E = stdlib.Math.E;\n" \
+ "var LN10 = stdlib.Math.LN10;\n" \
+ "var LN2 = stdlib.Math.LN2;\n" \
+ "var LOG2E = stdlib.Math.LOG2E;\n" \
+ "var LOG10E = stdlib.Math.LOG10E;\n" \
+ "var PI = stdlib.Math.PI;\n" \
+ "var SQRT1_2 = stdlib.Math.SQRT1_2;\n" \
+ "var SQRT2 = stdlib.Math.SQRT2;\n"
+
+
+#define HARNESS_HEAP() \
+ "var u8 = new stdlib.Uint8Array(buffer);\n" \
+ "var i8 = new stdlib.Int8Array(buffer);\n" \
+ "var u16 = new stdlib.Uint16Array(buffer);\n" \
+ "var i16 = new stdlib.Int16Array(buffer);\n" \
+ "var u32 = new stdlib.Uint32Array(buffer);\n" \
+ "var i32 = new stdlib.Int32Array(buffer);\n" \
+ "var f32 = new stdlib.Float32Array(buffer);\n" \
+ "var f64 = new stdlib.Float64Array(buffer);\n"
+
+
+#define HARNESS_PREAMBLE() \
+ const char test_function[] = \
+ "function Module(stdlib, foreign, buffer) {\n" \
+ "\"use asm\";\n" HARNESS_STDLIB() HARNESS_HEAP()
+
+
+#define HARNESS_POSTAMBLE() \
+ "return { foo: foo };\n" \
+ "}\n";
+
+
+#define CHECK_VAR_MATH_SHORTCUT(name, type) \
+ CHECK_EXPR(Assignment, type) { \
+ CHECK_VAR(name, type); \
+ CHECK_EXPR(Property, type) { \
+ CHECK_EXPR(Property, Bounds::Unbounded()) { \
+ CHECK_VAR(stdlib, Bounds::Unbounded()); \
+ CHECK_EXPR(Literal, Bounds::Unbounded()); \
+ } \
+ CHECK_EXPR(Literal, Bounds::Unbounded()); \
+ } \
+ }
+
+
+#define CHECK_VAR_SHORTCUT(name, type) \
+ CHECK_EXPR(Assignment, type) { \
+ CHECK_VAR(name, type); \
+ CHECK_EXPR(Property, type) { \
+ CHECK_VAR(stdlib, Bounds::Unbounded()); \
+ CHECK_EXPR(Literal, Bounds::Unbounded()); \
+ } \
+ }
+
+
+#define CHECK_VAR_NEW_SHORTCUT(name, type) \
+ CHECK_EXPR(Assignment, type) { \
+ CHECK_VAR(name, type); \
+ CHECK_EXPR(CallNew, type) { \
+ CHECK_EXPR(Property, Bounds::Unbounded()) { \
+ CHECK_VAR(stdlib, Bounds::Unbounded()); \
+ CHECK_EXPR(Literal, Bounds::Unbounded()); \
+ } \
+ CHECK_VAR(buffer, Bounds::Unbounded()); \
+ } \
+ }
+
+
+namespace {
+
+void CheckStdlibShortcuts(Zone* zone, ZoneVector<ExpressionTypeEntry>& types,
+ size_t& index, int& depth, ZoneTypeCache& cache) {
+  // var <name> = stdlib.<constant>; (D * 2)
+ CHECK_VAR_SHORTCUT(Infinity, Bounds(cache.kFloat64));
+ CHECK_VAR_SHORTCUT(NaN, Bounds(cache.kFloat64));
+  // var <fn> = stdlib.Math.<fn>; (D2D * 11)
+ CHECK_VAR_MATH_SHORTCUT(acos, FUNC_D2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(asin, FUNC_D2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(atan, FUNC_D2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(cos, FUNC_D2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(sin, FUNC_D2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(tan, FUNC_D2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(exp, FUNC_D2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(log, FUNC_D2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(ceil, FUNC_D2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(floor, FUNC_D2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(sqrt, FUNC_D2D_TYPE);
+  // var <fn> = stdlib.Math.<fn>; (DD2D * 4)
+ CHECK_VAR_MATH_SHORTCUT(min, FUNC_DD2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(max, FUNC_DD2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(atan2, FUNC_DD2D_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(pow, FUNC_DD2D_TYPE);
+  // Functions with special (non-D2D) signatures.
+ CHECK_VAR_MATH_SHORTCUT(abs, FUNC_N2N_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(imul, FUNC_II2I_TYPE);
+ CHECK_VAR_MATH_SHORTCUT(fround, FUNC_N2F_TYPE);
+  // var <name> = stdlib.Math.<constant>; (D * 8)
+ CHECK_VAR_MATH_SHORTCUT(E, Bounds(cache.kFloat64));
+ CHECK_VAR_MATH_SHORTCUT(LN10, Bounds(cache.kFloat64));
+ CHECK_VAR_MATH_SHORTCUT(LN2, Bounds(cache.kFloat64));
+ CHECK_VAR_MATH_SHORTCUT(LOG2E, Bounds(cache.kFloat64));
+ CHECK_VAR_MATH_SHORTCUT(LOG10E, Bounds(cache.kFloat64));
+ CHECK_VAR_MATH_SHORTCUT(PI, Bounds(cache.kFloat64));
+ CHECK_VAR_MATH_SHORTCUT(SQRT1_2, Bounds(cache.kFloat64));
+ CHECK_VAR_MATH_SHORTCUT(SQRT2, Bounds(cache.kFloat64));
+ // var values = new stdlib.*Array(buffer);
+ CHECK_VAR_NEW_SHORTCUT(u8, Bounds(cache.kUint8Array));
+ CHECK_VAR_NEW_SHORTCUT(i8, Bounds(cache.kInt8Array));
+ CHECK_VAR_NEW_SHORTCUT(u16, Bounds(cache.kUint16Array));
+ CHECK_VAR_NEW_SHORTCUT(i16, Bounds(cache.kInt16Array));
+ CHECK_VAR_NEW_SHORTCUT(u32, Bounds(cache.kUint32Array));
+ CHECK_VAR_NEW_SHORTCUT(i32, Bounds(cache.kInt32Array));
+ CHECK_VAR_NEW_SHORTCUT(f32, Bounds(cache.kFloat32Array));
+ CHECK_VAR_NEW_SHORTCUT(f64, Bounds(cache.kFloat64Array));
+}
+}
+
+
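+// CHECK_FUNC_TYPES_BEGIN/END wrap a test function in the harness above
+// and verify the types of the module prologue and epilogue. The END
+// macro is split in two so that tests with module-level declarations of
+// their own (e.g. function tables) can check them between the stdlib
+// shortcuts and the export object.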
+#define CHECK_FUNC_TYPES_BEGIN(func) \
+ HARNESS_PREAMBLE() \
+ func "\n" HARNESS_POSTAMBLE(); \
+ \
+ v8::V8::Initialize(); \
+ HandleAndZoneScope handles; \
+ Zone* zone = handles.main_zone(); \
+ ZoneVector<ExpressionTypeEntry> types(zone); \
+ CHECK_EQ("", Validate(zone, test_function, &types)); \
+ ZoneTypeCache cache; \
+ \
+ CHECK_TYPES_BEGIN { \
+ /* Module. */ \
+ CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
+#define CHECK_FUNC_TYPES_END_1() \
+ /* "use asm"; */ \
+ CHECK_EXPR(Literal, Bounds(Type::String())); \
+ /* stdlib shortcuts. */ \
+ CheckStdlibShortcuts(zone, types, index, depth, cache);
+
+
+#define CHECK_FUNC_TYPES_END_2() \
+ /* return { foo: foo }; */ \
+ CHECK_EXPR(ObjectLiteral, Bounds::Unbounded()) { \
+ CHECK_VAR(foo, FUNC_V_TYPE); \
+ } \
+ } \
+ } \
+ CHECK_TYPES_END
+
+
+#define CHECK_FUNC_TYPES_END \
+ CHECK_FUNC_TYPES_END_1(); \
+ CHECK_FUNC_TYPES_END_2();
+
+
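+// Wraps |func| in the harness and expects validation to fail with
+// exactly |message|.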
+#define CHECK_FUNC_ERROR(func, message) \
+ HARNESS_PREAMBLE() \
+ func "\n" HARNESS_POSTAMBLE(); \
+ \
+ v8::V8::Initialize(); \
+ HandleAndZoneScope handles; \
+ Zone* zone = handles.main_zone(); \
+ ZoneVector<ExpressionTypeEntry> types(zone); \
+ CHECK_EQ(message, Validate(zone, test_function, &types));
+
+
+TEST(BareHarness) {
+ CHECK_FUNC_TYPES_BEGIN("function foo() {}") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {}
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(ReturnVoid) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { return; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ // return undefined;
+ CHECK_EXPR(Literal, Bounds(Type::Undefined()));
+ }
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Call, Bounds(Type::Undefined())) {
+ CHECK_VAR(bar, FUNC_V_TYPE);
+ }
+ }
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(ReturnInt32Literal) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { return 1; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ // return 1;
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Call, Bounds(cache.kInt32)) { CHECK_VAR(bar, FUNC_I_TYPE); }
+ }
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(ReturnFloat64Literal) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { return 1.0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_D_TYPE) {
+ // return 1.0;
+ CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ }
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Call, Bounds(cache.kFloat64)) { CHECK_VAR(bar, FUNC_D_TYPE); }
+ }
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(ReturnFloat32Literal) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { return fround(1.0); }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_F_TYPE) {
+ // return fround(1.0);
+ CHECK_EXPR(Call, Bounds(cache.kFloat32)) {
+ CHECK_VAR(fround, FUNC_N2F_TYPE);
+ CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ }
+ }
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Call, Bounds(cache.kFloat32)) { CHECK_VAR(bar, FUNC_F_TYPE); }
+ }
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(ReturnFloat64Var) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1.0; return +x; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_D_TYPE) {
+      // var x = 1.0;
+ CHECK_EXPR(Assignment, Bounds(cache.kFloat64)) {
+ CHECK_VAR(x, Bounds(cache.kFloat64));
+ CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ }
+      // return +x;
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kFloat64)) {
+ CHECK_VAR(x, Bounds(cache.kFloat64));
+ CHECK_EXPR(Literal, Bounds(cache.kFloat64));
+ }
+ }
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Call, Bounds(cache.kFloat64)) { CHECK_VAR(bar, FUNC_D_TYPE); }
+ }
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(Addition2) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; var y = 2; return (x+y)|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(y, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_VAR(y, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
+TEST(Addition4) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; var y = 2; return (x+y+x+y)|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(y, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_VAR(y, Bounds(cache.kInt32));
+ }
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ }
+ CHECK_VAR(y, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
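+// asm.js forbids direct integer multiplication in general (Math.imul is
+// provided for that) and bounds chains of multiplicative operators; both
+// cases below should be rejected by the validator with an error message
+// rather than crashing.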
+TEST(Multiplication2) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 1; var y = 2; return (x*y)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: direct integer multiply forbidden\n");
+}
+
+
+TEST(Division4) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 1; var y = 2; return (x/y/x/y)|0; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: too many consecutive multiplicative ops\n");
+}
+
+
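+// Heap loads take the element type of the view: the i8[x>>0] load below
+// is typed Int8, and the trailing |0 widens the result to Int32.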
+TEST(Load1) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function bar() { var x = 1; var y = i8[x>>0]|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(y, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(Property, Bounds(cache.kInt8)) {
+ CHECK_VAR(i8, Bounds(cache.kInt8Array));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END
+}
+
+
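+// Function tables are module-level arrays of functions with identical
+// signatures, called as table[index & mask](args). The table declaration
+// itself is checked separately between END_1 and END_2 below.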
+TEST(FunctionTables) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "function func1(x) { x = x | 0; return (x * 5) | 0; }\n"
+ "function func2(x) { x = x | 0; return (x * 25) | 0; }\n"
+ "var table1 = [func1, func2];\n"
+ "function bar(x, y) { x = x | 0; y = y | 0;\n"
+ " return table1[x & 1](y)|0; }\n"
+ "function foo() { bar(1, 2); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I2I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_EXPR(FunctionLiteral, FUNC_I2I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_EXPR(FunctionLiteral, FUNC_II2I_TYPE) {
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds(cache.kInt32)) {
+ CHECK_VAR(y, Bounds(cache.kInt32));
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(y, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(Call, Bounds(cache.kInt32)) {
+ CHECK_EXPR(Property, FUNC_I2I_TYPE) {
+ CHECK_VAR(table1, FUNC_I2I_ARRAY_TYPE);
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_VAR(x, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_VAR(y, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_SKIP();
+ }
+ CHECK_FUNC_TYPES_END_1();
+ CHECK_EXPR(Assignment, FUNC_I2I_ARRAY_TYPE) {
+ CHECK_VAR(table1, FUNC_I2I_ARRAY_TYPE);
+ CHECK_EXPR(ArrayLiteral, FUNC_I2I_ARRAY_TYPE) {
+ CHECK_VAR(func1, FUNC_I2I_TYPE);
+ CHECK_VAR(func2, FUNC_I2I_TYPE);
+ }
+ }
+ CHECK_FUNC_TYPES_END_2();
+}
+
+
+TEST(BadFunctionTable) {
+ CHECK_FUNC_ERROR(
+ "function func1(x) { x = x | 0; return (x * 5) | 0; }\n"
+ "var table1 = [func1, 1];\n"
+ "function bar(x, y) { x = x | 0; y = y | 0;\n"
+ " return table1[x & 1](y)|0; }\n"
+ "function foo() { bar(1, 2); }",
+ "asm: line 40: array component expected to be a function\n");
+}
+
+
+TEST(MissingParameterTypes) {
+ CHECK_FUNC_ERROR(
+ "function bar(x) { var y = 1; }\n"
+ "function foo() { bar(2); }",
+ "asm: line 39: missing parameter type annotations\n");
+}
+
+
+TEST(InvalidTypeAnnotationBinaryOpDiv) {
+ CHECK_FUNC_ERROR(
+ "function bar(x) { x = x / 4; }\n"
+ "function foo() { bar(2); }",
+ "asm: line 39: invalid type annotation on binary op\n");
+}
+
+
+TEST(InvalidTypeAnnotationBinaryOpMul) {
+ CHECK_FUNC_ERROR(
+ "function bar(x) { x = x * 4.0; }\n"
+ "function foo() { bar(2); }",
+ "asm: line 39: invalid type annotation on binary op\n");
+}
+
+
+TEST(InvalidArgumentCount) {
+ CHECK_FUNC_ERROR(
+ "function bar(x) { return fround(4, 5); }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: invalid argument count calling fround\n");
+}
+
+
+TEST(InvalidTypeAnnotationArity) {
+ CHECK_FUNC_ERROR(
+ "function bar(x) { x = max(x); }\n"
+ "function foo() { bar(3); }",
+ "asm: line 39: only fround allowed on expression annotations\n");
+}
+
+
+TEST(InvalidTypeAnnotationOnlyFround) {
+ CHECK_FUNC_ERROR(
+ "function bar(x) { x = sin(x); }\n"
+ "function foo() { bar(3); }",
+ "asm: line 39: only fround allowed on expression annotations\n");
+}
+
+
+TEST(InvalidTypeAnnotation) {
+ CHECK_FUNC_ERROR(
+ "function bar(x) { x = (x+x)(x); }\n"
+ "function foo() { bar(3); }",
+ "asm: line 39: invalid type annotation\n");
+}
+
+
+TEST(WithStatement) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = 0; with (x) { x = x + 1; } }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: bad with statement\n");
+}
+
+
+TEST(NestedFunction) {
+ CHECK_FUNC_ERROR(
+ "function bar() { function x() { return 1; } }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: function declared inside another\n");
+}
+
+
+TEST(UnboundVariable) {
+ CHECK_FUNC_ERROR(
+ "function bar() { var x = y; }\n"
+ "function foo() { bar(); }",
+ "asm: line 39: unbound variable\n");
+}
+
+
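+// Imports from |foreign| are typed Any, and calls through them return
+// Number, so results must be coerced (here with |0) before further use.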
+TEST(ForeignFunction) {
+ CHECK_FUNC_TYPES_BEGIN(
+ "var baz = foreign.baz;\n"
+ "function bar() { return baz(1, 2)|0; }\n"
+ "function foo() { bar(); }") {
+ CHECK_EXPR(FunctionLiteral, FUNC_I_TYPE) {
+ CHECK_EXPR(BinaryOperation, Bounds(cache.kInt32)) {
+ CHECK_EXPR(Call, Bounds(Type::Number(zone))) {
+ CHECK_VAR(baz, Bounds(Type::Any()));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ CHECK_EXPR(Literal, Bounds(cache.kInt32));
+ }
+ }
+ CHECK_EXPR(FunctionLiteral, FUNC_V_TYPE) {
+ CHECK_EXPR(Call, Bounds(cache.kInt32)) { CHECK_VAR(bar, FUNC_I_TYPE); }
+ }
+ }
+ CHECK_FUNC_TYPES_END_1()
+ CHECK_EXPR(Assignment, Bounds(Type::Any())) {
+ CHECK_VAR(baz, Bounds(Type::Any()));
+ CHECK_EXPR(Property, Bounds(Type::Any())) {
+ CHECK_VAR(foreign, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ CHECK_FUNC_TYPES_END_2()
+}
+
+
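+// The export object may only contain functions, so exporting the literal
+// 1 must fail. (The validator appears to reuse the function-table error
+// message for this case.)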
+TEST(BadExports) {
+ HARNESS_PREAMBLE()
+ "function foo() {};\n"
+ "return {foo: foo, bar: 1};"
+ "}\n";
+
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ Zone* zone = handles.main_zone();
+ ZoneVector<ExpressionTypeEntry> types(zone);
+ CHECK_EQ("asm: line 40: non-function in function table\n",
+ Validate(zone, test_function, &types));
+}
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 3c2f840058..9b511851dd 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -191,12 +191,12 @@ static void InitializeVM() {
RESET(); \
START_AFTER_RESET();
-#define RUN() \
- CpuFeatures::FlushICache(buf, masm.SizeOfGeneratedCode()); \
- { \
- void (*test_function)(void); \
- memcpy(&test_function, &buf, sizeof(buf)); \
- test_function(); \
+#define RUN() \
+ Assembler::FlushICache(isolate, buf, masm.SizeOfGeneratedCode()); \
+ { \
+ void (*test_function)(void); \
+ memcpy(&test_function, &buf, sizeof(buf)); \
+ test_function(); \
}
#define END() \
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 63c9172f56..ab18b35e4a 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -1153,17 +1153,17 @@ TEST(MIPS13) {
MacroAssembler assm(isolate, NULL, 0);
__ sw(t0, MemOperand(a0, offsetof(T, cvt_small_in)));
- __ Cvt_d_uw(f10, t0, f22);
+ __ Cvt_d_uw(f10, t0, f4);
__ sdc1(f10, MemOperand(a0, offsetof(T, cvt_small_out)));
- __ Trunc_uw_d(f10, f10, f22);
+ __ Trunc_uw_d(f10, f10, f4);
__ swc1(f10, MemOperand(a0, offsetof(T, trunc_small_out)));
__ sw(t0, MemOperand(a0, offsetof(T, cvt_big_in)));
- __ Cvt_d_uw(f8, t0, f22);
+ __ Cvt_d_uw(f8, t0, f4);
__ sdc1(f8, MemOperand(a0, offsetof(T, cvt_big_out)));
- __ Trunc_uw_d(f8, f8, f22);
+ __ Trunc_uw_d(f8, f8, f4);
__ swc1(f8, MemOperand(a0, offsetof(T, trunc_big_out)));
__ jr(ra);
@@ -2509,7 +2509,7 @@ TEST(sqrt_rsqrt_recip) {
__ rsqrt_d(f14, f8);
__ rsqrt_s(f16, f2);
__ recip_d(f18, f8);
- __ recip_s(f20, f2);
+ __ recip_s(f4, f2);
}
__ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
__ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) );
@@ -2517,7 +2517,7 @@ TEST(sqrt_rsqrt_recip) {
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
__ swc1(f16, MemOperand(a0, offsetof(TestFloat, resultS1)) );
__ sdc1(f14, MemOperand(a0, offsetof(TestFloat, resultD1)) );
- __ swc1(f20, MemOperand(a0, offsetof(TestFloat, resultS2)) );
+ __ swc1(f4, MemOperand(a0, offsetof(TestFloat, resultS2)) );
__ sdc1(f18, MemOperand(a0, offsetof(TestFloat, resultD2)) );
}
__ jr(ra);
@@ -2706,12 +2706,13 @@ TEST(mov) {
5.3, -5.3, 5.3, -2.9
};
- __ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
+ __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
- __ mov_s(f18, f6);
- __ mov_d(f20, f2);
- __ swc1(f18, MemOperand(a0, offsetof(TestFloat, d)) );
- __ sdc1(f20, MemOperand(a0, offsetof(TestFloat, b)) );
+ __ mov_s(f8, f6);
+ __ mov_d(f10, f4);
+ __ swc1(f8, MemOperand(a0, offsetof(TestFloat, d)) );
+ __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, b)) );
+
__ jr(ra);
__ nop();
@@ -4678,7 +4679,6 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
Label stop_execution;
__ li(v0, 0);
__ li(t1, 0x66);
- __ push(ra);
__ addiu(v0, v0, 0x1); // <-- offset = -32
__ addiu(v0, v0, 0x2);
@@ -4698,7 +4698,6 @@ uint64_t run_beqzc(int32_t value, int32_t offset) {
__ nop();
__ bind(&stop_execution);
- __ pop(ra);
__ jr(ra);
__ nop();
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 00e364cfce..ee6755f259 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -590,11 +590,19 @@ TEST(MIPS6) {
USE(dummy);
CHECK_EQ(static_cast<int32_t>(0x11223344), t.r1);
- CHECK_EQ(static_cast<int32_t>(0x3344), t.r2);
- CHECK_EQ(static_cast<int32_t>(0xffffbbcc), t.r3);
- CHECK_EQ(static_cast<int32_t>(0x0000bbcc), t.r4);
- CHECK_EQ(static_cast<int32_t>(0xffffffcc), t.r5);
- CHECK_EQ(static_cast<int32_t>(0x3333bbcc), t.r6);
+ if (kArchEndian == kLittle) {
+ CHECK_EQ(static_cast<int32_t>(0x3344), t.r2);
+ CHECK_EQ(static_cast<int32_t>(0xffffbbcc), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x0000bbcc), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xffffffcc), t.r5);
+ CHECK_EQ(static_cast<int32_t>(0x3333bbcc), t.r6);
+ } else {
+ CHECK_EQ(static_cast<int32_t>(0x1122), t.r2);
+ CHECK_EQ(static_cast<int32_t>(0xffff99aa), t.r3);
+ CHECK_EQ(static_cast<int32_t>(0x000099aa), t.r4);
+ CHECK_EQ(static_cast<int32_t>(0xffffff99), t.r5);
+ CHECK_EQ(static_cast<int32_t>(0x99aa3333), t.r6);
+ }
}
@@ -1026,25 +1034,47 @@ TEST(MIPS11) {
Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
USE(dummy);
- CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_0);
- CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_1);
- CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_2);
- CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_3);
-
- CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_0);
- CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_1);
- CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_2);
- CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_3);
-
- CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_0);
- CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_1);
- CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_2);
- CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_3);
-
- CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_0);
- CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_1);
- CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_2);
- CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_3);
+ if (kArchEndian == kLittle) {
+ CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_0);
+ CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_1);
+ CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_2);
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_3);
+
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_0);
+ CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_1);
+ CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_2);
+ CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_3);
+
+ CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_0);
+ CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_1);
+ CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_2);
+ CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_3);
+
+ CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_0);
+ CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_1);
+ CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_2);
+ CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_3);
+ } else {
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwl_0);
+ CHECK_EQ(static_cast<int32_t>(0x223344dd), t.lwl_1);
+ CHECK_EQ(static_cast<int32_t>(0x3344ccdd), t.lwl_2);
+ CHECK_EQ(static_cast<int32_t>(0x44bbccdd), t.lwl_3);
+
+ CHECK_EQ(static_cast<int32_t>(0xaabbcc11), t.lwr_0);
+ CHECK_EQ(static_cast<int32_t>(0xaabb1122), t.lwr_1);
+ CHECK_EQ(static_cast<int32_t>(0xaa112233), t.lwr_2);
+ CHECK_EQ(static_cast<int32_t>(0x11223344), t.lwr_3);
+
+ CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swl_0);
+ CHECK_EQ(static_cast<int32_t>(0x11aabbcc), t.swl_1);
+ CHECK_EQ(static_cast<int32_t>(0x1122aabb), t.swl_2);
+ CHECK_EQ(static_cast<int32_t>(0x112233aa), t.swl_3);
+
+ CHECK_EQ(static_cast<int32_t>(0xdd223344), t.swr_0);
+ CHECK_EQ(static_cast<int32_t>(0xccdd3344), t.swr_1);
+ CHECK_EQ(static_cast<int32_t>(0xbbccdd44), t.swr_2);
+ CHECK_EQ(static_cast<int32_t>(0xaabbccdd), t.swr_3);
+ }
}
}
@@ -1156,17 +1186,17 @@ TEST(MIPS13) {
MacroAssembler assm(isolate, NULL, 0);
__ sw(a4, MemOperand(a0, offsetof(T, cvt_small_in)));
- __ Cvt_d_uw(f10, a4, f22);
+ __ Cvt_d_uw(f10, a4, f4);
__ sdc1(f10, MemOperand(a0, offsetof(T, cvt_small_out)));
- __ Trunc_uw_d(f10, f10, f22);
+ __ Trunc_uw_d(f10, f10, f4);
__ swc1(f10, MemOperand(a0, offsetof(T, trunc_small_out)));
__ sw(a4, MemOperand(a0, offsetof(T, cvt_big_in)));
- __ Cvt_d_uw(f8, a4, f22);
+ __ Cvt_d_uw(f8, a4, f4);
__ sdc1(f8, MemOperand(a0, offsetof(T, cvt_big_out)));
- __ Trunc_uw_d(f8, f8, f22);
+ __ Trunc_uw_d(f8, f8, f4);
__ swc1(f8, MemOperand(a0, offsetof(T, trunc_big_out)));
__ jr(ra);
@@ -2610,12 +2640,12 @@ TEST(sqrt_rsqrt_recip) {
__ rsqrt_d(f14, f8);
__ rsqrt_s(f16, f2);
__ recip_d(f18, f8);
- __ recip_s(f20, f2);
+ __ recip_s(f4, f2);
__ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
__ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) );
__ swc1(f16, MemOperand(a0, offsetof(TestFloat, resultS1)) );
__ sdc1(f14, MemOperand(a0, offsetof(TestFloat, resultD1)) );
- __ swc1(f20, MemOperand(a0, offsetof(TestFloat, resultS2)) );
+ __ swc1(f4, MemOperand(a0, offsetof(TestFloat, resultS2)) );
__ sdc1(f18, MemOperand(a0, offsetof(TestFloat, resultD2)) );
__ jr(ra);
__ nop();
@@ -2802,12 +2832,12 @@ TEST(mov) {
5.3, -5.3, 5.3, -2.9
};
- __ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
+ __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)) );
__ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
- __ mov_s(f18, f6);
- __ mov_d(f20, f2);
- __ swc1(f18, MemOperand(a0, offsetof(TestFloat, d)) );
- __ sdc1(f20, MemOperand(a0, offsetof(TestFloat, b)) );
+ __ mov_s(f8, f6);
+ __ mov_d(f10, f4);
+ __ swc1(f8, MemOperand(a0, offsetof(TestFloat, d)) );
+ __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, b)) );
__ jr(ra);
__ nop();
diff --git a/deps/v8/test/cctest/test-ast-expression-visitor.cc b/deps/v8/test/cctest/test-ast-expression-visitor.cc
new file mode 100644
index 0000000000..8bf4c600dc
--- /dev/null
+++ b/deps/v8/test/cctest/test-ast-expression-visitor.cc
@@ -0,0 +1,392 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/ast-expression-visitor.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/expression-type-collector.h"
+#include "test/cctest/expression-type-collector-macros.h"
+
+using namespace v8::internal;
+
+namespace {
+
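+// Parses |source| and runs the expression type collector over the first
+// declared function, recording one entry per visited expression in
+// |dst|. No type inference happens here, so the tests below expect
+// unbounded bounds everywhere.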
+static void CollectTypes(HandleAndZoneScope* handles, const char* source,
+ ZoneVector<ExpressionTypeEntry>* dst) {
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+
+ i::Handle<i::String> source_code =
+ factory->NewStringFromUtf8(i::CStrVector(source)).ToHandleChecked();
+
+ i::Handle<i::Script> script = factory->NewScript(source_code);
+
+ i::ParseInfo info(handles->main_zone(), script);
+ i::Parser parser(&info);
+ parser.set_allow_harmony_arrow_functions(true);
+ parser.set_allow_harmony_sloppy(true);
+ info.set_global();
+ info.set_lazy(false);
+ info.set_allow_lazy_parsing(false);
+ info.set_toplevel(true);
+
+ CHECK(i::Compiler::ParseAndAnalyze(&info));
+
+ ExpressionTypeCollector(
+ isolate, handles->main_zone(),
+ info.scope()->declarations()->at(0)->AsFunctionDeclaration()->fun(), dst)
+ .Run();
+}
+}
+
+
+TEST(VisitExpressions) {
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
+ const char test_function[] =
+ "function GeometricMean(stdlib, foreign, buffer) {\n"
+ " \"use asm\";\n"
+ "\n"
+ " var exp = stdlib.Math.exp;\n"
+ " var log = stdlib.Math.log;\n"
+ " var values = new stdlib.Float64Array(buffer);\n"
+ "\n"
+ " function logSum(start, end) {\n"
+ " start = start|0;\n"
+ " end = end|0;\n"
+ "\n"
+ " var sum = 0.0, p = 0, q = 0;\n"
+ "\n"
+ " // asm.js forces byte addressing of the heap by requiring shifting "
+ "by 3\n"
+ " for (p = start << 3, q = end << 3; (p|0) < (q|0); p = (p + 8)|0) {\n"
+ " sum = sum + +log(values[p>>3]);\n"
+ " }\n"
+ "\n"
+ " return +sum;\n"
+ " }\n"
+ "\n"
+ " function geometricMean(start, end) {\n"
+ " start = start|0;\n"
+ " end = end|0;\n"
+ "\n"
+ " return +exp(+logSum(start, end) / +((end - start)|0));\n"
+ " }\n"
+ "\n"
+ " return { geometricMean: geometricMean };\n"
+ "}\n";
+
+ CollectTypes(&handles, test_function, &types);
+ CHECK_TYPES_BEGIN {
+ // function logSum
+ CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
+ CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(start, Bounds::Unbounded());
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(start, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(end, Bounds::Unbounded());
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(end, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(sum, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(p, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(q, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ // for (p = start << 3, q = end << 3;
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(p, Bounds::Unbounded());
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(start, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(q, Bounds::Unbounded());
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(end, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ }
+ // (p|0) < (q|0);
+ CHECK_EXPR(CompareOperation, Bounds::Unbounded()) {
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(p, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(q, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+          // p = (p + 8)|0
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(p, Bounds::Unbounded());
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(p, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ // sum = sum + +log(values[p>>3]);
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(sum, Bounds::Unbounded());
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(sum, Bounds::Unbounded());
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_EXPR(Call, Bounds::Unbounded()) {
+ CHECK_VAR(log, Bounds::Unbounded());
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(values, Bounds::Unbounded());
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(p, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ }
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ }
+ // return +sum;
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(sum, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ // function geometricMean
+ CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(start, Bounds::Unbounded());
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(start, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(end, Bounds::Unbounded());
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(end, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ // return +exp(+logSum(start, end) / +((end - start)|0));
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_EXPR(Call, Bounds::Unbounded()) {
+ CHECK_VAR(exp, Bounds::Unbounded());
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_EXPR(Call, Bounds::Unbounded()) {
+ CHECK_VAR(logSum, Bounds::Unbounded());
+ CHECK_VAR(start, Bounds::Unbounded());
+ CHECK_VAR(end, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ CHECK_VAR(end, Bounds::Unbounded());
+ CHECK_VAR(start, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ }
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ // "use asm";
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ // var exp = stdlib.Math.exp;
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(exp, Bounds::Unbounded());
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(stdlib, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ // var log = stdlib.Math.log;
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(log, Bounds::Unbounded());
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(stdlib, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ // var values = new stdlib.Float64Array(buffer);
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(values, Bounds::Unbounded());
+ CHECK_EXPR(CallNew, Bounds::Unbounded()) {
+ CHECK_EXPR(Property, Bounds::Unbounded()) {
+ CHECK_VAR(stdlib, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_VAR(buffer, Bounds::Unbounded());
+ }
+ }
+ // return { geometricMean: geometricMean };
+ CHECK_EXPR(ObjectLiteral, Bounds::Unbounded()) {
+ CHECK_VAR(geometricMean, Bounds::Unbounded());
+ }
+ }
+ }
+ CHECK_TYPES_END
+}
+
+
+TEST(VisitEmptyForStatment) {
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
+ // Check that traversing an empty for statement works.
+ const char test_function[] =
+ "function foo() {\n"
+ " for (;;) {}\n"
+ "}\n";
+ CollectTypes(&handles, test_function, &types);
+ CHECK_TYPES_BEGIN {
+ CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {}
+ }
+ CHECK_TYPES_END
+}
+
+
+TEST(VisitSwitchStatment) {
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
+ // Check that traversing a switch with a default works.
+ const char test_function[] =
+ "function foo() {\n"
+ " switch (0) { case 1: break; default: break; }\n"
+ "}\n";
+ CollectTypes(&handles, test_function, &types);
+ CHECK_TYPES_BEGIN {
+ CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(.switch_tag, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ CHECK_VAR(.switch_tag, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ CHECK_TYPES_END
+}
+
+
+TEST(VisitThrow) {
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
+  // Check that traversing a throw statement works.
+ const char test_function[] =
+ "function foo() {\n"
+ " throw 123;\n"
+ "}\n";
+ CollectTypes(&handles, test_function, &types);
+ CHECK_TYPES_BEGIN {
+ CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
+ CHECK_EXPR(Throw, Bounds::Unbounded()) {
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ }
+ CHECK_TYPES_END
+}
+
+
+TEST(VisitYield) {
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
+  // Check that traversing a yield expression works.
+ const char test_function[] =
+ "function* foo() {\n"
+ " yield 123;\n"
+ "}\n";
+ CollectTypes(&handles, test_function, &types);
+ CHECK_TYPES_BEGIN {
+ CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
+      // A generator function first yields its generator object on entry.
+ CHECK_EXPR(Yield, Bounds::Unbounded()) {
+ CHECK_VAR(.generator_object, Bounds::Unbounded());
+ CHECK_EXPR(Assignment, Bounds::Unbounded()) {
+ CHECK_VAR(.generator_object, Bounds::Unbounded());
+ CHECK_EXPR(CallRuntime, Bounds::Unbounded());
+ }
+ }
+ // Then yields undefined.
+ CHECK_EXPR(Yield, Bounds::Unbounded()) {
+ CHECK_VAR(.generator_object, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ // Then yields 123.
+ CHECK_EXPR(Yield, Bounds::Unbounded()) {
+ CHECK_VAR(.generator_object, Bounds::Unbounded());
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ }
+ CHECK_TYPES_END
+}
+
+
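+// CHECK_SKIP() consumes a whole recorded subtree, letting a test ignore
+// inner expressions it does not care about.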
+TEST(VisitSkipping) {
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+ ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
+  // Check that skipping a subexpression during traversal works.
+ const char test_function[] =
+ "function foo(x) {\n"
+ " return (x + x) + 1;\n"
+ "}\n";
+ CollectTypes(&handles, test_function, &types);
+ CHECK_TYPES_BEGIN {
+ CHECK_EXPR(FunctionLiteral, Bounds::Unbounded()) {
+ CHECK_EXPR(BinaryOperation, Bounds::Unbounded()) {
+ // Skip x + x
+ CHECK_SKIP();
+ CHECK_EXPR(Literal, Bounds::Unbounded());
+ }
+ }
+ }
+ CHECK_TYPES_END
+}
diff --git a/deps/v8/test/cctest/test-circular-queue.cc b/deps/v8/test/cctest/test-circular-queue.cc
index 8d0d4f982a..a5bcb486af 100644
--- a/deps/v8/test/cctest/test-circular-queue.cc
+++ b/deps/v8/test/cctest/test-circular-queue.cc
@@ -29,7 +29,7 @@
#include "src/v8.h"
-#include "src/circular-queue-inl.h"
+#include "src/profiler/circular-queue-inl.h"
#include "test/cctest/cctest.h"
using i::SamplingCircularQueue;
diff --git a/deps/v8/test/cctest/test-code-stubs-arm.cc b/deps/v8/test/cctest/test-code-stubs-arm.cc
index 80403440da..a9856f5245 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm.cc
@@ -76,7 +76,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save registers make sure they don't get clobbered.
int source_reg_offset = kDoubleSize;
int reg_num = 0;
- for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
__ push(reg);
@@ -126,7 +126,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(&desc);
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
diff --git a/deps/v8/test/cctest/test-code-stubs-arm64.cc b/deps/v8/test/cctest/test-code-stubs-arm64.cc
index 8b38b96600..8f40c9052a 100644
--- a/deps/v8/test/cctest/test-code-stubs-arm64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-arm64.cc
@@ -72,7 +72,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save registers make sure they don't get clobbered.
int source_reg_offset = kDoubleSize;
int reg_num = 0;
- for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
queue.Queue(reg);
@@ -122,7 +122,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
CodeDesc desc;
masm.GetCode(&desc);
- CpuFeatures::FlushICache(buffer, actual_size);
+ Assembler::FlushICache(isolate, buffer, actual_size);
return (reinterpret_cast<ConvertDToIFunc>(
reinterpret_cast<intptr_t>(buffer)));
}
diff --git a/deps/v8/test/cctest/test-code-stubs-ia32.cc b/deps/v8/test/cctest/test-code-stubs-ia32.cc
index 0b4a8d417b..ef44f5420b 100644
--- a/deps/v8/test/cctest/test-code-stubs-ia32.cc
+++ b/deps/v8/test/cctest/test-code-stubs-ia32.cc
@@ -70,7 +70,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
int param_offset = 7 * kPointerSize;
// Save registers make sure they don't get clobbered.
int reg_num = 0;
- for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ push(reg);
diff --git a/deps/v8/test/cctest/test-code-stubs-mips.cc b/deps/v8/test/cctest/test-code-stubs-mips.cc
index 796aa1d610..10704efdca 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips.cc
@@ -62,6 +62,11 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save callee save registers.
__ MultiPush(kCalleeSaved | ra.bit());
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
+
// For softfp, move the input value into f12.
if (IsMipsSoftFloatABI) {
__ Move(f12, a0, a1);
@@ -74,7 +79,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save registers make sure they don't get clobbered.
int source_reg_offset = kDoubleSize;
int reg_num = 2;
- for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
__ push(reg);
@@ -117,6 +122,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ Branch(&ok, eq, v0, Operand(zero_reg));
__ bind(&ok);
+ // Restore callee-saved FPU registers.
+ __ MultiPopFPU(kCalleeSavedFPU);
+
// Restore callee save registers.
__ MultiPop(kCalleeSaved | ra.bit());
diff --git a/deps/v8/test/cctest/test-code-stubs-mips64.cc b/deps/v8/test/cctest/test-code-stubs-mips64.cc
index 9f146f65fd..a8dd13cc07 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips64.cc
@@ -62,6 +62,11 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save callee save registers.
__ MultiPush(kCalleeSaved | ra.bit());
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
+
// For softfp, move the input value into f12.
if (IsMipsSoftFloatABI) {
__ Move(f12, a0, a1);
@@ -74,7 +79,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save registers make sure they don't get clobbered.
int source_reg_offset = kDoubleSize;
int reg_num = 2;
- for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
__ push(reg);
@@ -117,6 +122,9 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
__ Branch(&ok, eq, v0, Operand(zero_reg));
__ bind(&ok);
+ // Restore callee-saved FPU registers.
+ __ MultiPopFPU(kCalleeSavedFPU);
+
// Restore callee save registers.
__ MultiPop(kCalleeSaved | ra.bit());
diff --git a/deps/v8/test/cctest/test-code-stubs-x64.cc b/deps/v8/test/cctest/test-code-stubs-x64.cc
index b58b073f3b..9eb153f651 100644
--- a/deps/v8/test/cctest/test-code-stubs-x64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x64.cc
@@ -74,7 +74,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Save registers make sure they don't get clobbered.
int reg_num = 0;
- for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(rsp) && !reg.is(rbp) && !reg.is(destination_reg)) {
__ pushq(reg);
diff --git a/deps/v8/test/cctest/test-code-stubs-x87.cc b/deps/v8/test/cctest/test-code-stubs-x87.cc
index 0b4a8d417b..ef44f5420b 100644
--- a/deps/v8/test/cctest/test-code-stubs-x87.cc
+++ b/deps/v8/test/cctest/test-code-stubs-x87.cc
@@ -70,7 +70,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
int param_offset = 7 * kPointerSize;
// Save registers make sure they don't get clobbered.
int reg_num = 0;
- for (;reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+ for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
Register reg = Register::FromAllocationIndex(reg_num);
if (!reg.is(esp) && !reg.is(ebp) && !reg.is(destination_reg)) {
__ push(reg);
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 17eec07376..f7d1e1a5b1 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -32,8 +32,8 @@
#include "include/v8-profiler.h"
#include "src/base/platform/platform.h"
#include "src/base/smart-pointers.h"
-#include "src/cpu-profiler-inl.h"
#include "src/deoptimizer.h"
+#include "src/profiler/cpu-profiler-inl.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
#include "test/cctest/profiler-extension.h"
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index 2bcc625a95..89372c07ee 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(jochen): Remove this after the setting is turned on globally.
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include "src/v8.h"
#include "src/global-handles.h"
@@ -173,18 +176,24 @@ TEST(DateCacheVersion) {
v8::Isolate* isolate = CcTest::isolate();
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope scope(isolate);
- v8::Handle<v8::Context> context = v8::Context::New(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
v8::Context::Scope context_scope(context);
- v8::Handle<v8::Array> date_cache_version =
- v8::Handle<v8::Array>::Cast(CompileRun("%DateCacheVersion()"));
+ v8::Local<v8::Array> date_cache_version =
+ v8::Local<v8::Array>::Cast(CompileRun("%DateCacheVersion()"));
CHECK_EQ(1, static_cast<int32_t>(date_cache_version->Length()));
- CHECK(date_cache_version->Get(0)->IsNumber());
- CHECK_EQ(0.0, date_cache_version->Get(0)->NumberValue());
+ CHECK(date_cache_version->Get(context, 0).ToLocalChecked()->IsNumber());
+ CHECK_EQ(0.0, date_cache_version->Get(context, 0)
+ .ToLocalChecked()
+ ->NumberValue(context)
+ .FromJust());
v8::Date::DateTimeConfigurationChangeNotification(isolate);
CHECK_EQ(1, static_cast<int32_t>(date_cache_version->Length()));
- CHECK(date_cache_version->Get(0)->IsNumber());
- CHECK_EQ(1.0, date_cache_version->Get(0)->NumberValue());
+ CHECK(date_cache_version->Get(context, 0).ToLocalChecked()->IsNumber());
+ CHECK_EQ(1.0, date_cache_version->Get(context, 0)
+ .ToLocalChecked()
+ ->NumberValue(context)
+ .FromJust());
}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 8f569ae6fe..e2ba3f5d1c 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -6363,16 +6363,18 @@ TEST(SyntaxErrorMessageOnSyntaxException) {
v8::String::NewFromUtf8(env->GetIsolate(), "/sel\\/: \\"));
CHECK_EQ(2, compile_error_event_count);
- v8::Script::Compile(
- v8::String::NewFromUtf8(env->GetIsolate(), "JSON.parse('1234:')"));
+ v8::Local<v8::Script> script = v8::Script::Compile(
+ v8::String::NewFromUtf8(env->GetIsolate(), "JSON.parse('1234:')"));
CHECK_EQ(2, compile_error_event_count);
+ script->Run();
+ CHECK_EQ(3, compile_error_event_count);
v8::Script::Compile(
v8::String::NewFromUtf8(env->GetIsolate(), "new RegExp('/\\/\\\\');"));
- CHECK_EQ(2, compile_error_event_count);
+ CHECK_EQ(3, compile_error_event_count);
v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "throw 1;"));
- CHECK_EQ(2, compile_error_event_count);
+ CHECK_EQ(3, compile_error_event_count);
}
diff --git a/deps/v8/test/cctest/test-experimental-extra.js b/deps/v8/test/cctest/test-experimental-extra.js
new file mode 100644
index 0000000000..a29fc7688c
--- /dev/null
+++ b/deps/v8/test/cctest/test-experimental-extra.js
@@ -0,0 +1,14 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function (global, binding) {
+ 'use strict';
+ binding.testExperimentalExtraShouldReturnTen = function () {
+ return 10;
+ };
+
+ binding.testExperimentalExtraShouldCallToRuntime = function() {
+ return binding.runtime(3);
+ };
+})
diff --git a/deps/v8/test/cctest/test-extra.js b/deps/v8/test/cctest/test-extra.js
index f943ea6c4e..123c8920d0 100644
--- a/deps/v8/test/cctest/test-extra.js
+++ b/deps/v8/test/cctest/test-extra.js
@@ -2,13 +2,53 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-(function (global, binding) {
+(function (global, binding, v8) {
'use strict';
- binding.testExtraShouldReturnFive = function () {
+ binding.testExtraShouldReturnFive = function() {
return 5;
};
binding.testExtraShouldCallToRuntime = function() {
return binding.runtime(3);
};
+
+ // Exercise all of the extras utils:
+ // - v8.createPrivateSymbol
+ // - v8.simpleBind, v8.uncurryThis
+ // - v8.InternalPackedArray
+ // - v8.createPromise, v8.resolvePromise, v8.rejectPromise
+
+ const Object = global.Object;
+ const hasOwn = v8.uncurryThis(Object.prototype.hasOwnProperty);
+
+ const Function = global.Function;
+ const call = v8.uncurryThis(Function.prototype.call);
+ const apply = v8.uncurryThis(Function.prototype.apply);
+
+ const Promise = global.Promise;
+ const Promise_resolve = v8.simpleBind(Promise.resolve, Promise);
+
+ binding.testExtraCanUseUtils = function() {
+ const fulfilledPromise = v8.createPromise();
+ v8.resolvePromise(
+ fulfilledPromise,
+ hasOwn({ test: 'test' }, 'test') ? 1 : -1
+ );
+
+ const fulfilledPromise2 = Promise_resolve(call(function (arg1) {
+ return (this.prop === arg1 && arg1 === 'value') ? 2 : -1;
+ }, { prop: 'value' }, 'value'));
+
+ const rejectedPromise = v8.createPromise();
+ v8.rejectPromise(rejectedPromise, apply(function (arg1, arg2) {
+ return (arg1 === arg2 && arg2 === 'x') ? 3 : -1;
+ }, null, new v8.InternalPackedArray('x', 'x')));
+
+ return {
+ privateSymbol: v8.createPrivateSymbol('sym'),
+ fulfilledPromise, // should be fulfilled with 1
+ fulfilledPromise2, // should be fulfilled with 2
+ rejectedPromise // should be rejected with 3
+ };
+ };
})
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index b982c0f02b..d455ca0f56 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -17,6 +17,10 @@ using namespace v8::internal;
namespace {
+#define CHECK_SLOT_KIND(vector, slot, expected_kind) \
+ CHECK_EQ(expected_kind, vector->GetKind(FeedbackVectorICSlot(slot)));
+
+
TEST(VectorStructure) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -25,7 +29,7 @@ TEST(VectorStructure) {
Zone* zone = isolate->runtime_zone();
// Empty vectors are the empty fixed array.
- FeedbackVectorSpec empty;
+ StaticFeedbackVectorSpec empty;
Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(&empty);
CHECK(Handle<FixedArray>::cast(vector)
.is_identical_to(factory->empty_fixed_array()));
@@ -35,19 +39,21 @@ TEST(VectorStructure) {
CHECK_EQ(0, vector->Slots());
CHECK_EQ(0, vector->ICSlots());
- FeedbackVectorSpec one_slot(1);
+ FeedbackVectorSpec one_slot(zone);
+ one_slot.AddStubSlot();
vector = factory->NewTypeFeedbackVector(&one_slot);
CHECK_EQ(1, vector->Slots());
CHECK_EQ(0, vector->ICSlots());
- ZoneFeedbackVectorSpec one_icslot(zone, 0, 1);
- one_icslot.SetKind(0, Code::CALL_IC);
+ FeedbackVectorSpec one_icslot(zone);
+ one_icslot.AddSlot(FeedbackVectorSlotKind::CALL_IC);
vector = factory->NewTypeFeedbackVector(&one_icslot);
CHECK_EQ(0, vector->Slots());
CHECK_EQ(1, vector->ICSlots());
- ZoneFeedbackVectorSpec spec(zone, 3, 5);
- for (int i = 0; i < 5; i++) spec.SetKind(i, Code::CALL_IC);
+ FeedbackVectorSpec spec(zone);
+ spec.AddStubSlots(3);
+ spec.AddSlots(FeedbackVectorSlotKind::CALL_IC, 5);
vector = factory->NewTypeFeedbackVector(&spec);
CHECK_EQ(3, vector->Slots());
CHECK_EQ(5, vector->ICSlots());
@@ -78,18 +84,21 @@ TEST(VectorICMetadata) {
Factory* factory = isolate->factory();
Zone* zone = isolate->runtime_zone();
- ZoneFeedbackVectorSpec spec(zone, 10, 3 * 10);
+ FeedbackVectorSpec spec(zone);
// Set metadata.
+ spec.AddStubSlots(10);
for (int i = 0; i < 30; i++) {
- Code::Kind kind;
- if (i % 3 == 0) {
- kind = Code::CALL_IC;
- } else if (i % 3 == 1) {
- kind = Code::LOAD_IC;
- } else {
- kind = Code::KEYED_LOAD_IC;
+ switch (i % 3) {
+ case 0:
+ spec.AddSlot(FeedbackVectorSlotKind::CALL_IC);
+ break;
+ case 1:
+ spec.AddSlot(FeedbackVectorSlotKind::LOAD_IC);
+ break;
+ case 2:
+ spec.AddSlot(FeedbackVectorSlotKind::KEYED_LOAD_IC);
+ break;
}
- spec.SetKind(i, kind);
}
Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(&spec);
@@ -104,13 +113,13 @@ TEST(VectorICMetadata) {
// Verify the metadata is correctly set up from the spec.
for (int i = 0; i < 30; i++) {
- Code::Kind kind = vector->GetKind(FeedbackVectorICSlot(i));
+ FeedbackVectorSlotKind kind = vector->GetKind(FeedbackVectorICSlot(i));
if (i % 3 == 0) {
- CHECK_EQ(Code::CALL_IC, kind);
+ CHECK_EQ(FeedbackVectorSlotKind::CALL_IC, kind);
} else if (i % 3 == 1) {
- CHECK_EQ(Code::LOAD_IC, kind);
+ CHECK_EQ(FeedbackVectorSlotKind::LOAD_IC, kind);
} else {
- CHECK_EQ(Code::KEYED_LOAD_IC, kind);
+ CHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, kind);
}
}
}
@@ -121,11 +130,13 @@ TEST(VectorSlotClearing) {
v8::HandleScope scope(context->GetIsolate());
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
+ Zone* zone = isolate->runtime_zone();
// We only test clearing FeedbackVectorSlots, not FeedbackVectorICSlots.
// The reason is that FeedbackVectorICSlots need a full code environment
// to be fully tested (see the VectorICProfilerStatistics test below).
- FeedbackVectorSpec spec(5);
+ FeedbackVectorSpec spec(zone);
+ spec.AddStubSlots(5);
Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(&spec);
// Fill with information
@@ -416,9 +427,17 @@ TEST(ReferenceContextAllocatesNoSlots) {
// There should be two LOAD_ICs, one for a and one for y at the end.
Handle<TypeFeedbackVector> feedback_vector =
handle(f->shared()->feedback_vector(), isolate);
- CHECK_EQ(2, feedback_vector->ICSlots());
- CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
- CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
+ if (FLAG_vector_stores) {
+ CHECK_EQ(4, feedback_vector->ICSlots());
+ CHECK_SLOT_KIND(feedback_vector, 0, FeedbackVectorSlotKind::STORE_IC);
+ CHECK_SLOT_KIND(feedback_vector, 1, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(feedback_vector, 2, FeedbackVectorSlotKind::STORE_IC);
+ CHECK_SLOT_KIND(feedback_vector, 3, FeedbackVectorSlotKind::LOAD_IC);
+ } else {
+ CHECK_EQ(2, feedback_vector->ICSlots());
+ CHECK_SLOT_KIND(feedback_vector, 0, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(feedback_vector, 1, FeedbackVectorSlotKind::LOAD_IC);
+ }
CompileRun(
"function testprop(x) {"
@@ -430,7 +449,11 @@ TEST(ReferenceContextAllocatesNoSlots) {
// There should be one LOAD_IC, for the load of a.
feedback_vector = handle(f->shared()->feedback_vector(), isolate);
- CHECK_EQ(1, feedback_vector->ICSlots());
+ if (FLAG_vector_stores) {
+ CHECK_EQ(2, feedback_vector->ICSlots());
+ } else {
+ CHECK_EQ(1, feedback_vector->ICSlots());
+ }
CompileRun(
"function testpropfunc(x) {"
@@ -444,11 +467,20 @@ TEST(ReferenceContextAllocatesNoSlots) {
// There should be 2 LOAD_ICs and 2 CALL_ICs.
feedback_vector = handle(f->shared()->feedback_vector(), isolate);
- CHECK_EQ(4, feedback_vector->ICSlots());
- CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::CALL_IC);
- CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
- CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(2)) == Code::CALL_IC);
- CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(3)) == Code::LOAD_IC);
+ if (FLAG_vector_stores) {
+ CHECK_EQ(5, feedback_vector->ICSlots());
+ CHECK_SLOT_KIND(feedback_vector, 0, FeedbackVectorSlotKind::CALL_IC);
+ CHECK_SLOT_KIND(feedback_vector, 1, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(feedback_vector, 2, FeedbackVectorSlotKind::STORE_IC);
+ CHECK_SLOT_KIND(feedback_vector, 3, FeedbackVectorSlotKind::CALL_IC);
+ CHECK_SLOT_KIND(feedback_vector, 4, FeedbackVectorSlotKind::LOAD_IC);
+ } else {
+ CHECK_EQ(4, feedback_vector->ICSlots());
+ CHECK_SLOT_KIND(feedback_vector, 0, FeedbackVectorSlotKind::CALL_IC);
+ CHECK_SLOT_KIND(feedback_vector, 1, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(feedback_vector, 2, FeedbackVectorSlotKind::CALL_IC);
+ CHECK_SLOT_KIND(feedback_vector, 3, FeedbackVectorSlotKind::LOAD_IC);
+ }
CompileRun(
"function testkeyedprop(x) {"
@@ -462,10 +494,16 @@ TEST(ReferenceContextAllocatesNoSlots) {
// There should be one LOAD_IC for the load of a, and one KEYED_LOAD_IC for the
// load of x[0] in the return statement.
feedback_vector = handle(f->shared()->feedback_vector(), isolate);
- CHECK_EQ(2, feedback_vector->ICSlots());
- CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
- CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) ==
- Code::KEYED_LOAD_IC);
+ if (FLAG_vector_stores) {
+ CHECK_EQ(3, feedback_vector->ICSlots());
+ CHECK_SLOT_KIND(feedback_vector, 0, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(feedback_vector, 1, FeedbackVectorSlotKind::KEYED_STORE_IC);
+ CHECK_SLOT_KIND(feedback_vector, 2, FeedbackVectorSlotKind::KEYED_LOAD_IC);
+ } else {
+ CHECK_EQ(2, feedback_vector->ICSlots());
+ CHECK_SLOT_KIND(feedback_vector, 0, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(feedback_vector, 1, FeedbackVectorSlotKind::KEYED_LOAD_IC);
+ }
CompileRun(
"function testcompound(x) {"
@@ -478,9 +516,47 @@ TEST(ReferenceContextAllocatesNoSlots) {
// There should be 3 LOAD_ICs, for the load of a and the loads of x.old and x.young.
feedback_vector = handle(f->shared()->feedback_vector(), isolate);
- CHECK_EQ(3, feedback_vector->ICSlots());
- CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(0)) == Code::LOAD_IC);
- CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(1)) == Code::LOAD_IC);
- CHECK(feedback_vector->GetKind(FeedbackVectorICSlot(2)) == Code::LOAD_IC);
+ if (FLAG_vector_stores) {
+ CHECK_EQ(6, feedback_vector->ICSlots());
+ CHECK_SLOT_KIND(feedback_vector, 0, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(feedback_vector, 1, FeedbackVectorSlotKind::STORE_IC);
+ CHECK_SLOT_KIND(feedback_vector, 2, FeedbackVectorSlotKind::STORE_IC);
+ CHECK_SLOT_KIND(feedback_vector, 3, FeedbackVectorSlotKind::STORE_IC);
+ CHECK_SLOT_KIND(feedback_vector, 4, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(feedback_vector, 5, FeedbackVectorSlotKind::LOAD_IC);
+ } else {
+ CHECK_EQ(3, feedback_vector->ICSlots());
+ CHECK_SLOT_KIND(feedback_vector, 0, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(feedback_vector, 1, FeedbackVectorSlotKind::LOAD_IC);
+ CHECK_SLOT_KIND(feedback_vector, 2, FeedbackVectorSlotKind::LOAD_IC);
+ }
+}
+
+
+TEST(VectorStoreICBasic) {
+ if (i::FLAG_always_opt) return;
+ if (!i::FLAG_vector_stores) return;
+
+ CcTest::InitializeVM();
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+
+ CompileRun(
+ "function f(a) {"
+ " a.foo = 5;"
+ "}"
+ "var a = { foo: 3 };"
+ "f(a);"
+ "f(a);"
+ "f(a);");
+ Handle<JSFunction> f = GetFunction("f");
+ // There should be one IC slot.
+ Handle<TypeFeedbackVector> feedback_vector =
+ Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+ CHECK_EQ(1, feedback_vector->ICSlots());
+ FeedbackVectorICSlot slot(0);
+ StoreICNexus nexus(feedback_vector, slot);
+ CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
}
}
diff --git a/deps/v8/test/cctest/test-gc-tracer.cc b/deps/v8/test/cctest/test-gc-tracer.cc
index 190644dec1..1289ec5cea 100644
--- a/deps/v8/test/cctest/test-gc-tracer.cc
+++ b/deps/v8/test/cctest/test-gc-tracer.cc
@@ -28,8 +28,7 @@
#include <stdlib.h>
#include <utility>
-#include "src/v8.h"
-
+#include "src/heap/gc-tracer.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index f4c8c1a486..55e553868b 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -32,11 +32,11 @@
#include "src/v8.h"
#include "include/v8-profiler.h"
-#include "src/allocation-tracker.h"
#include "src/debug/debug.h"
#include "src/hashmap.h"
-#include "src/heap-profiler.h"
-#include "src/heap-snapshot-generator-inl.h"
+#include "src/profiler/allocation-tracker.h"
+#include "src/profiler/heap-profiler.h"
+#include "src/profiler/heap-snapshot-generator-inl.h"
#include "test/cctest/cctest.h"
using i::AllocationTraceNode;
@@ -503,10 +503,13 @@ void CheckSimdSnapshot(const char* program, const char* var_name) {
TEST(HeapSnapshotSimd) {
CheckSimdSnapshot("a = SIMD.Float32x4();\n", "a");
CheckSimdSnapshot("a = SIMD.Int32x4();\n", "a");
+ CheckSimdSnapshot("a = SIMD.Uint32x4();\n", "a");
CheckSimdSnapshot("a = SIMD.Bool32x4();\n", "a");
CheckSimdSnapshot("a = SIMD.Int16x8();\n", "a");
+ CheckSimdSnapshot("a = SIMD.Uint16x8();\n", "a");
CheckSimdSnapshot("a = SIMD.Bool16x8();\n", "a");
CheckSimdSnapshot("a = SIMD.Int8x16();\n", "a");
+ CheckSimdSnapshot("a = SIMD.Uint8x16();\n", "a");
CheckSimdSnapshot("a = SIMD.Bool8x16();\n", "a");
}
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 5d568e25c1..abcc96670d 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -28,45 +28,24 @@
#include <stdlib.h>
#include <utility>
-#include "src/v8.h"
-
#include "src/compilation-cache.h"
#include "src/context-measure.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/global-handles.h"
+#include "src/heap/gc-tracer.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "src/snapshot/snapshot.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/heap-tester.h"
using v8::Just;
namespace v8 {
namespace internal {
-// Tests that should have access to private methods of {v8::internal::Heap}.
-// Those tests need to be defined using HEAP_TEST(Name) { ... }.
-#define HEAP_TEST_METHODS(V) \
- V(GCFlags)
-
-
-#define HEAP_TEST(Name) \
- CcTest register_test_##Name(HeapTester::Test##Name, __FILE__, #Name, NULL, \
- true, true); \
- void HeapTester::Test##Name()
-
-
-class HeapTester {
- public:
-#define DECLARE_STATIC(Name) static void Test##Name();
-
- HEAP_TEST_METHODS(DECLARE_STATIC)
-#undef HEAP_TEST_METHODS
-};
-
-
static void CheckMap(Map* map, int type, int instance_size) {
CHECK(map->IsHeapObject());
#ifdef DEBUG
@@ -95,16 +74,14 @@ TEST(HeapMaps) {
static void CheckOddball(Isolate* isolate, Object* obj, const char* string) {
CHECK(obj->IsOddball());
Handle<Object> handle(obj, isolate);
- Object* print_string =
- *Execution::ToString(isolate, handle).ToHandleChecked();
+ Object* print_string = *Object::ToString(isolate, handle).ToHandleChecked();
CHECK(String::cast(print_string)->IsUtf8EqualTo(CStrVector(string)));
}
static void CheckSmi(Isolate* isolate, int value, const char* string) {
Handle<Object> handle(Smi::FromInt(value), isolate);
- Object* print_string =
- *Execution::ToString(isolate, handle).ToHandleChecked();
+ Object* print_string = *Object::ToString(isolate, handle).ToHandleChecked();
CHECK(String::cast(print_string)->IsUtf8EqualTo(CStrVector(string)));
}
@@ -113,7 +90,7 @@ static void CheckNumber(Isolate* isolate, double value, const char* string) {
Handle<Object> number = isolate->factory()->NewNumber(value);
CHECK(number->IsNumber());
Handle<Object> print_string =
- Execution::ToString(isolate, number).ToHandleChecked();
+ Object::ToString(isolate, number).ToHandleChecked();
CHECK(String::cast(*print_string)->IsUtf8EqualTo(CStrVector(string)));
}
@@ -309,7 +286,7 @@ TEST(SimdObjects) {
}
// Int32x4
{
- int32_t lanes[4] = {-1, 0, 1, 2};
+ int32_t lanes[4] = {1, 2, 3, 4};
Handle<Int32x4> value = factory->NewInt32x4(lanes);
CHECK(value->IsInt32x4());
@@ -318,12 +295,26 @@ TEST(SimdObjects) {
#ifdef OBJECT_PRINT
std::ostringstream os;
value->Int32x4Print(os);
- CHECK_EQ("-1, 0, 1, 2", os.str());
+ CHECK_EQ("1, 2, 3, 4", os.str());
+#endif // OBJECT_PRINT
+ }
+ // Uint32x4
+ {
+ uint32_t lanes[4] = {1, 2, 3, 4};
+
+ Handle<Uint32x4> value = factory->NewUint32x4(lanes);
+ CHECK(value->IsUint32x4());
+ CheckSimdValue<Uint32x4, uint32_t, 4>(*value, lanes, 3);
+
+#ifdef OBJECT_PRINT
+ std::ostringstream os;
+ value->Uint32x4Print(os);
+ CHECK_EQ("1, 2, 3, 4", os.str());
#endif // OBJECT_PRINT
}
// Bool32x4
{
- bool lanes[4] = {true, true, true, false};
+ bool lanes[4] = {true, false, true, false};
Handle<Bool32x4> value = factory->NewBool32x4(lanes);
CHECK(value->IsBool32x4());
@@ -332,12 +323,12 @@ TEST(SimdObjects) {
#ifdef OBJECT_PRINT
std::ostringstream os;
value->Bool32x4Print(os);
- CHECK_EQ("true, true, true, false", os.str());
+ CHECK_EQ("true, false, true, false", os.str());
#endif // OBJECT_PRINT
}
// Int16x8
{
- int16_t lanes[8] = {-1, 0, 1, 2, 3, 4, 5, -32768};
+ int16_t lanes[8] = {1, 2, 3, 4, 5, 6, 7, 8};
Handle<Int16x8> value = factory->NewInt16x8(lanes);
CHECK(value->IsInt16x8());
@@ -346,12 +337,26 @@ TEST(SimdObjects) {
#ifdef OBJECT_PRINT
std::ostringstream os;
value->Int16x8Print(os);
- CHECK_EQ("-1, 0, 1, 2, 3, 4, 5, -32768", os.str());
+ CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8", os.str());
+#endif // OBJECT_PRINT
+ }
+ // Uint16x8
+ {
+ uint16_t lanes[8] = {1, 2, 3, 4, 5, 6, 7, 8};
+
+ Handle<Uint16x8> value = factory->NewUint16x8(lanes);
+ CHECK(value->IsUint16x8());
+ CheckSimdValue<Uint16x8, uint16_t, 8>(*value, lanes, 32767);
+
+#ifdef OBJECT_PRINT
+ std::ostringstream os;
+ value->Uint16x8Print(os);
+ CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8", os.str());
#endif // OBJECT_PRINT
}
// Bool16x8
{
- bool lanes[8] = {true, true, true, true, true, true, true, false};
+ bool lanes[8] = {true, false, true, false, true, false, true, false};
Handle<Bool16x8> value = factory->NewBool16x8(lanes);
CHECK(value->IsBool16x8());
@@ -360,12 +365,12 @@ TEST(SimdObjects) {
#ifdef OBJECT_PRINT
std::ostringstream os;
value->Bool16x8Print(os);
- CHECK_EQ("true, true, true, true, true, true, true, false", os.str());
+ CHECK_EQ("true, false, true, false, true, false, true, false", os.str());
#endif // OBJECT_PRINT
}
// Int8x16
{
- int8_t lanes[16] = {-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -128};
+ int8_t lanes[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
Handle<Int8x16> value = factory->NewInt8x16(lanes);
CHECK(value->IsInt8x16());
@@ -374,14 +379,27 @@ TEST(SimdObjects) {
#ifdef OBJECT_PRINT
std::ostringstream os;
value->Int8x16Print(os);
- CHECK_EQ("-1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, -128",
- os.str());
+ CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16", os.str());
+#endif // OBJECT_PRINT
+ }
+ // Uint8x16
+ {
+ uint8_t lanes[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
+
+ Handle<Uint8x16> value = factory->NewUint8x16(lanes);
+ CHECK(value->IsUint8x16());
+ CheckSimdValue<Uint8x16, uint8_t, 16>(*value, lanes, 127);
+
+#ifdef OBJECT_PRINT
+ std::ostringstream os;
+ value->Uint8x16Print(os);
+ CHECK_EQ("1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16", os.str());
#endif // OBJECT_PRINT
}
// Bool8x16
{
- bool lanes[16] = {true, true, true, true, true, true, true, false,
- true, true, true, true, true, true, true, false};
+ bool lanes[16] = {true, false, true, false, true, false, true, false,
+ true, false, true, false, true, false, true, false};
Handle<Bool8x16> value = factory->NewBool8x16(lanes);
CHECK(value->IsBool8x16());
@@ -391,8 +409,8 @@ TEST(SimdObjects) {
std::ostringstream os;
value->Bool8x16Print(os);
CHECK_EQ(
- "true, true, true, true, true, true, true, false, true, true, true, "
- "true, true, true, true, false",
+ "true, false, true, false, true, false, true, false, true, false, "
+ "true, false, true, false, true, false",
os.str());
#endif // OBJECT_PRINT
}
@@ -691,20 +709,31 @@ TEST(BytecodeArray) {
static const uint8_t kRawBytes[] = {0xc3, 0x7e, 0xa5, 0x5a};
static const int kRawBytesSize = sizeof(kRawBytes);
static const int kFrameSize = 32;
+ static const int kParameterCount = 2;
+ i::FLAG_manual_evacuation_candidates_selection = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
HandleScope scope(isolate);
+ SimulateFullSpace(heap->old_space());
+ Handle<FixedArray> constant_pool = factory->NewFixedArray(5, TENURED);
+ for (int i = 0; i < 5; i++) {
+ Handle<Object> number = factory->NewHeapNumber(i);
+ constant_pool->set(i, *number);
+ }
+
// Allocate and initialize BytecodeArray
- Handle<BytecodeArray> array =
- factory->NewBytecodeArray(kRawBytesSize, kRawBytes, kFrameSize);
+ Handle<BytecodeArray> array = factory->NewBytecodeArray(
+ kRawBytesSize, kRawBytes, kFrameSize, kParameterCount, constant_pool);
CHECK(array->IsBytecodeArray());
CHECK_EQ(array->length(), (int)sizeof(kRawBytes));
CHECK_EQ(array->frame_size(), kFrameSize);
+ CHECK_EQ(array->parameter_count(), kParameterCount);
+ CHECK_EQ(array->constant_pool(), *constant_pool);
CHECK_LE(array->address(), array->GetFirstBytecodeAddress());
CHECK_GE(array->address() + array->BytecodeArraySize(),
array->GetFirstBytecodeAddress() + array->length());
@@ -713,17 +742,25 @@ TEST(BytecodeArray) {
CHECK_EQ(array->get(i), kRawBytes[i]);
}
- // Full garbage collection
+ FixedArray* old_constant_pool_address = *constant_pool;
+
+ // Perform a full garbage collection and force the constant pool to be on an
+ // evacuation candidate.
+ Page* evac_page = Page::FromAddress(constant_pool->address());
+ evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
heap->CollectAllGarbage();
- // BytecodeArray should survive
+ // BytecodeArray should survive.
CHECK_EQ(array->length(), kRawBytesSize);
CHECK_EQ(array->frame_size(), kFrameSize);
-
for (int i = 0; i < kRawBytesSize; i++) {
CHECK_EQ(array->get(i), kRawBytes[i]);
CHECK_EQ(array->GetFirstBytecodeAddress()[i], kRawBytes[i]);
}
+
+ // Constant pool should have been migrated.
+ CHECK_EQ(array->constant_pool(), *constant_pool);
+ CHECK_NE(array->constant_pool(), old_constant_pool_address);
}
@@ -1144,84 +1181,6 @@ TEST(Iteration) {
}
-static int LenFromSize(int size) {
- return (size - FixedArray::kHeaderSize) / kPointerSize;
-}
-
-
-TEST(Regression39128) {
- // Test case for crbug.com/39128.
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- TestHeap* heap = CcTest::test_heap();
-
- // Increase the chance of 'bump-the-pointer' allocation in old space.
- heap->CollectAllGarbage();
-
- v8::HandleScope scope(CcTest::isolate());
-
- // The plan: create JSObject which references objects in new space.
- // Then clone this object (forcing it to go into old space) and check
- // that region dirty marks are updated correctly.
-
- // Step 1: prepare a map for the object. We add 1 inobject property to it.
- // Create a map with single inobject property.
- Handle<Map> my_map = Map::Create(CcTest::i_isolate(), 1);
- int n_properties = my_map->GetInObjectProperties();
- CHECK_GT(n_properties, 0);
-
- int object_size = my_map->instance_size();
-
- // Step 2: allocate a lot of objects so to almost fill new space: we need
- // just enough room to allocate JSObject and thus fill the newspace.
-
- int allocation_amount = Min(FixedArray::kMaxSize,
- Page::kMaxRegularHeapObjectSize + kPointerSize);
- int allocation_len = LenFromSize(allocation_amount);
- NewSpace* new_space = heap->new_space();
- Address* top_addr = new_space->allocation_top_address();
- Address* limit_addr = new_space->allocation_limit_address();
- while ((*limit_addr - *top_addr) > allocation_amount) {
- CHECK(!heap->always_allocate());
- Object* array = heap->AllocateFixedArray(allocation_len).ToObjectChecked();
- CHECK(new_space->Contains(array));
- }
-
- // Step 3: now allocate fixed array and JSObject to fill the whole new space.
- int to_fill = static_cast<int>(*limit_addr - *top_addr - object_size);
- int fixed_array_len = LenFromSize(to_fill);
- CHECK(fixed_array_len < FixedArray::kMaxLength);
-
- CHECK(!heap->always_allocate());
- Object* array = heap->AllocateFixedArray(fixed_array_len).ToObjectChecked();
- CHECK(new_space->Contains(array));
-
- Object* object = heap->AllocateJSObjectFromMap(*my_map).ToObjectChecked();
- CHECK(new_space->Contains(object));
- JSObject* jsobject = JSObject::cast(object);
- CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
- CHECK_EQ(0, jsobject->properties()->length());
- // Create a reference to object in new space in jsobject.
- FieldIndex index = FieldIndex::ForInObjectOffset(
- JSObject::kHeaderSize - kPointerSize);
- jsobject->FastPropertyAtPut(index, array);
-
- CHECK_EQ(0, static_cast<int>(*limit_addr - *top_addr));
-
- // Step 4: clone jsobject, but force always allocate first to create a clone
- // in old pointer space.
- Address old_space_top = heap->old_space()->top();
- AlwaysAllocateScope aa_scope(isolate);
- Object* clone_obj = heap->CopyJSObject(jsobject).ToObjectChecked();
- JSObject* clone = JSObject::cast(clone_obj);
- if (clone->address() != old_space_top) {
- // Alas, got allocated from free list, we cannot do checks.
- return;
- }
- CHECK(heap->old_space()->Contains(clone->address()));
-}
-
-
UNINITIALIZED_TEST(TestCodeFlushing) {
// If we do not flush code this test is invalid.
if (!FLAG_flush_code) return;
@@ -1953,7 +1912,7 @@ TEST(TestSizeOfRegExpCode) {
}
-TEST(TestSizeOfObjects) {
+HEAP_TEST(TestSizeOfObjects) {
v8::V8::Initialize();
// Get initial heap size after several full GCs, which will stabilize
@@ -1976,7 +1935,7 @@ TEST(TestSizeOfObjects) {
AlwaysAllocateScope always_allocate(CcTest::i_isolate());
int filler_size = static_cast<int>(FixedArray::SizeFor(8192));
for (int i = 1; i <= 100; i++) {
- CcTest::test_heap()->AllocateFixedArray(8192, TENURED).ToObjectChecked();
+ CcTest::heap()->AllocateFixedArray(8192, TENURED).ToObjectChecked();
CHECK_EQ(initial_size + i * filler_size,
static_cast<int>(CcTest::heap()->SizeOfObjects()));
}
@@ -2611,7 +2570,7 @@ TEST(InstanceOfStubWriteBarrier) {
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
- marking->Start(Heap::kNoGCFlags);
+ CcTest::heap()->StartIncrementalMarking();
Handle<JSFunction> f =
v8::Utils::OpenHandle(
@@ -2739,7 +2698,7 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
- marking->Start(Heap::kNoGCFlags);
+ CcTest::heap()->StartIncrementalMarking();
// The following calls will increment CcTest::heap()->global_ic_age().
CcTest::isolate()->ContextDisposedNotification();
SimulateIncrementalMarking(CcTest::heap());
@@ -2796,12 +2755,12 @@ HEAP_TEST(GCFlags) {
Heap* heap = CcTest::heap();
heap->set_current_gc_flags(Heap::kNoGCFlags);
- CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags());
+ CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags_);
// Set the flags to check whether we appropriately reset them after the GC.
heap->set_current_gc_flags(Heap::kAbortIncrementalMarkingMask);
heap->CollectAllGarbage(Heap::kReduceMemoryFootprintMask);
- CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags());
+ CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags_);
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->sweeping_in_progress()) {
@@ -2810,15 +2769,15 @@ HEAP_TEST(GCFlags) {
IncrementalMarking* marking = heap->incremental_marking();
marking->Stop();
- marking->Start(Heap::kReduceMemoryFootprintMask);
- CHECK_NE(0, heap->current_gc_flags() & Heap::kReduceMemoryFootprintMask);
+ heap->StartIncrementalMarking(Heap::kReduceMemoryFootprintMask);
+ CHECK_NE(0, heap->current_gc_flags_ & Heap::kReduceMemoryFootprintMask);
heap->CollectGarbage(NEW_SPACE);
// NewSpace scavenges should not overwrite the flags.
- CHECK_NE(0, heap->current_gc_flags() & Heap::kReduceMemoryFootprintMask);
+ CHECK_NE(0, heap->current_gc_flags_ & Heap::kReduceMemoryFootprintMask);
heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags());
+ CHECK_EQ(Heap::kNoGCFlags, heap->current_gc_flags_);
}
@@ -2828,7 +2787,7 @@ TEST(IdleNotificationFinishMarking) {
SimulateFullSpace(CcTest::heap()->old_space());
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
- marking->Start(Heap::kNoGCFlags);
+ CcTest::heap()->StartIncrementalMarking();
CHECK_EQ(CcTest::heap()->gc_count(), 0);
@@ -3275,106 +3234,6 @@ TEST(OptimizedPretenuringNestedDoubleLiterals) {
}
-// Make sure pretenuring feedback is gathered for constructed objects as well
-// as for literals.
-TEST(OptimizedPretenuringConstructorCalls) {
- if (!i::FLAG_pretenuring_call_new) {
- // FLAG_pretenuring_call_new needs to be synced with the snapshot.
- return;
- }
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_expose_gc = true;
- CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
- if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
- v8::HandleScope scope(CcTest::isolate());
-
- // Grow new space until maximum capacity is reached.
- while (!CcTest::heap()->new_space()->IsAtMaximumCapacity()) {
- CcTest::heap()->new_space()->Grow();
- }
-
- i::ScopedVector<char> source(1024);
- // Call new is doing slack tracking for the first
- // JSFunction::kGenerousAllocationCount allocations, and we can't find
- // mementos during that time.
- i::SNPrintF(
- source,
- "var number_elements = %d;"
- "var elements = new Array(number_elements);"
- "function foo() {"
- " this.a = 3;"
- " this.b = {};"
- "}"
- "function f() {"
- " for (var i = 0; i < number_elements; i++) {"
- " elements[i] = new foo();"
- " }"
- " return elements[number_elements - 1];"
- "};"
- "f(); gc();"
- "f(); f();"
- "%%OptimizeFunctionOnNextCall(f);"
- "f();",
- AllocationSite::kPretenureMinimumCreated +
- JSFunction::kGenerousAllocationCount);
-
- v8::Local<v8::Value> res = CompileRun(source.start());
-
- Handle<JSObject> o =
- v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
-
- CHECK(CcTest::heap()->InOldSpace(*o));
-}
-
-
-TEST(OptimizedPretenuringCallNew) {
- if (!i::FLAG_pretenuring_call_new) {
- // FLAG_pretenuring_call_new needs to be synced with the snapshot.
- return;
- }
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_expose_gc = true;
- CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
- if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
- v8::HandleScope scope(CcTest::isolate());
-
- // Grow new space until maximum capacity is reached.
- while (!CcTest::heap()->new_space()->IsAtMaximumCapacity()) {
- CcTest::heap()->new_space()->Grow();
- }
-
- i::ScopedVector<char> source(1024);
- // Call new is doing slack tracking for the first
- // JSFunction::kGenerousAllocationCount allocations, and we can't find
- // mementos during that time.
- i::SNPrintF(
- source,
- "var number_elements = %d;"
- "var elements = new Array(number_elements);"
- "function g() { this.a = 0; }"
- "function f() {"
- " for (var i = 0; i < number_elements; i++) {"
- " elements[i] = new g();"
- " }"
- " return elements[number_elements - 1];"
- "};"
- "f(); gc();"
- "f(); f();"
- "%%OptimizeFunctionOnNextCall(f);"
- "f();",
- AllocationSite::kPretenureMinimumCreated +
- JSFunction::kGenerousAllocationCount);
-
- v8::Local<v8::Value> res = CompileRun(source.start());
-
- Handle<JSObject> o =
- v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
- CHECK(CcTest::heap()->InOldSpace(*o));
-}
-
-
// Test regular array literals allocation.
TEST(OptimizedAllocationArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
@@ -3753,38 +3612,6 @@ TEST(CountForcedGC) {
}
-TEST(Regress2237) {
- i::FLAG_stress_compaction = false;
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- Factory* factory = isolate->factory();
- v8::HandleScope scope(CcTest::isolate());
- Handle<String> slice(CcTest::heap()->empty_string());
-
- {
- // Generate a parent that lives in new-space.
- v8::HandleScope inner_scope(CcTest::isolate());
- const char* c = "This text is long enough to trigger sliced strings.";
- Handle<String> s = factory->NewStringFromAsciiChecked(c);
- CHECK(s->IsSeqOneByteString());
- CHECK(CcTest::heap()->InNewSpace(*s));
-
- // Generate a sliced string that is based on the above parent and
- // lives in old-space.
- SimulateFullSpace(CcTest::heap()->new_space());
- AlwaysAllocateScope always_allocate(isolate);
- Handle<String> t = factory->NewProperSubString(s, 5, 35);
- CHECK(t->IsSlicedString());
- CHECK(!CcTest::heap()->InNewSpace(*t));
- *slice.location() = *t.location();
- }
-
- CHECK(SlicedString::cast(*slice)->parent()->IsSeqOneByteString());
- CcTest::heap()->CollectAllGarbage();
- CHECK(SlicedString::cast(*slice)->parent()->IsSeqOneByteString());
-}
-
-
#ifdef OBJECT_PRINT
TEST(PrintSharedFunctionInfo) {
CcTest::InitializeVM();
@@ -3872,11 +3699,11 @@ static void CheckVectorIC(Handle<JSFunction> f, int ic_slot_index,
Handle<TypeFeedbackVector> vector =
Handle<TypeFeedbackVector>(f->shared()->feedback_vector());
FeedbackVectorICSlot slot(ic_slot_index);
- if (vector->GetKind(slot) == Code::LOAD_IC) {
+ if (vector->GetKind(slot) == FeedbackVectorSlotKind::LOAD_IC) {
LoadICNexus nexus(vector, slot);
CHECK(nexus.StateFromFeedback() == desired_state);
} else {
- CHECK(vector->GetKind(slot) == Code::KEYED_LOAD_IC);
+ CHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, vector->GetKind(slot));
KeyedLoadICNexus nexus(vector, slot);
CHECK(nexus.StateFromFeedback() == desired_state);
}
@@ -4637,7 +4464,9 @@ TEST(Regress513507) {
if (!code->is_optimized_code()) return;
}
- Handle<FixedArray> lit = isolate->factory()->empty_fixed_array();
+ Handle<TypeFeedbackVector> vector = handle(shared->feedback_vector());
+ Handle<LiteralsArray> lit =
+ LiteralsArray::New(isolate, vector, shared->num_literals(), TENURED);
Handle<Context> context(isolate->context());
// Add the new code several times to the optimized code map and also set an
@@ -4693,7 +4522,9 @@ TEST(Regress514122) {
if (!code->is_optimized_code()) return;
}
- Handle<FixedArray> lit = isolate->factory()->empty_fixed_array();
+ Handle<TypeFeedbackVector> vector = handle(shared->feedback_vector());
+ Handle<LiteralsArray> lit =
+ LiteralsArray::New(isolate, vector, shared->num_literals(), TENURED);
Handle<Context> context(isolate->context());
// Add the code several times to the optimized code map.
@@ -4711,7 +4542,11 @@ TEST(Regress514122) {
AlwaysAllocateScope always_allocate(isolate);
// Make sure literal is placed on an old-space evacuation candidate.
SimulateFullSpace(heap->old_space());
- Handle<FixedArray> lit = isolate->factory()->NewFixedArray(23, TENURED);
+
+ // Make sure the number of literals is > 0.
+ Handle<LiteralsArray> lit =
+ LiteralsArray::New(isolate, vector, 23, TENURED);
+
evac_page = Page::FromAddress(lit->address());
BailoutId id = BailoutId(100);
SharedFunctionInfo::AddToOptimizedCodeMap(shared, context, code, lit, id);
@@ -4740,6 +4575,50 @@ TEST(Regress514122) {
}
+TEST(LargeObjectSlotRecording) {
+ FLAG_manual_evacuation_candidates_selection = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ HandleScope scope(isolate);
+
+ // Create an object on an evacuation candidate.
+ SimulateFullSpace(heap->old_space());
+ Handle<FixedArray> lit = isolate->factory()->NewFixedArray(4, TENURED);
+ Page* evac_page = Page::FromAddress(lit->address());
+ evac_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ FixedArray* old_location = *lit;
+
+ // Allocate a large object.
+ int size = Max(1000000, Page::kMaxRegularHeapObjectSize + KB);
+ CHECK(size > Page::kMaxRegularHeapObjectSize);
+ Handle<FixedArray> lo = isolate->factory()->NewFixedArray(size, TENURED);
+ CHECK(heap->lo_space()->Contains(*lo));
+
+ // Start incremental marking to activate the write barrier.
+ SimulateIncrementalMarking(heap, false);
+ heap->incremental_marking()->AdvanceIncrementalMarking(
+ 10000000, 10000000, IncrementalMarking::IdleStepActions());
+
+ // Create references from the large object to the object on the evacuation
+ // candidate.
+ const int kStep = size / 10;
+ for (int i = 0; i < size; i += kStep) {
+ lo->set(i, *lit);
+ CHECK(lo->get(i) == old_location);
+ }
+
+ // Move the evacuation candidate object.
+ CcTest::heap()->CollectAllGarbage();
+
+ // Verify that the pointers in the large object got updated.
+ for (int i = 0; i < size; i += kStep) {
+ CHECK_EQ(lo->get(i), *lit);
+ CHECK(lo->get(i) != old_location);
+ }
+}
+
+
class DummyVisitor : public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) { }
@@ -4776,7 +4655,9 @@ TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
"};"
"f(10 * 1024 * 1024);");
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
- if (marking->IsStopped()) marking->Start(Heap::kNoGCFlags);
+ if (marking->IsStopped()) {
+ CcTest::heap()->StartIncrementalMarking();
+ }
// This big step should be sufficient to mark the whole array.
marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
DCHECK(marking->IsComplete() ||
@@ -5504,7 +5385,9 @@ TEST(WeakCellsWithIncrementalMarking) {
Handle<WeakCell> weak_cell = factory->NewWeakCell(value);
CHECK(weak_cell->value()->IsFixedArray());
IncrementalMarking* marking = heap->incremental_marking();
- if (marking->IsStopped()) marking->Start(Heap::kNoGCFlags);
+ if (marking->IsStopped()) {
+ heap->StartIncrementalMarking();
+ }
marking->Step(128, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
heap->CollectGarbage(NEW_SPACE);
CHECK(weak_cell->value()->IsFixedArray());
@@ -5593,6 +5476,38 @@ static void RequestInterrupt(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
+UNINITIALIZED_TEST(Regress538257) {
+ i::FLAG_manual_evacuation_candidates_selection = true;
+ v8::Isolate::CreateParams create_params;
+ // Set heap limits.
+ create_params.constraints.set_max_semi_space_size(1 * Page::kPageSize / MB);
+ create_params.constraints.set_max_old_space_size(6 * Page::kPageSize / MB);
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ isolate->Enter();
+ {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ HandleScope handle_scope(i_isolate);
+ PagedSpace* old_space = i_isolate->heap()->old_space();
+ const int kMaxObjects = 10000;
+ const int kFixedArrayLen = 512;
+ Handle<FixedArray> objects[kMaxObjects];
+ for (int i = 0; (i < kMaxObjects) && old_space->CanExpand(Page::kPageSize);
+ i++) {
+ objects[i] = i_isolate->factory()->NewFixedArray(kFixedArrayLen, TENURED);
+ Page::FromAddress(objects[i]->address())
+ ->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ }
+ SimulateFullSpace(old_space);
+ i_isolate->heap()->CollectGarbage(OLD_SPACE);
+ // If we get this far, we've successfully aborted compaction. Any further
+ // allocations might trigger OOM.
+ }
+ isolate->Exit();
+ isolate->Dispose();
+}
+
+
TEST(Regress357137) {
CcTest::InitializeVM();
v8::Isolate* isolate = CcTest::isolate();
@@ -5660,7 +5575,8 @@ TEST(ArrayShiftSweeping) {
CHECK(heap->InOldSpace(o->elements()));
CHECK(heap->InOldSpace(*o));
Page* page = Page::FromAddress(o->elements()->address());
- CHECK(page->parallel_sweeping() <= MemoryChunk::SWEEPING_FINALIZE ||
+ CHECK(page->parallel_sweeping_state().Value() <=
+ MemoryChunk::kSweepingFinalize ||
Marking::IsBlack(Marking::MarkBitFrom(o->elements())));
}
@@ -5678,6 +5594,7 @@ UNINITIALIZED_TEST(PromotionQueue) {
v8::Context::New(isolate)->Enter();
Heap* heap = i_isolate->heap();
NewSpace* new_space = heap->new_space();
+ DisableInlineAllocationSteps(new_space);
// In this test we will try to overwrite the promotion queue which is at the
// end of to-space. To actually make that possible, we need at least two
@@ -5797,7 +5714,7 @@ TEST(Regress388880) {
// that would cause crash.
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
marking->Stop();
- marking->Start(Heap::kNoGCFlags);
+ CcTest::heap()->StartIncrementalMarking();
CHECK(marking->IsMarking());
// Now everything is set up for crashing in JSObject::MigrateFastToFast()
@@ -5823,7 +5740,7 @@ TEST(Regress3631) {
"}"
"weak_map");
if (marking->IsStopped()) {
- marking->Start(Heap::kNoGCFlags);
+ CcTest::heap()->StartIncrementalMarking();
}
// Incrementally mark the backing store.
Handle<JSObject> obj =
@@ -5863,13 +5780,13 @@ TEST(Regress442710) {
}
-TEST(NumberStringCacheSize) {
+HEAP_TEST(NumberStringCacheSize) {
// Test that the number-string cache has not been resized in the snapshot.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
if (!isolate->snapshot_available()) return;
Heap* heap = isolate->heap();
- CHECK_EQ(TestHeap::kInitialNumberStringCacheSize * 2,
+ CHECK_EQ(Heap::kInitialNumberStringCacheSize * 2,
heap->number_string_cache()->length());
}
@@ -6308,7 +6225,6 @@ TEST(MessageObjectLeak) {
const char* flag = "--turbo-filter=*";
FlagList::SetFlagsFromString(flag, StrLength(flag));
FLAG_always_opt = true;
- FLAG_turbo_try_catch = true;
FLAG_turbo_try_finally = true;
CompileRun(test);
@@ -6408,52 +6324,6 @@ TEST(AllocationThroughput) {
}
-TEST(SlotsBufferObjectSlotsRemoval) {
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- Factory* factory = isolate->factory();
-
- SlotsBuffer* buffer = new SlotsBuffer(NULL);
- void* fake_object[1];
-
- Handle<FixedArray> array = factory->NewFixedArray(2, TENURED);
- CHECK(heap->old_space()->Contains(*array));
- array->set(0, reinterpret_cast<Object*>(fake_object), SKIP_WRITE_BARRIER);
-
- // Firstly, let's test the regular slots buffer entry.
- buffer->Add(HeapObject::RawField(*array, FixedArray::kHeaderSize));
- CHECK(reinterpret_cast<void*>(buffer->Get(0)) ==
- HeapObject::RawField(*array, FixedArray::kHeaderSize));
- SlotsBuffer::RemoveObjectSlots(CcTest::i_isolate()->heap(), buffer,
- array->address(),
- array->address() + array->Size());
- CHECK(reinterpret_cast<void*>(buffer->Get(0)) ==
- HeapObject::RawField(heap->empty_fixed_array(),
- FixedArrayBase::kLengthOffset));
-
- // Secondly, let's test the typed slots buffer entry.
- SlotsBuffer::AddTo(NULL, &buffer, SlotsBuffer::EMBEDDED_OBJECT_SLOT,
- array->address() + FixedArray::kHeaderSize,
- SlotsBuffer::FAIL_ON_OVERFLOW);
- CHECK(reinterpret_cast<void*>(buffer->Get(1)) ==
- reinterpret_cast<Object**>(SlotsBuffer::EMBEDDED_OBJECT_SLOT));
- CHECK(reinterpret_cast<void*>(buffer->Get(2)) ==
- HeapObject::RawField(*array, FixedArray::kHeaderSize));
- SlotsBuffer::RemoveObjectSlots(CcTest::i_isolate()->heap(), buffer,
- array->address(),
- array->address() + array->Size());
- CHECK(reinterpret_cast<void*>(buffer->Get(1)) ==
- HeapObject::RawField(heap->empty_fixed_array(),
- FixedArrayBase::kLengthOffset));
- CHECK(reinterpret_cast<void*>(buffer->Get(2)) ==
- HeapObject::RawField(heap->empty_fixed_array(),
- FixedArrayBase::kLengthOffset));
- delete buffer;
-}
-
-
TEST(ContextMeasure) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
@@ -6479,5 +6349,106 @@ TEST(ContextMeasure) {
CHECK_LE(measure.Count(), count_upper_limit);
CHECK_LE(measure.Size(), size_upper_limit);
}
+
+
+TEST(ScriptIterator) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = CcTest::heap();
+ LocalContext context;
+
+ heap->CollectAllGarbage();
+
+ int script_count = 0;
+ {
+ HeapIterator it(heap);
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ if (obj->IsScript()) script_count++;
+ }
+ }
+
+ {
+ Script::Iterator iterator(isolate);
+ while (iterator.Next()) script_count--;
+ }
+
+ CHECK_EQ(0, script_count);
+}
+
+
+TEST(SharedFunctionInfoIterator) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = CcTest::heap();
+ LocalContext context;
+
+ heap->CollectAllGarbage();
+ heap->CollectAllGarbage();
+
+ int sfi_count = 0;
+ {
+ HeapIterator it(heap);
+ for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
+ if (!obj->IsSharedFunctionInfo()) continue;
+ // Shared function infos without a script (API functions or C++ builtins)
+ // are not returned by the iterator because they are not created from a
+ // script. They are not interesting for the type feedback vector anyway.
+
+ // TODO(mvstanton): There are builtins that use type feedback vectors,
+ // consider adding these to the iterator.
+ SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+ if (shared->script()->IsUndefined()) {
+ CHECK(shared->native() || 0 == shared->feedback_vector()->ICSlots());
+ } else {
+ sfi_count++;
+ }
+ }
+ }
+
+ {
+ SharedFunctionInfo::Iterator iterator(isolate);
+ while (iterator.Next()) sfi_count--;
+ }
+
+ CHECK_EQ(0, sfi_count);
+}
+
+
+template <typename T>
+static UniqueId MakeUniqueId(const Persistent<T>& p) {
+ return UniqueId(reinterpret_cast<uintptr_t>(*v8::Utils::OpenPersistent(p)));
+}
+
+
+TEST(Regress519319) {
+ CcTest::InitializeVM();
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ Heap* heap = CcTest::heap();
+ LocalContext context;
+
+ v8::Persistent<Value> parent;
+ v8::Persistent<Value> child;
+
+ parent.Reset(isolate, v8::Object::New(isolate));
+ child.Reset(isolate, v8::Object::New(isolate));
+
+ SimulateFullSpace(heap->old_space());
+ heap->CollectGarbage(OLD_SPACE);
+ {
+ UniqueId id = MakeUniqueId(parent);
+ isolate->SetObjectGroupId(parent, id);
+ isolate->SetReferenceFromGroup(id, child);
+ }
+ // The CollectGarbage call above starts sweeper threads.
+ // The crash will happen if the following two functions
+ // are called before sweeping finishes.
+ heap->StartIncrementalMarking();
+ heap->FinalizeIncrementalMarkingIfComplete("test");
+}
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-identity-map.cc b/deps/v8/test/cctest/test-identity-map.cc
index 787fda77d1..ee64c4dd40 100644
--- a/deps/v8/test/cctest/test-identity-map.cc
+++ b/deps/v8/test/cctest/test-identity-map.cc
@@ -4,7 +4,7 @@
#include "src/v8.h"
-#include "src/heap/identity-map.h"
+#include "src/identity-map.h"
#include "src/zone.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-incremental-marking.cc b/deps/v8/test/cctest/test-incremental-marking.cc
new file mode 100644
index 0000000000..d8bdeeee37
--- /dev/null
+++ b/deps/v8/test/cctest/test-incremental-marking.cc
@@ -0,0 +1,168 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#ifdef __linux__
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#include <utility>
+
+#include "src/v8.h"
+
+#include "src/full-codegen/full-codegen.h"
+#include "src/global-handles.h"
+#include "test/cctest/cctest.h"
+
+using v8::IdleTask;
+using v8::Task;
+using v8::Isolate;
+
+
+class MockPlatform : public v8::Platform {
+ public:
+ explicit MockPlatform(v8::Platform* platform)
+ : platform_(platform), idle_task_(nullptr), delayed_task_(nullptr) {}
+ virtual ~MockPlatform() {
+ delete idle_task_;
+ delete delayed_task_;
+ }
+
+ void CallOnBackgroundThread(Task* task,
+ ExpectedRuntime expected_runtime) override {
+ platform_->CallOnBackgroundThread(task, expected_runtime);
+ }
+
+ void CallOnForegroundThread(Isolate* isolate, Task* task) override {
+ platform_->CallOnForegroundThread(isolate, task);
+ }
+
+ void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
+ double delay_in_seconds) override {
+ if (delayed_task_ != nullptr) {
+ delete delayed_task_;
+ }
+ delayed_task_ = task;
+ }
+
+ double MonotonicallyIncreasingTime() override {
+ return platform_->MonotonicallyIncreasingTime();
+ }
+
+ void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) override {
+ CHECK(nullptr == idle_task_);
+ idle_task_ = task;
+ }
+
+ bool IdleTasksEnabled(Isolate* isolate) override { return true; }
+
+ bool PendingIdleTask() { return idle_task_ != nullptr; }
+
+ void PerformIdleTask(double idle_time_in_seconds) {
+ IdleTask* task = idle_task_;
+ idle_task_ = nullptr;
+ task->Run(MonotonicallyIncreasingTime() + idle_time_in_seconds);
+ delete task;
+ }
+
+ bool PendingDelayedTask() { return delayed_task_ != nullptr; }
+
+ void PerformDelayedTask() {
+ Task* task = delayed_task_;
+ delayed_task_ = nullptr;
+ task->Run();
+ delete task;
+ }
+
+ private:
+ v8::Platform* platform_;
+ IdleTask* idle_task_;
+ Task* delayed_task_;
+};
+
+
+TEST(IncrementalMarkingUsingIdleTasks) {
+ if (!i::FLAG_incremental_marking) return;
+ CcTest::InitializeVM();
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ MockPlatform platform(old_platform);
+ i::V8::SetPlatformForTesting(&platform);
+ SimulateFullSpace(CcTest::heap()->old_space());
+ i::IncrementalMarking* marking = CcTest::heap()->incremental_marking();
+ marking->Stop();
+ marking->Start();
+ CHECK(platform.PendingIdleTask());
+ const double kLongIdleTimeInSeconds = 1;
+ const double kShortIdleTimeInSeconds = 0.010;
+ const int kShortStepCount = 10;
+ for (int i = 0; i < kShortStepCount && platform.PendingIdleTask(); i++) {
+ platform.PerformIdleTask(kShortIdleTimeInSeconds);
+ }
+ while (platform.PendingIdleTask()) {
+ platform.PerformIdleTask(kLongIdleTimeInSeconds);
+ }
+ CHECK(marking->IsStopped());
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
+
+TEST(IncrementalMarkingUsingIdleTasksAfterGC) {
+ if (!i::FLAG_incremental_marking) return;
+ CcTest::InitializeVM();
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ MockPlatform platform(old_platform);
+ i::V8::SetPlatformForTesting(&platform);
+ SimulateFullSpace(CcTest::heap()->old_space());
+ CcTest::heap()->CollectAllGarbage();
+ i::IncrementalMarking* marking = CcTest::heap()->incremental_marking();
+ marking->Stop();
+ marking->Start();
+ CHECK(platform.PendingIdleTask());
+ const double kLongIdleTimeInSeconds = 1;
+ const double kShortIdleTimeInSeconds = 0.010;
+ const int kShortStepCount = 10;
+ for (int i = 0; i < kShortStepCount && platform.PendingIdleTask(); i++) {
+ platform.PerformIdleTask(kShortIdleTimeInSeconds);
+ }
+ while (platform.PendingIdleTask()) {
+ platform.PerformIdleTask(kLongIdleTimeInSeconds);
+ }
+ CHECK(marking->IsStopped());
+ i::V8::SetPlatformForTesting(old_platform);
+}
+
+
+TEST(IncrementalMarkingUsingDelayedTasks) {
+ if (!i::FLAG_incremental_marking) return;
+ CcTest::InitializeVM();
+ v8::Platform* old_platform = i::V8::GetCurrentPlatform();
+ MockPlatform platform(old_platform);
+ i::V8::SetPlatformForTesting(&platform);
+ SimulateFullSpace(CcTest::heap()->old_space());
+ i::IncrementalMarking* marking = CcTest::heap()->incremental_marking();
+ marking->Stop();
+ marking->Start();
+ CHECK(platform.PendingIdleTask());
+ // The delayed task should be a no-op if the idle task makes progress.
+ const int kIgnoredDelayedTaskStepCount = 1000;
+ for (int i = 0; i < kIgnoredDelayedTaskStepCount; i++) {
+ // Dummy idle task progress.
+ marking->incremental_marking_job()->NotifyIdleTaskProgress();
+ CHECK(platform.PendingDelayedTask());
+ platform.PerformDelayedTask();
+ }
+ // Once we stop notifying idle task progress, the delayed tasks
+ // should finish marking.
+ while (!marking->IsStopped() && platform.PendingDelayedTask()) {
+ platform.PerformDelayedTask();
+ }
+ // There could be a pending delayed task from the memory reducer after GC finishes.
+ CHECK(marking->IsStopped());
+ i::V8::SetPlatformForTesting(old_platform);
+}
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index e4ca28212d..f991e8fd80 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -36,7 +36,7 @@
#include "src/disassembler.h"
#include "src/isolate.h"
#include "src/log.h"
-#include "src/sampler.h"
+#include "src/profiler/sampler.h"
#include "src/vm-state-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/trace-extension.h"
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index daf2e688b6..925803cc15 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -36,9 +36,9 @@
#include "src/v8.h"
-#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/log-utils.h"
+#include "src/profiler/cpu-profiler.h"
#include "src/snapshot/natives.h"
#include "src/utils.h"
#include "src/v8threads.h"
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 73369d29e5..84cb051205 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -42,6 +42,7 @@
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/heap-tester.h"
using namespace v8::internal;
using v8::Just;
@@ -74,9 +75,9 @@ TEST(MarkingDeque) {
}
-TEST(Promotion) {
+HEAP_TEST(Promotion) {
CcTest::InitializeVM();
- TestHeap* heap = CcTest::test_heap();
+ Heap* heap = CcTest::heap();
heap->ConfigureHeap(1, 1, 1, 0);
v8::HandleScope sc(CcTest::isolate());
@@ -100,9 +101,9 @@ TEST(Promotion) {
}
-TEST(NoPromotion) {
+HEAP_TEST(NoPromotion) {
CcTest::InitializeVM();
- TestHeap* heap = CcTest::test_heap();
+ Heap* heap = CcTest::heap();
heap->ConfigureHeap(1, 1, 1, 0);
v8::HandleScope sc(CcTest::isolate());
@@ -125,12 +126,12 @@ TEST(NoPromotion) {
}
-TEST(MarkCompactCollector) {
+HEAP_TEST(MarkCompactCollector) {
FLAG_incremental_marking = false;
FLAG_retain_maps_for_n_gc = 0;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
- TestHeap* heap = CcTest::test_heap();
+ Heap* heap = CcTest::heap();
Factory* factory = isolate->factory();
v8::HandleScope sc(CcTest::isolate());
@@ -244,11 +245,11 @@ static void WeakPointerCallback(
}
-TEST(ObjectGroups) {
+HEAP_TEST(ObjectGroups) {
FLAG_incremental_marking = false;
CcTest::InitializeVM();
GlobalHandles* global_handles = CcTest::i_isolate()->global_handles();
- TestHeap* heap = CcTest::test_heap();
+ Heap* heap = CcTest::heap();
NumberOfWeakCalls = 0;
v8::HandleScope handle_scope(CcTest::isolate());
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index a97666384b..b26aad03a5 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -95,45 +95,3 @@ TEST(BadMementoAfterTopForceScavenge) {
// Force GC to test the poisoned memento handling
CcTest::i_isolate()->heap()->CollectGarbage(i::NEW_SPACE);
}
-
-
-TEST(PretenuringCallNew) {
- CcTest::InitializeVM();
- if (!i::FLAG_allocation_site_pretenuring) return;
- if (!i::FLAG_pretenuring_call_new) return;
- if (i::FLAG_always_opt) return;
-
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
-
- int call_count = 10;
- i::ScopedVector<char> test_buf(1024);
- const char* program =
- "function f() {"
- " this.a = 3;"
- " this.b = {};"
- " return this;"
- "};"
- "var a;"
- "for(var i = 0; i < %d; i++) {"
- " a = new f();"
- "}"
- "a;";
- i::SNPrintF(test_buf, program, call_count);
- v8::Local<v8::Value> res = CompileRun(test_buf.start());
- Handle<JSObject> o =
- v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
-
- // The object of class f should have a memento secreted behind it.
- Address memento_address = o->address() + o->map()->instance_size();
- AllocationMemento* memento =
- reinterpret_cast<AllocationMemento*>(memento_address + kHeapObjectTag);
- CHECK_EQ(memento->map(), heap->allocation_memento_map());
-
- // Furthermore, how many mementos did we create? The count should match
- // call_count. Note, that mementos are allocated during the inobject slack
- // tracking phase.
- AllocationSite* site = memento->GetAllocationSite();
- CHECK_EQ(call_count, site->pretenure_create_count()->value());
-}
diff --git a/deps/v8/test/cctest/test-migrations.cc b/deps/v8/test/cctest/test-migrations.cc
index 3ace0488f8..d441990e3c 100644
--- a/deps/v8/test/cctest/test-migrations.cc
+++ b/deps/v8/test/cctest/test-migrations.cc
@@ -7,7 +7,6 @@
#include "src/v8.h"
-#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/execution.h"
#include "src/factory.h"
@@ -568,7 +567,6 @@ static void TestGeneralizeRepresentation(
CHECK(expectations.Check(*map));
Zone zone;
- FakeStubForTesting stub(isolate);
if (is_detached_map) {
detach_point_map = Map::ReconfigureProperty(
@@ -583,7 +581,7 @@ static void TestGeneralizeRepresentation(
// Create new maps by generalizing representation of propX field.
Handle<Map> field_owner(map->FindFieldOwner(property_index), isolate);
- CompilationInfo info(&stub, isolate, &zone);
+ CompilationInfo info("testing", isolate, &zone);
CHECK(!info.dependencies()->HasAborted());
info.dependencies()->AssumeFieldType(field_owner);
@@ -959,9 +957,8 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
CHECK(expectations2.Check(*map2));
Zone zone;
- FakeStubForTesting stub(isolate);
Handle<Map> field_owner(map->FindFieldOwner(kSplitProp), isolate);
- CompilationInfo info(&stub, isolate, &zone);
+ CompilationInfo info("testing", isolate, &zone);
CHECK(!info.dependencies()->HasAborted());
info.dependencies()->AssumeFieldType(field_owner);
@@ -1045,9 +1042,8 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
CHECK(expectations2.Check(*map2));
Zone zone;
- FakeStubForTesting stub(isolate);
Handle<Map> field_owner(map->FindFieldOwner(kSplitProp), isolate);
- CompilationInfo info(&stub, isolate, &zone);
+ CompilationInfo info("testing", isolate, &zone);
CHECK(!info.dependencies()->HasAborted());
info.dependencies()->AssumeFieldType(field_owner);
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 94605c6a36..6300c0987d 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -1134,10 +1134,19 @@ static void CheckParsesToNumber(const char* source, bool with_dot) {
CHECK(fun->body()->length() == 1);
CHECK(fun->body()->at(0)->IsReturnStatement());
i::ReturnStatement* ret = fun->body()->at(0)->AsReturnStatement();
- CHECK(ret->expression()->IsLiteral());
i::Literal* lit = ret->expression()->AsLiteral();
- const i::AstValue* val = lit->raw_value();
- CHECK(with_dot == val->ContainsDot());
+ if (lit != NULL) {
+ const i::AstValue* val = lit->raw_value();
+ CHECK(with_dot == val->ContainsDot());
+ } else if (with_dot) {
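+    // Unary plus is parsed as a multiplication by the literal 1.0, so we
+    // expect a MUL node whose right operand is a dot-containing literal 1.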
+ i::BinaryOperation* bin = ret->expression()->AsBinaryOperation();
+ CHECK(bin != NULL);
+ CHECK_EQ(i::Token::MUL, bin->op());
+ i::Literal* rlit = bin->right()->AsLiteral();
+ const i::AstValue* val = rlit->raw_value();
+ CHECK(with_dot == val->ContainsDot());
+ CHECK_EQ(1.0, val->AsNumber());
+ }
}
@@ -1148,6 +1157,7 @@ TEST(ParseNumbers) {
CheckParsesToNumber("134.e44", true);
CheckParsesToNumber("134.44e44", true);
CheckParsesToNumber(".44", true);
+ CheckParsesToNumber("+x", true);
}
@@ -1488,6 +1498,7 @@ enum ParserFlag {
kAllowLazy,
kAllowNatives,
kAllowHarmonyArrowFunctions,
+ kAllowHarmonyDefaultParameters,
kAllowHarmonyRestParameters,
kAllowHarmonySloppy,
kAllowHarmonySloppyLet,
@@ -1513,9 +1524,11 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
parser->set_allow_natives(flags.Contains(kAllowNatives));
parser->set_allow_harmony_arrow_functions(
flags.Contains(kAllowHarmonyArrowFunctions));
+ parser->set_allow_harmony_default_parameters(
+ flags.Contains(kAllowHarmonyDefaultParameters));
parser->set_allow_harmony_rest_parameters(
flags.Contains(kAllowHarmonyRestParameters));
- parser->set_allow_harmony_spreadcalls(
+ parser->set_allow_harmony_spread_calls(
flags.Contains(kAllowHarmonySpreadCalls));
parser->set_allow_harmony_sloppy(flags.Contains(kAllowHarmonySloppy));
parser->set_allow_harmony_sloppy_let(flags.Contains(kAllowHarmonySloppyLet));
@@ -2780,27 +2793,6 @@ TEST(NoErrorsRegexpLiteral) {
}
-TEST(Intrinsics) {
- const char* context_data[][2] = {
- {"", ""},
- { NULL, NULL }
- };
-
- const char* statement_data[] = {
- "%someintrinsic(arg)",
- NULL
- };
-
- // This test requires kAllowNatives to succeed.
- static const ParserFlag always_true_flags[] = {
- kAllowNatives
- };
-
- RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
- always_true_flags, 1);
-}
-
-
TEST(NoErrorsNewExpression) {
const char* context_data[][2] = {
{"", ""},
@@ -3792,10 +3784,30 @@ TEST(NoErrorsArrowFunctions) {
// Arrow has more precedence, this is the same as: foo ? bar : (baz = {})
"foo ? bar : baz => {}",
+
+ // Arrows with non-simple parameters.
+ "({a}) => {}",
+ "(x = 9) => {}",
+ "(x, y = 9) => {}",
+ "(x = 9, y) => {}",
+ "(x, y = 9, z) => {}",
+ "(x, y = 9, z = 8) => {}",
+ "(...a) => {}",
+ "(x, ...a) => {}",
+ "(x = 9, ...a) => {}",
+ "(x, y = 9, ...a) => {}",
+ "(x, y = 9, {b}, z = 8, ...a) => {}",
+ // TODO(wingo, rossberg): This is not accepted right now.
+ // "({a} = {}) => {}",
+ // "([x] = []) => {}",
+ "({a = 42}) => {}",
+ "([x = 0]) => {}",
NULL
};
- static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions};
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyArrowFunctions, kAllowHarmonyDefaultParameters,
+ kAllowHarmonyRestParameters, kAllowHarmonyDestructuring};
RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
always_flags, arraysize(always_flags));
}
@@ -3861,6 +3873,50 @@ TEST(ArrowFunctionsSloppyParameterNames) {
}
+TEST(ArrowFunctionsYieldParameterNameInGenerator) {
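+  // "yield" may name an arrow-function parameter inside a sloppy function,
+  // but not inside a strict function or a generator.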
+ const char* sloppy_function_context_data[][2] = {
+ {"(function f() { (", "); });"},
+ {NULL, NULL}
+ };
+
+ const char* strict_function_context_data[][2] = {
+ {"(function f() {'use strong'; (", "); });"},
+ {"(function f() {'use strict'; (", "); });"},
+ {NULL, NULL}
+ };
+
+ const char* generator_context_data[][2] = {
+ {"(function *g() {'use strong'; (", "); });"},
+ {"(function *g() {'use strict'; (", "); });"},
+ {"(function *g() { (", "); });"},
+ {NULL, NULL}
+ };
+
+ const char* arrow_data[] = {
+ "yield => {}",
+ "(yield) => {}",
+ "(a, yield) => {}",
+ "(yield, a) => {}",
+ "(yield, ...a) => {}",
+ "(a, ...yield) => {}",
+ "({yield}) => {}",
+ "([yield]) => {}",
+ NULL
+ };
+
+ static const ParserFlag always_flags[] = { kAllowHarmonyDestructuring,
+ kAllowHarmonyRestParameters,
+ kAllowHarmonyArrowFunctions,
+ kAllowStrongMode};
+ RunParserSyncTest(sloppy_function_context_data, arrow_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_function_context_data, arrow_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(generator_context_data, arrow_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
TEST(SuperNoErrors) {
// Tests that parser and preparser accept 'super' keyword in right places.
const char* context_data[][2] = {
@@ -6114,6 +6170,33 @@ TEST(StrongConstructorReturns) {
}
+TEST(StrongConstructorDirective) {
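+  // A "use strong" directive in a class constructor body is an error; in
+  // other class methods it is allowed.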
+ const char* context_data[][2] = {{"class c { ", " }"},
+ {"(class c { ", " });"},
+ {"let a = (class c { ", " });"},
+ {NULL}};
+
+ const char* error_data[] = {
+ "constructor() { \"use strong\" }",
+ "constructor(...rest) { \"use strong\" }",
+ "foo() {} constructor() { \"use strong\" }",
+ "foo(...rest) { \"use strict\" } constructor() { \"use strong\" }", NULL};
+
+ const char* success_data[] = {
+ "constructor() { \"use strict\" }", "foo() { \"use strong\" }",
+ "foo() { \"use strong\" } constructor() {}", NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyRestParameters, kAllowHarmonySloppy, kAllowHarmonySloppyLet,
+ kAllowStrongMode};
+
+ RunParserSyncTest(context_data, error_data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+ RunParserSyncTest(context_data, success_data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
TEST(StrongUndefinedLocal) {
const char* context_data[][2] = {{"", ""}, {NULL}};
@@ -6458,6 +6541,8 @@ TEST(DestructuringPositiveTests) {
"a",
"{ x : y }",
"{ x : y = 1 }",
+ "{ get, set }",
+ "{ get = 1, set = 2 }",
"[a]",
"[a = 1]",
"[a,b,c]",
@@ -6768,6 +6853,102 @@ TEST(DestructuringDisallowPatternsInRestParams) {
}
+TEST(DefaultParametersYieldInInitializers) {
+ // clang-format off
+ const char* sloppy_function_context_data[][2] = {
+ {"(function f(", ") { });"},
+ {NULL, NULL}
+ };
+
+ const char* strict_function_context_data[][2] = {
+ {"'use strong'; (function f(", ") { });"},
+ {"'use strict'; (function f(", ") { });"},
+ {NULL, NULL}
+ };
+
+ const char* sloppy_arrow_context_data[][2] = {
+ {"((", ")=>{});"},
+ {NULL, NULL}
+ };
+
+ const char* strict_arrow_context_data[][2] = {
+ {"'use strong'; ((", ")=>{});"},
+ {"'use strict'; ((", ")=>{});"},
+ {NULL, NULL}
+ };
+
+ const char* generator_context_data[][2] = {
+ {"'use strong'; (function *g(", ") { });"},
+ {"'use strict'; (function *g(", ") { });"},
+ {"(function *g(", ") { });"},
+ {NULL, NULL}
+ };
+
+ const char* parameter_data[] = {
+ "x=yield",
+ "x, y=yield",
+ "{x=yield}",
+ "[x=yield]",
+
+ "x=(yield)",
+ "x, y=(yield)",
+ "{x=(yield)}",
+ "[x=(yield)]",
+
+ "x=f(yield)",
+ "x, y=f(yield)",
+ "{x=f(yield)}",
+ "[x=f(yield)]",
+ NULL
+ };
+
+ // TODO(wingo): These aren't really destructuring assignment patterns; we're
+ // just splitting them for now until the parser gets support for arrow
+ // function arguments that look like destructuring assignments. When that
+ // happens we should unify destructuring_assignment_data and parameter_data.
+ const char* destructuring_assignment_data[] = {
+ "{x}=yield",
+ "[x]=yield",
+
+ "{x}=(yield)",
+ "[x]=(yield)",
+
+ "{x}=f(yield)",
+ "[x]=f(yield)",
+ NULL
+ };
+
+ // clang-format on
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyDestructuring, kAllowHarmonyDefaultParameters,
+ kAllowHarmonyArrowFunctions, kAllowStrongMode};
+
+ RunParserSyncTest(sloppy_function_context_data, parameter_data, kSuccess,
+ NULL, 0, always_flags, arraysize(always_flags));
+ RunParserSyncTest(sloppy_function_context_data, destructuring_assignment_data,
+ kSuccess, NULL, 0, always_flags, arraysize(always_flags));
+ RunParserSyncTest(sloppy_arrow_context_data, parameter_data, kSuccess, NULL,
+ 0, always_flags, arraysize(always_flags));
+ // TODO(wingo): Will change to kSuccess when destructuring assignment lands.
+ RunParserSyncTest(sloppy_arrow_context_data, destructuring_assignment_data,
+ kError, NULL, 0, always_flags, arraysize(always_flags));
+
+ RunParserSyncTest(strict_function_context_data, parameter_data, kError, NULL,
+ 0, always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_function_context_data, destructuring_assignment_data,
+ kError, NULL, 0, always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_arrow_context_data, parameter_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(strict_arrow_context_data, destructuring_assignment_data,
+ kError, NULL, 0, always_flags, arraysize(always_flags));
+
+ RunParserSyncTest(generator_context_data, parameter_data, kError, NULL, 0,
+ always_flags, arraysize(always_flags));
+ RunParserSyncTest(generator_context_data, destructuring_assignment_data,
+ kError, NULL, 0, always_flags, arraysize(always_flags));
+}
+
+
TEST(SpreadArray) {
i::FLAG_harmony_spread_arrays = true;
@@ -6941,3 +7122,119 @@ TEST(LetSloppy) {
RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
arraysize(always_flags));
}
+
+
+TEST(LanguageModeDirectivesNonSimpleParameterListErrors) {
+  // TC39 deemed "use strict" directives to be an error when they occur in
+  // the body of a function with a non-simple parameter list, on July 29, 2015.
+  // https://goo.gl/ueA7Ln
+  //
+  // In V8, this also applies to "use strong" directives.
+ const char* context_data[][2] = {
+ {"function f(", ") { 'use strict'; }"},
+ {"function f(", ") { 'use strong'; }"},
+ {"function* g(", ") { 'use strict'; }"},
+ {"function* g(", ") { 'use strong'; }"},
+ {"class c { foo(", ") { 'use strict' }"},
+ {"class c { foo(", ") { 'use strong' }"},
+ {"var a = (", ") => { 'use strict'; }"},
+ {"var a = (", ") => { 'use strong'; }"},
+ {"var o = { m(", ") { 'use strict'; }"},
+ {"var o = { m(", ") { 'use strong'; }"},
+ {"var o = { *gm(", ") { 'use strict'; }"},
+ {"var o = { *gm(", ") { 'use strong'; }"},
+ {"var c = { m(", ") { 'use strict'; }"},
+ {"var c = { m(", ") { 'use strong'; }"},
+ {"var c = { *gm(", ") { 'use strict'; }"},
+ {"var c = { *gm(", ") { 'use strong'; }"},
+
+ {"'use strict'; function f(", ") { 'use strict'; }"},
+ {"'use strict'; function f(", ") { 'use strong'; }"},
+ {"'use strict'; function* g(", ") { 'use strict'; }"},
+ {"'use strict'; function* g(", ") { 'use strong'; }"},
+ {"'use strict'; class c { foo(", ") { 'use strict' }"},
+ {"'use strict'; class c { foo(", ") { 'use strong' }"},
+ {"'use strict'; var a = (", ") => { 'use strict'; }"},
+ {"'use strict'; var a = (", ") => { 'use strong'; }"},
+ {"'use strict'; var o = { m(", ") { 'use strict'; }"},
+ {"'use strict'; var o = { m(", ") { 'use strong'; }"},
+ {"'use strict'; var o = { *gm(", ") { 'use strict'; }"},
+ {"'use strict'; var o = { *gm(", ") { 'use strong'; }"},
+ {"'use strict'; var c = { m(", ") { 'use strict'; }"},
+ {"'use strict'; var c = { m(", ") { 'use strong'; }"},
+ {"'use strict'; var c = { *gm(", ") { 'use strict'; }"},
+ {"'use strict'; var c = { *gm(", ") { 'use strong'; }"},
+
+ {"'use strong'; function f(", ") { 'use strict'; }"},
+ {"'use strong'; function f(", ") { 'use strong'; }"},
+ {"'use strong'; function* g(", ") { 'use strict'; }"},
+ {"'use strong'; function* g(", ") { 'use strong'; }"},
+ {"'use strong'; class c { foo(", ") { 'use strict' }"},
+ {"'use strong'; class c { foo(", ") { 'use strong' }"},
+ {"'use strong'; var a = (", ") => { 'use strict'; }"},
+ {"'use strong'; var a = (", ") => { 'use strong'; }"},
+ {"'use strong'; var o = { m(", ") { 'use strict'; }"},
+ {"'use strong'; var o = { m(", ") { 'use strong'; }"},
+ {"'use strong'; var o = { *gm(", ") { 'use strict'; }"},
+ {"'use strong'; var o = { *gm(", ") { 'use strong'; }"},
+ {"'use strong'; var c = { m(", ") { 'use strict'; }"},
+ {"'use strong'; var c = { m(", ") { 'use strong'; }"},
+ {"'use strong'; var c = { *gm(", ") { 'use strict'; }"},
+ {"'use strong'; var c = { *gm(", ") { 'use strong'; }"},
+
+ {NULL, NULL}};
+
+ const char* data[] = {
+ // TODO(@caitp): support formal parameter initializers
+ "{}",
+ "[]",
+ "[{}]",
+ "{a}",
+ "a, {b}",
+ "a, b, {c, d, e}",
+ "initializer = true",
+ "a, b, c = 1",
+ "...args",
+ "a, b, ...rest",
+ "[a, b, ...rest]",
+ "{ bindingPattern = {} }",
+ "{ initializedBindingPattern } = { initializedBindingPattern: true }",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowHarmonyArrowFunctions, kAllowHarmonyDefaultParameters,
+ kAllowHarmonyDestructuring, kAllowHarmonyRestParameters,
+ kAllowHarmonySloppy, kAllowStrongMode
+ };
+ RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(LetSloppyOnly) {
+ // clang-format off
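+  // In sloppy mode, "let" remains a valid identifier and binding name.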
+ const char* context_data[][2] = {
+ {"", ""},
+ {"{", "}"},
+ {NULL, NULL}
+ };
+
+ const char* data[] = {
+ "let let",
+ "let",
+ "let let = 1",
+ "let = 1",
+ "for (let let = 1; let < 1; let++) {}",
+ "for (let = 1; let < 1; let++) {}",
+ "for (let let in {}) {}",
+ "for (let let of []) {}",
+ "for (let in {}) {}",
+ NULL
+ };
+ // clang-format on
+
+ static const ParserFlag always_flags[] = {kAllowHarmonySloppy,
+ kAllowHarmonySloppyLet};
+ RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 221761487c..78a8204556 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -30,8 +30,8 @@
#include "src/v8.h"
#include "include/v8-profiler.h"
-#include "src/cpu-profiler.h"
-#include "src/profile-generator-inl.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/profiler/profile-generator-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/profiler-extension.h"
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index bf36081201..a37d6a0f19 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -1599,7 +1599,7 @@ TEST(SerializeInternalReference) {
return;
#endif
// Disable experimental natives that are loaded after deserialization.
- FLAG_context_specialization = false;
+ FLAG_function_context_specialization = false;
FLAG_always_opt = true;
const char* flag = "--turbo-filter=foo";
FlagList::SetFlagsFromString(flag, StrLength(flag));
diff --git a/deps/v8/test/cctest/test-simd.cc b/deps/v8/test/cctest/test-simd.cc
index fd72b695ee..1f412affba 100644
--- a/deps/v8/test/cctest/test-simd.cc
+++ b/deps/v8/test/cctest/test-simd.cc
@@ -109,9 +109,12 @@ TEST(SimdTypes) {
FLOAT_TEST(Float32x4, 4)
INT_TEST(Int32x4, 4, int32_t)
+ INT_TEST(Uint32x4, 4, uint32_t)
BOOL_TEST(Bool32x4, 4)
INT_TEST(Int16x8, 8, int16_t)
+ INT_TEST(Uint16x8, 8, uint16_t)
BOOL_TEST(Bool16x8, 8)
INT_TEST(Int8x16, 16, int8_t)
+ INT_TEST(Uint8x16, 16, uint8_t)
BOOL_TEST(Bool8x16, 16)
}
diff --git a/deps/v8/test/cctest/test-slots-buffer.cc b/deps/v8/test/cctest/test-slots-buffer.cc
new file mode 100644
index 0000000000..a47c584fff
--- /dev/null
+++ b/deps/v8/test/cctest/test-slots-buffer.cc
@@ -0,0 +1,132 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/slots-buffer.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(SlotsBufferObjectSlotsRemoval) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+
+ SlotsBuffer* buffer = new SlotsBuffer(NULL);
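+  // Stack-allocated dummy that is used below as a fake heap-object pointer.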
+ void* fake_object[1];
+
+ Handle<FixedArray> array = factory->NewFixedArray(2, TENURED);
+ CHECK(heap->old_space()->Contains(*array));
+ array->set(0, reinterpret_cast<Object*>(fake_object), SKIP_WRITE_BARRIER);
+
+ // Firstly, let's test the regular slots buffer entry.
+ buffer->Add(HeapObject::RawField(*array, FixedArray::kHeaderSize));
+ CHECK(reinterpret_cast<void*>(buffer->Get(0)) ==
+ HeapObject::RawField(*array, FixedArray::kHeaderSize));
+ SlotsBuffer::RemoveObjectSlots(CcTest::i_isolate()->heap(), buffer,
+ array->address(),
+ array->address() + array->Size());
+ CHECK(reinterpret_cast<void*>(buffer->Get(0)) ==
+ HeapObject::RawField(heap->empty_fixed_array(),
+ FixedArrayBase::kLengthOffset));
+
+ // Secondly, let's test the typed slots buffer entry.
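+  // A typed entry occupies two consecutive buffer slots: the slot type,
+  // then the recorded address.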
+ SlotsBuffer::AddTo(NULL, &buffer, SlotsBuffer::EMBEDDED_OBJECT_SLOT,
+ array->address() + FixedArray::kHeaderSize,
+ SlotsBuffer::FAIL_ON_OVERFLOW);
+ CHECK(reinterpret_cast<void*>(buffer->Get(1)) ==
+ reinterpret_cast<Object**>(SlotsBuffer::EMBEDDED_OBJECT_SLOT));
+ CHECK(reinterpret_cast<void*>(buffer->Get(2)) ==
+ HeapObject::RawField(*array, FixedArray::kHeaderSize));
+ SlotsBuffer::RemoveObjectSlots(CcTest::i_isolate()->heap(), buffer,
+ array->address(),
+ array->address() + array->Size());
+ CHECK(reinterpret_cast<void*>(buffer->Get(1)) ==
+ HeapObject::RawField(heap->empty_fixed_array(),
+ FixedArrayBase::kLengthOffset));
+ CHECK(reinterpret_cast<void*>(buffer->Get(2)) ==
+ HeapObject::RawField(heap->empty_fixed_array(),
+ FixedArrayBase::kLengthOffset));
+ delete buffer;
+}
+
+
+TEST(FilterInvalidSlotsBufferEntries) {
+ FLAG_manual_evacuation_candidates_selection = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ SlotsBuffer* buffer = new SlotsBuffer(NULL);
+
+ // Set up a fake black object that will contain a recorded SMI, a recorded
+ // pointer to a new space object, and a recorded pointer to a non-evacuation
+  // candidate object. These recorded slots should be filtered out.
+  // Additionally, we point to an evacuation candidate object, which should
+  // not be filtered out.
+
+ // Create fake object and mark it black.
+ Handle<FixedArray> fake_object = factory->NewFixedArray(23, TENURED);
+ MarkBit mark_bit = Marking::MarkBitFrom(*fake_object);
+ Marking::MarkBlack(mark_bit);
+
+  // Write a SMI into field 1 and record its address.
+ Object** field_smi = fake_object->RawFieldOfElementAt(0);
+ *field_smi = Smi::FromInt(100);
+ buffer->Add(field_smi);
+
+  // Write a new space reference into field 2 and record its address.
+ Handle<FixedArray> new_space_object = factory->NewFixedArray(23);
+ mark_bit = Marking::MarkBitFrom(*new_space_object);
+ Marking::MarkBlack(mark_bit);
+ Object** field_new_space = fake_object->RawFieldOfElementAt(1);
+ *field_new_space = *new_space_object;
+ buffer->Add(field_new_space);
+
+  // Write an old space reference into field 3, which points to an object
+  // that is not on an evacuation candidate page.
+ Handle<FixedArray> old_space_object_non_evacuation =
+ factory->NewFixedArray(23, TENURED);
+ mark_bit = Marking::MarkBitFrom(*old_space_object_non_evacuation);
+ Marking::MarkBlack(mark_bit);
+ Object** field_old_space_object_non_evacuation =
+ fake_object->RawFieldOfElementAt(2);
+ *field_old_space_object_non_evacuation = *old_space_object_non_evacuation;
+ buffer->Add(field_old_space_object_non_evacuation);
+
+  // Write an old space reference into field 4, which points to an object on
+  // an evacuation candidate page.
+ SimulateFullSpace(heap->old_space());
+ Handle<FixedArray> valid_object =
+ isolate->factory()->NewFixedArray(23, TENURED);
+ Page* page = Page::FromAddress(valid_object->address());
+ page->SetFlag(MemoryChunk::EVACUATION_CANDIDATE);
+ Object** valid_field = fake_object->RawFieldOfElementAt(3);
+ *valid_field = *valid_object;
+ buffer->Add(valid_field);
+
+ SlotsBuffer::RemoveInvalidSlots(heap, buffer);
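+  // Filtered-out entries are overwritten with a well-known dummy slot.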
+ Object** kRemovedEntry = HeapObject::RawField(heap->empty_fixed_array(),
+ FixedArrayBase::kLengthOffset);
+ CHECK_EQ(buffer->Get(0), kRemovedEntry);
+ CHECK_EQ(buffer->Get(1), kRemovedEntry);
+ CHECK_EQ(buffer->Get(2), kRemovedEntry);
+ CHECK_EQ(buffer->Get(3), valid_field);
+
+  // Clean up the marking state to keep heap verification happy.
+ mark_bit = Marking::MarkBitFrom(*fake_object);
+ Marking::MarkWhite(mark_bit);
+ mark_bit = Marking::MarkBitFrom(*new_space_object);
+ Marking::MarkWhite(mark_bit);
+ mark_bit = Marking::MarkBitFrom(*old_space_object_non_evacuation);
+ Marking::MarkWhite(mark_bit);
+
+ delete buffer;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-spaces.cc b/deps/v8/test/cctest/test-spaces.cc
index 86500c52d3..a744bb79a7 100644
--- a/deps/v8/test/cctest/test-spaces.cc
+++ b/deps/v8/test/cctest/test-spaces.cc
@@ -222,16 +222,23 @@ TEST(Regress3540) {
v8::internal::MemoryAllocator::CodePageAreaSize())) {
return;
}
+
Address address;
size_t size;
+ size_t request_size = code_range_size - 2 * pageSize;
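+  // The commit size is two code-page guard sizes smaller than the
+  // requested reservation.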
address = code_range->AllocateRawMemory(
- code_range_size - 2 * pageSize, code_range_size - 2 * pageSize, &size);
+ request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
+ &size);
CHECK(address != NULL);
+
Address null_address;
size_t null_size;
+ request_size = code_range_size - pageSize;
null_address = code_range->AllocateRawMemory(
- code_range_size - pageSize, code_range_size - pageSize, &null_size);
+ request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
+ &null_size);
CHECK(null_address == NULL);
+
code_range->FreeRawMemory(address, size);
delete code_range;
memory_allocator->TearDown();
@@ -305,40 +312,44 @@ TEST(MemoryAllocator) {
Heap* heap = isolate->heap();
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
+ CHECK(memory_allocator != nullptr);
CHECK(memory_allocator->SetUp(heap->MaxReserved(),
heap->MaxExecutableSize()));
+ TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
- int total_pages = 0;
- OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
- Page* first_page = memory_allocator->AllocatePage(
- faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
+ {
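+    // Scope faked_space so that its destructor runs, releasing all pages,
+    // before the memory allocator is torn down.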
+ int total_pages = 0;
+ OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
+ Page* first_page = memory_allocator->AllocatePage(
+ faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
+
+ first_page->InsertAfter(faked_space.anchor()->prev_page());
+ CHECK(first_page->is_valid());
+ CHECK(first_page->next_page() == faked_space.anchor());
+ total_pages++;
+
+ for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
+ CHECK(p->owner() == &faked_space);
+ }
- first_page->InsertAfter(faked_space.anchor()->prev_page());
- CHECK(first_page->is_valid());
- CHECK(first_page->next_page() == faked_space.anchor());
- total_pages++;
+ // Again, we should get n or n - 1 pages.
+ Page* other = memory_allocator->AllocatePage(faked_space.AreaSize(),
+ &faked_space, NOT_EXECUTABLE);
+ CHECK(other->is_valid());
+ total_pages++;
+ other->InsertAfter(first_page);
+ int page_count = 0;
+ for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
+ CHECK(p->owner() == &faked_space);
+ page_count++;
+ }
+ CHECK(total_pages == page_count);
- for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
- CHECK(p->owner() == &faked_space);
- }
+ Page* second_page = first_page->next_page();
+ CHECK(second_page->is_valid());
- // Again, we should get n or n - 1 pages.
- Page* other = memory_allocator->AllocatePage(
- faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
- CHECK(other->is_valid());
- total_pages++;
- other->InsertAfter(first_page);
- int page_count = 0;
- for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
- CHECK(p->owner() == &faked_space);
- page_count++;
+ // OldSpace's destructor will tear down the space and free up all pages.
}
- CHECK(total_pages == page_count);
-
- Page* second_page = first_page->next_page();
- CHECK(second_page->is_valid());
- memory_allocator->Free(first_page);
- memory_allocator->Free(second_page);
memory_allocator->TearDown();
delete memory_allocator;
}
@@ -388,13 +399,140 @@ TEST(OldSpace) {
s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
}
- s->TearDown();
delete s;
memory_allocator->TearDown();
delete memory_allocator;
}
+TEST(CompactionSpace) {
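+  // Allocate pages in a fresh CompactionSpace and check that they can be
+  // merged back into the owning OldSpace.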
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
+ CHECK(memory_allocator != nullptr);
+ CHECK(
+ memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
+ TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
+
+ CompactionSpace* compaction_space =
+ new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
+ CHECK(compaction_space != NULL);
+ CHECK(compaction_space->SetUp());
+
+ OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
+ CHECK(old_space != NULL);
+ CHECK(old_space->SetUp());
+
+  // We cannot loop on "Available()" since we initially have 0 bytes available
+  // and would thus neither grow nor be able to allocate an object.
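+  // Each allocation of Page::kMaxRegularHeapObjectSize fills a whole page,
+  // so we expect one page per allocated object.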
+ const int kNumObjects = 100;
+ const int kExpectedPages = kNumObjects;
+ for (int i = 0; i < kNumObjects; i++) {
+ compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
+ .ToObjectChecked();
+ }
+ int pages_in_old_space = old_space->CountTotalPages();
+ int pages_in_compaction_space = compaction_space->CountTotalPages();
+ CHECK_EQ(pages_in_compaction_space, kExpectedPages);
+ CHECK_LE(pages_in_old_space, 1);
+
+ old_space->MergeCompactionSpace(compaction_space);
+ CHECK_EQ(old_space->CountTotalPages(),
+ pages_in_old_space + pages_in_compaction_space);
+
+ delete compaction_space;
+ delete old_space;
+
+ memory_allocator->TearDown();
+ delete memory_allocator;
+}
+
+
+TEST(CompactionSpaceUsingExternalMemory) {
+ const int kObjectSize = 512;
+
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ MemoryAllocator* allocator = new MemoryAllocator(isolate);
+ CHECK(allocator != nullptr);
+ CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
+ TestMemoryAllocatorScope test_scope(isolate, allocator);
+
+ CompactionSpace* compaction_space =
+ new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
+ CHECK(compaction_space != NULL);
+ CHECK(compaction_space->SetUp());
+
+ OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
+ CHECK(old_space != NULL);
+ CHECK(old_space->SetUp());
+
+ // The linear allocation area already counts as used bytes, making
+ // exact testing impossible.
+ heap->DisableInlineAllocation();
+
+ // Test:
+ // * Allocate a backing store in old_space.
+  // * Compute the number num_rest_objects of kObjectSize objects that fit
+  //   into the rest of the available memory.
+  // * Add the rest of available memory to the compaction space.
+  // * Allocate num_rest_objects objects in the compaction space.
+  // * Allocate one more object.
+ // * Merge the compaction space and compare the expected number of pages.
+
+ // Allocate a single object in old_space to initialize a backing page.
+ old_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
+  // Compute the number of objects that fit into the rest of old_space.
+ intptr_t rest = static_cast<int>(old_space->Available());
+ CHECK_GT(rest, 0);
+ intptr_t num_rest_objects = rest / kObjectSize;
+ // After allocating num_rest_objects in compaction_space we allocate a bit
+ // more.
+ const intptr_t kAdditionalCompactionMemory = kObjectSize;
+ // We expect a single old_space page.
+ const intptr_t kExpectedInitialOldSpacePages = 1;
+ // We expect a single additional page in compaction space because we mostly
+ // use external memory.
+ const intptr_t kExpectedCompactionPages = 1;
+ // We expect two pages to be reachable from old_space in the end.
+ const intptr_t kExpectedOldSpacePagesAfterMerge = 2;
+
+ Object* chunk =
+ old_space->AllocateRawUnaligned(static_cast<int>(rest)).ToObjectChecked();
+ CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
+ CHECK(chunk != nullptr);
+ CHECK(chunk->IsHeapObject());
+
+ CHECK_EQ(compaction_space->CountTotalPages(), 0);
+ CHECK_EQ(compaction_space->Capacity(), 0);
+ // Make the rest of memory available for compaction.
+ compaction_space->AddExternalMemory(HeapObject::cast(chunk)->address(),
+ static_cast<int>(rest));
+ CHECK_EQ(compaction_space->CountTotalPages(), 0);
+ CHECK_EQ(compaction_space->Capacity(), rest);
+ while (num_rest_objects-- > 0) {
+ compaction_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
+ }
+ // We only used external memory so far.
+ CHECK_EQ(compaction_space->CountTotalPages(), 0);
+ // Additional allocation.
+ compaction_space->AllocateRawUnaligned(kAdditionalCompactionMemory)
+ .ToObjectChecked();
+  // Now the compaction space should have also acquired a page.
+ CHECK_EQ(compaction_space->CountTotalPages(), kExpectedCompactionPages);
+
+ old_space->MergeCompactionSpace(compaction_space);
+ CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);
+
+ delete compaction_space;
+ delete old_space;
+
+ allocator->TearDown();
+ delete allocator;
+}
+
+
TEST(LargeObjectSpace) {
v8::V8::Initialize();
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index ce60b95495..6e0ee04a9c 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -642,6 +642,7 @@ static inline void PrintStats(const ConsStringGenerationData& data) {
template<typename BuildString>
void TestStringCharacterStream(BuildString build, int test_cases) {
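+  // Force every GC in this test to be a global (full) GC.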
+ FLAG_gc_global = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index c0cc1cb8d1..131209ceca 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(mythria): Remove this define after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -40,8 +43,8 @@ void Signal(const v8::FunctionCallbackInfo<v8::Value>& args) {
void TerminateCurrentThread(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::V8::TerminateExecution(args.GetIsolate());
+ CHECK(!args.GetIsolate()->IsExecutionTerminating());
+ args.GetIsolate()->TerminateExecution();
}
@@ -51,70 +54,70 @@ void Fail(const v8::FunctionCallbackInfo<v8::Value>& args) {
void Loop(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Handle<v8::String> source = v8::String::NewFromUtf8(
- args.GetIsolate(), "try { doloop(); fail(); } catch(e) { fail(); }");
- v8::Handle<v8::Value> result = v8::Script::Compile(source)->Run();
+ CHECK(!args.GetIsolate()->IsExecutionTerminating());
+ v8::MaybeLocal<v8::Value> result =
+ CompileRun(args.GetIsolate()->GetCurrentContext(),
+ "try { doloop(); fail(); } catch(e) { fail(); }");
CHECK(result.IsEmpty());
- CHECK(v8::V8::IsExecutionTerminating(args.GetIsolate()));
+ CHECK(args.GetIsolate()->IsExecutionTerminating());
}
void DoLoop(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch(args.GetIsolate());
- CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Script::Compile(v8::String::NewFromUtf8(args.GetIsolate(),
- "function f() {"
- " var term = true;"
- " try {"
- " while(true) {"
- " if (term) terminate();"
- " term = false;"
- " }"
- " fail();"
- " } catch(e) {"
- " fail();"
- " }"
- "}"
- "f()"))->Run();
+ CHECK(!args.GetIsolate()->IsExecutionTerminating());
+ v8::MaybeLocal<v8::Value> result =
+ CompileRun(args.GetIsolate()->GetCurrentContext(),
+ "function f() {"
+ " var term = true;"
+ " try {"
+ " while(true) {"
+ " if (term) terminate();"
+ " term = false;"
+ " }"
+ " fail();"
+ " } catch(e) {"
+ " fail();"
+ " }"
+ "}"
+ "f()");
+ CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
- CHECK(v8::V8::IsExecutionTerminating(args.GetIsolate()));
+ CHECK(args.GetIsolate()->IsExecutionTerminating());
}
void DoLoopNoCall(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch(args.GetIsolate());
- CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Script::Compile(v8::String::NewFromUtf8(args.GetIsolate(),
- "var term = true;"
- "while(true) {"
- " if (term) terminate();"
- " term = false;"
- "}"))->Run();
+ CHECK(!args.GetIsolate()->IsExecutionTerminating());
+ v8::MaybeLocal<v8::Value> result =
+ CompileRun(args.GetIsolate()->GetCurrentContext(),
+ "var term = true;"
+ "while(true) {"
+ " if (term) terminate();"
+ " term = false;"
+ "}");
+ CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
- CHECK(v8::V8::IsExecutionTerminating(args.GetIsolate()));
+ CHECK(args.GetIsolate()->IsExecutionTerminating());
}
-v8::Handle<v8::ObjectTemplate> CreateGlobalTemplate(
- v8::Isolate* isolate,
- v8::FunctionCallback terminate,
+v8::Local<v8::ObjectTemplate> CreateGlobalTemplate(
+ v8::Isolate* isolate, v8::FunctionCallback terminate,
v8::FunctionCallback doloop) {
- v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
- global->Set(v8::String::NewFromUtf8(isolate, "terminate"),
+ v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
+ global->Set(v8_str("terminate"),
v8::FunctionTemplate::New(isolate, terminate));
- global->Set(v8::String::NewFromUtf8(isolate, "fail"),
- v8::FunctionTemplate::New(isolate, Fail));
- global->Set(v8::String::NewFromUtf8(isolate, "loop"),
- v8::FunctionTemplate::New(isolate, Loop));
- global->Set(v8::String::NewFromUtf8(isolate, "doloop"),
- v8::FunctionTemplate::New(isolate, doloop));
+ global->Set(v8_str("fail"), v8::FunctionTemplate::New(isolate, Fail));
+ global->Set(v8_str("loop"), v8::FunctionTemplate::New(isolate, Loop));
+ global->Set(v8_str("doloop"), v8::FunctionTemplate::New(isolate, doloop));
return global;
}
@@ -123,19 +126,22 @@ v8::Handle<v8::ObjectTemplate> CreateGlobalTemplate(
// itself.
TEST(TerminateOnlyV8ThreadFromThreadItself) {
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> global =
+ v8::Local<v8::ObjectTemplate> global =
CreateGlobalTemplate(CcTest::isolate(), TerminateCurrentThread, DoLoop);
- v8::Handle<v8::Context> context =
+ v8::Local<v8::Context> context =
v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
+ CHECK(!CcTest::isolate()->IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
- v8::Handle<v8::String> source = v8::String::NewFromUtf8(
- CcTest::isolate(), "try { loop(); fail(); } catch(e) { fail(); }");
- v8::Script::Compile(source)->Run();
+ v8::MaybeLocal<v8::Value> result =
+ CompileRun(CcTest::isolate()->GetCurrentContext(),
+ "try { loop(); fail(); } catch(e) { fail(); }");
+ CHECK(result.IsEmpty());
// Test that we can run the code again after thread termination.
- CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
- v8::Script::Compile(source)->Run();
+ CHECK(!CcTest::isolate()->IsExecutionTerminating());
+ result = CompileRun(CcTest::isolate()->GetCurrentContext(),
+ "try { loop(); fail(); } catch(e) { fail(); }");
+ CHECK(result.IsEmpty());
}
@@ -143,19 +149,21 @@ TEST(TerminateOnlyV8ThreadFromThreadItself) {
// itself in a loop that performs no calls.
TEST(TerminateOnlyV8ThreadFromThreadItselfNoLoop) {
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(
+ v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
CcTest::isolate(), TerminateCurrentThread, DoLoopNoCall);
- v8::Handle<v8::Context> context =
+ v8::Local<v8::Context> context =
v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
+ CHECK(!CcTest::isolate()->IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
- v8::Handle<v8::String> source = v8::String::NewFromUtf8(
- CcTest::isolate(), "try { loop(); fail(); } catch(e) { fail(); }");
- v8::Script::Compile(source)->Run();
- CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
+ static const char* source = "try { loop(); fail(); } catch(e) { fail(); }";
+ v8::MaybeLocal<v8::Value> result =
+ CompileRun(CcTest::isolate()->GetCurrentContext(), source);
+ CHECK(result.IsEmpty());
+ CHECK(!CcTest::isolate()->IsExecutionTerminating());
// Test that we can run the code again after thread termination.
- v8::Script::Compile(source)->Run();
+ result = CompileRun(CcTest::isolate()->GetCurrentContext(), source);
+ CHECK(result.IsEmpty());
}
@@ -166,8 +174,8 @@ class TerminatorThread : public v8::base::Thread {
isolate_(reinterpret_cast<v8::Isolate*>(isolate)) {}
void Run() {
semaphore->Wait();
- CHECK(!v8::V8::IsExecutionTerminating(isolate_));
- v8::V8::TerminateExecution(isolate_);
+ CHECK(!isolate_->IsExecutionTerminating());
+ isolate_->TerminateExecution();
}
private:
@@ -183,17 +191,17 @@ TEST(TerminateOnlyV8ThreadFromOtherThread) {
thread.Start();
v8::HandleScope scope(CcTest::isolate());
- v8::Handle<v8::ObjectTemplate> global =
+ v8::Local<v8::ObjectTemplate> global =
CreateGlobalTemplate(CcTest::isolate(), Signal, DoLoop);
- v8::Handle<v8::Context> context =
+ v8::Local<v8::Context> context =
v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
+ CHECK(!CcTest::isolate()->IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
- v8::Handle<v8::String> source = v8::String::NewFromUtf8(
- CcTest::isolate(), "try { loop(); fail(); } catch(e) { fail(); }");
- v8::Script::Compile(source)->Run();
-
+ v8::MaybeLocal<v8::Value> result =
+ CompileRun(CcTest::isolate()->GetCurrentContext(),
+ "try { loop(); fail(); } catch(e) { fail(); }");
+ CHECK(result.IsEmpty());
thread.Join();
delete semaphore;
semaphore = NULL;
@@ -205,39 +213,42 @@ int call_count = 0;
void TerminateOrReturnObject(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (++call_count == 10) {
- CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::V8::TerminateExecution(args.GetIsolate());
+ CHECK(!args.GetIsolate()->IsExecutionTerminating());
+ args.GetIsolate()->TerminateExecution();
return;
}
v8::Local<v8::Object> result = v8::Object::New(args.GetIsolate());
- result->Set(v8::String::NewFromUtf8(args.GetIsolate(), "x"),
- v8::Integer::New(args.GetIsolate(), 42));
+ v8::Maybe<bool> val =
+ result->Set(args.GetIsolate()->GetCurrentContext(), v8_str("x"),
+ v8::Integer::New(args.GetIsolate(), 42));
+ CHECK(val.FromJust());
args.GetReturnValue().Set(result);
}
void LoopGetProperty(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch(args.GetIsolate());
- CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Script::Compile(
- v8::String::NewFromUtf8(args.GetIsolate(),
- "function f() {"
- " try {"
- " while(true) {"
- " terminate_or_return_object().x;"
- " }"
- " fail();"
- " } catch(e) {"
- " (function() {})();" // trigger stack check.
- " fail();"
- " }"
- "}"
- "f()"))->Run();
+ CHECK(!args.GetIsolate()->IsExecutionTerminating());
+ v8::MaybeLocal<v8::Value> result =
+ CompileRun(args.GetIsolate()->GetCurrentContext(),
+ "function f() {"
+ " try {"
+ " while(true) {"
+ " terminate_or_return_object().x;"
+ " }"
+ " fail();"
+ " } catch(e) {"
+ " (function() {})();" // trigger stack check.
+ " fail();"
+ " }"
+ "}"
+ "f()");
+ CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
- CHECK(v8::V8::IsExecutionTerminating(args.GetIsolate()));
+ CHECK(args.GetIsolate()->IsExecutionTerminating());
}
@@ -246,28 +257,27 @@ void LoopGetProperty(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(TerminateLoadICException) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
- global->Set(
- v8::String::NewFromUtf8(isolate, "terminate_or_return_object"),
- v8::FunctionTemplate::New(isolate, TerminateOrReturnObject));
- global->Set(v8::String::NewFromUtf8(isolate, "fail"),
- v8::FunctionTemplate::New(isolate, Fail));
- global->Set(v8::String::NewFromUtf8(isolate, "loop"),
+ v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
+ global->Set(v8_str("terminate_or_return_object"),
+ v8::FunctionTemplate::New(isolate, TerminateOrReturnObject));
+ global->Set(v8_str("fail"), v8::FunctionTemplate::New(isolate, Fail));
+ global->Set(v8_str("loop"),
v8::FunctionTemplate::New(isolate, LoopGetProperty));
- v8::Handle<v8::Context> context =
- v8::Context::New(isolate, NULL, global);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating(isolate));
+ CHECK(!isolate->IsExecutionTerminating());
// Run a loop that will be infinite if thread termination does not work.
- v8::Handle<v8::String> source = v8::String::NewFromUtf8(
- isolate, "try { loop(); fail(); } catch(e) { fail(); }");
+ static const char* source = "try { loop(); fail(); } catch(e) { fail(); }";
call_count = 0;
- v8::Script::Compile(source)->Run();
+ v8::MaybeLocal<v8::Value> result =
+ CompileRun(isolate->GetCurrentContext(), source);
+ CHECK(result.IsEmpty());
// Test that we can run the code again after thread termination.
- CHECK(!v8::V8::IsExecutionTerminating(isolate));
+ CHECK(!isolate->IsExecutionTerminating());
call_count = 0;
- v8::Script::Compile(source)->Run();
+ result = CompileRun(isolate->GetCurrentContext(), source);
+ CHECK(result.IsEmpty());
}
@@ -277,17 +287,20 @@ v8::Persistent<v8::String> reenter_script_2;
void ReenterAfterTermination(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch(args.GetIsolate());
v8::Isolate* isolate = args.GetIsolate();
- CHECK(!v8::V8::IsExecutionTerminating(isolate));
+ CHECK(!isolate->IsExecutionTerminating());
v8::Local<v8::String> script =
v8::Local<v8::String>::New(isolate, reenter_script_1);
- v8::Script::Compile(script)->Run();
+ v8::MaybeLocal<v8::Value> result = CompileRun(script);
+ CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
- CHECK(v8::V8::IsExecutionTerminating(isolate));
+ CHECK(isolate->IsExecutionTerminating());
script = v8::Local<v8::String>::New(isolate, reenter_script_2);
- v8::Script::Compile(script)->Run();
+ v8::MaybeLocal<v8::Script> compiled_script =
+ v8::Script::Compile(isolate->GetCurrentContext(), script);
+ CHECK(compiled_script.IsEmpty());
}
@@ -296,12 +309,11 @@ void ReenterAfterTermination(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(TerminateAndReenterFromThreadItself) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(
+ v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
isolate, TerminateCurrentThread, ReenterAfterTermination);
- v8::Handle<v8::Context> context =
- v8::Context::New(isolate, NULL, global);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating());
+ CHECK(!v8::Isolate::GetCurrent()->IsExecutionTerminating());
// Create script strings upfront as it won't work when terminating.
reenter_script_1.Reset(isolate, v8_str(
"function f() {"
@@ -319,7 +331,7 @@ TEST(TerminateAndReenterFromThreadItself) {
"f()"));
reenter_script_2.Reset(isolate, v8_str("function f() { fail(); } f()"));
CompileRun("try { loop(); fail(); } catch(e) { fail(); }");
- CHECK(!v8::V8::IsExecutionTerminating(isolate));
+ CHECK(!isolate->IsExecutionTerminating());
// Check we can run JS again after termination.
CHECK(CompileRun("function f() { return true; } f()")->IsTrue());
reenter_script_1.Reset();
@@ -329,22 +341,24 @@ TEST(TerminateAndReenterFromThreadItself) {
void DoLoopCancelTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::TryCatch try_catch(args.GetIsolate());
- CHECK(!v8::V8::IsExecutionTerminating());
- v8::Script::Compile(v8::String::NewFromUtf8(args.GetIsolate(),
- "var term = true;"
- "while(true) {"
- " if (term) terminate();"
- " term = false;"
- "}"
- "fail();"))->Run();
+ CHECK(!v8::Isolate::GetCurrent()->IsExecutionTerminating());
+ v8::MaybeLocal<v8::Value> result =
+ CompileRun(args.GetIsolate()->GetCurrentContext(),
+ "var term = true;"
+ "while(true) {"
+ " if (term) terminate();"
+ " term = false;"
+ "}"
+ "fail();");
+ CHECK(result.IsEmpty());
CHECK(try_catch.HasCaught());
CHECK(try_catch.Exception()->IsNull());
CHECK(try_catch.Message().IsEmpty());
CHECK(!try_catch.CanContinue());
- CHECK(v8::V8::IsExecutionTerminating());
+ CHECK(v8::Isolate::GetCurrent()->IsExecutionTerminating());
CHECK(try_catch.HasTerminated());
- v8::V8::CancelTerminateExecution(CcTest::isolate());
- CHECK(!v8::V8::IsExecutionTerminating());
+ CcTest::isolate()->CancelTerminateExecution();
+ CHECK(!v8::Isolate::GetCurrent()->IsExecutionTerminating());
}
@@ -353,15 +367,18 @@ void DoLoopCancelTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(TerminateCancelTerminateFromThreadItself) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(
+ v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
isolate, TerminateCurrentThread, DoLoopCancelTerminate);
- v8::Handle<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating(CcTest::isolate()));
- v8::Handle<v8::String> source = v8::String::NewFromUtf8(
- isolate, "try { doloop(); } catch(e) { fail(); } 'completed';");
+ CHECK(!CcTest::isolate()->IsExecutionTerminating());
// Check that execution completed with correct return value.
- CHECK(v8::Script::Compile(source)->Run()->Equals(v8_str("completed")));
+ v8::Local<v8::Value> result =
+ CompileRun(isolate->GetCurrentContext(),
+ "try { doloop(); } catch(e) { fail(); } 'completed';")
+ .ToLocalChecked();
+ CHECK(result->Equals(isolate->GetCurrentContext(), v8_str("completed"))
+ .FromJust());
}
@@ -375,9 +392,11 @@ void MicrotaskLoopForever(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::HandleScope scope(isolate);
// Enqueue another should-not-run task to ensure we clean out the queue
// when we terminate.
- isolate->EnqueueMicrotask(v8::Function::New(isolate, MicrotaskShouldNotRun));
+ isolate->EnqueueMicrotask(
+ v8::Function::New(isolate->GetCurrentContext(), MicrotaskShouldNotRun)
+ .ToLocalChecked());
CompileRun("terminate(); while (true) { }");
- CHECK(v8::V8::IsExecutionTerminating());
+ CHECK(v8::Isolate::GetCurrent()->IsExecutionTerminating());
}
@@ -389,18 +408,22 @@ TEST(TerminateFromOtherThreadWhileMicrotaskRunning) {
v8::Isolate* isolate = CcTest::isolate();
isolate->SetAutorunMicrotasks(false);
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> global =
+ v8::Local<v8::ObjectTemplate> global =
CreateGlobalTemplate(CcTest::isolate(), Signal, DoLoop);
- v8::Handle<v8::Context> context =
+ v8::Local<v8::Context> context =
v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
- isolate->EnqueueMicrotask(v8::Function::New(isolate, MicrotaskLoopForever));
+ isolate->EnqueueMicrotask(
+ v8::Function::New(isolate->GetCurrentContext(), MicrotaskLoopForever)
+ .ToLocalChecked());
// The second task should never be run because we bail out if we're
// terminating.
- isolate->EnqueueMicrotask(v8::Function::New(isolate, MicrotaskShouldNotRun));
+ isolate->EnqueueMicrotask(
+ v8::Function::New(isolate->GetCurrentContext(), MicrotaskShouldNotRun)
+ .ToLocalChecked());
isolate->RunMicrotasks();
- v8::V8::CancelTerminateExecution(isolate);
+ isolate->CancelTerminateExecution();
isolate->RunMicrotasks(); // should not run MicrotaskShouldNotRun
thread.Join();
@@ -420,9 +443,9 @@ static void CounterCallback(v8::Isolate* isolate, void* data) {
TEST(PostponeTerminateException) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> global =
+ v8::Local<v8::ObjectTemplate> global =
CreateGlobalTemplate(CcTest::isolate(), TerminateCurrentThread, DoLoop);
- v8::Handle<v8::Context> context =
+ v8::Local<v8::Context> context =
v8::Context::New(CcTest::isolate(), NULL, global);
v8::Context::Scope context_scope(context);
@@ -432,7 +455,7 @@ TEST(PostponeTerminateException) {
{ // Postpone terminate execution interrupts.
i::PostponeInterruptsScope p1(CcTest::i_isolate(),
- i::StackGuard::TERMINATE_EXECUTION) ;
+ i::StackGuard::TERMINATE_EXECUTION);
// API interrupts should still be triggered.
CcTest::isolate()->RequestInterrupt(&CounterCallback, NULL);
@@ -468,9 +491,9 @@ TEST(PostponeTerminateException) {
TEST(ErrorObjectAfterTermination) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
v8::Context::Scope context_scope(context);
- v8::V8::TerminateExecution(isolate);
+ isolate->TerminateExecution();
v8::Local<v8::Value> error = v8::Exception::Error(v8_str("error"));
// TODO(yangguo): crbug/403509. Check for empty handle instead.
CHECK(error->IsUndefined());
@@ -478,16 +501,17 @@ TEST(ErrorObjectAfterTermination) {
void InnerTryCallTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
- v8::Handle<v8::Object> global = CcTest::global();
- v8::Handle<v8::Function> loop =
- v8::Handle<v8::Function>::Cast(global->Get(v8_str("loop")));
+ CHECK(!args.GetIsolate()->IsExecutionTerminating());
+ v8::Local<v8::Object> global = CcTest::global();
+ v8::Local<v8::Function> loop = v8::Local<v8::Function>::Cast(
+ global->Get(CcTest::isolate()->GetCurrentContext(), v8_str("loop"))
+ .ToLocalChecked());
i::MaybeHandle<i::Object> result =
i::Execution::TryCall(v8::Utils::OpenHandle((*loop)),
v8::Utils::OpenHandle((*global)), 0, NULL, NULL);
CHECK(result.is_null());
// TryCall ignores terminate execution, but rerequests the interrupt.
- CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
+ CHECK(!args.GetIsolate()->IsExecutionTerminating());
CHECK(CompileRun("1 + 1;").IsEmpty());
}
@@ -495,12 +519,12 @@ void InnerTryCallTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
TEST(TerminationInInnerTryCall) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template = CreateGlobalTemplate(
+ v8::Local<v8::ObjectTemplate> global_template = CreateGlobalTemplate(
CcTest::isolate(), TerminateCurrentThread, DoLoopNoCall);
global_template->Set(
v8_str("inner_try_call_terminate"),
v8::FunctionTemplate::New(isolate, InnerTryCallTerminate));
- v8::Handle<v8::Context> context =
+ v8::Local<v8::Context> context =
v8::Context::New(CcTest::isolate(), NULL, global_template);
v8::Context::Scope context_scope(context);
{
@@ -508,8 +532,10 @@ TEST(TerminationInInnerTryCall) {
CompileRun("inner_try_call_terminate()");
CHECK(try_catch.HasTerminated());
}
- CHECK_EQ(4, CompileRun("2 + 2")->ToInt32()->Int32Value());
- CHECK(!v8::V8::IsExecutionTerminating());
+ v8::Maybe<int32_t> result = CompileRun("2 + 2")->Int32Value(
+ v8::Isolate::GetCurrent()->GetCurrentContext());
+ CHECK_EQ(4, result.FromJust());
+ CHECK(!v8::Isolate::GetCurrent()->IsExecutionTerminating());
}
@@ -517,23 +543,29 @@ TEST(TerminateAndTryCall) {
i::FLAG_allow_natives_syntax = true;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope scope(isolate);
- v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(
+ v8::Local<v8::ObjectTemplate> global = CreateGlobalTemplate(
isolate, TerminateCurrentThread, DoLoopCancelTerminate);
- v8::Handle<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
v8::Context::Scope context_scope(context);
- CHECK(!v8::V8::IsExecutionTerminating(isolate));
+ CHECK(!isolate->IsExecutionTerminating());
v8::TryCatch try_catch(isolate);
- CHECK(!v8::V8::IsExecutionTerminating(isolate));
+ CHECK(!isolate->IsExecutionTerminating());
// Terminate execution has been triggered inside TryCall, but re-requested
// to trigger later.
CHECK(CompileRun("terminate(); reference_error();").IsEmpty());
CHECK(try_catch.HasCaught());
- CHECK(!v8::V8::IsExecutionTerminating(isolate));
- CHECK(CcTest::global()->Get(v8_str("terminate"))->IsFunction());
+ CHECK(!isolate->IsExecutionTerminating());
+ v8::Local<v8::Value> value =
+ CcTest::global()
+ ->Get(isolate->GetCurrentContext(), v8_str("terminate"))
+ .ToLocalChecked();
+ CHECK(value->IsFunction());
// The first stack check after terminate has been re-requested fails.
CHECK(CompileRun("1 + 1").IsEmpty());
- CHECK(!v8::V8::IsExecutionTerminating(isolate));
+ CHECK(!isolate->IsExecutionTerminating());
// V8 then recovers.
- CHECK_EQ(4, CompileRun("2 + 2")->ToInt32()->Int32Value());
- CHECK(!v8::V8::IsExecutionTerminating(isolate));
+ v8::Maybe<int32_t> result = CompileRun("2 + 2")->Int32Value(
+ v8::Isolate::GetCurrent()->GetCurrentContext());
+ CHECK_EQ(4, result.FromJust());
+ CHECK(!isolate->IsExecutionTerminating());
}
diff --git a/deps/v8/test/cctest/test-threads.cc b/deps/v8/test/cctest/test-threads.cc
index a9058a523a..71dd49f50b 100644
--- a/deps/v8/test/cctest/test-threads.cc
+++ b/deps/v8/test/cctest/test-threads.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(mythria): Remove this define after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include "src/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-transitions.cc b/deps/v8/test/cctest/test-transitions.cc
index 800c2a0e44..60a0706b04 100644
--- a/deps/v8/test/cctest/test-transitions.cc
+++ b/deps/v8/test/cctest/test-transitions.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// TODO(mythria): Remove this define after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include <stdlib.h>
#include <utility>
diff --git a/deps/v8/test/cctest/test-typedarrays.cc b/deps/v8/test/cctest/test-typedarrays.cc
index 394f6194fd..f55c560a28 100644
--- a/deps/v8/test/cctest/test-typedarrays.cc
+++ b/deps/v8/test/cctest/test-typedarrays.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// TODO(mythria): Remove this define after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include <stdlib.h>
#include "src/v8.h"
@@ -10,13 +13,14 @@
#include "src/api.h"
#include "src/heap/heap.h"
#include "src/objects.h"
-#include "src/v8.h"
using namespace v8::internal;
void TestArrayBufferViewContents(LocalContext& env, bool should_use_buffer) {
- v8::Local<v8::Object> obj_a =
- v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("a")));
+ v8::Local<v8::Object> obj_a = v8::Local<v8::Object>::Cast(
+ env->Global()
+ ->Get(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str("a"))
+ .ToLocalChecked());
CHECK(obj_a->IsArrayBufferView());
v8::Local<v8::ArrayBufferView> array_buffer_view =
v8::Local<v8::ArrayBufferView>::Cast(obj_a);
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index fe07093077..22a5ca74f5 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// TODO(mythria): Remove this define after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include <vector>
#include "src/hydrogen-types.h"
diff --git a/deps/v8/test/cctest/test-typing-reset.cc b/deps/v8/test/cctest/test-typing-reset.cc
new file mode 100644
index 0000000000..426f411fff
--- /dev/null
+++ b/deps/v8/test/cctest/test-typing-reset.cc
@@ -0,0 +1,301 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(mythria): Remove this define after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/ast-expression-visitor.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+#include "src/typing-reset.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/function-tester.h"
+#include "test/cctest/expression-type-collector.h"
+#include "test/cctest/expression-type-collector-macros.h"
+
+#define INT32_TYPE Bounds(Type::Signed32(), Type::Signed32())
+
+using namespace v8::internal;
+
+namespace {
+
+class TypeSetter : public AstExpressionVisitor {
+ public:
+ TypeSetter(Isolate* isolate, Zone* zone, FunctionLiteral* root)
+ : AstExpressionVisitor(isolate, zone, root) {}
+
+ protected:
+ void VisitExpression(Expression* expression) {
+ expression->set_bounds(INT32_TYPE);
+ }
+};
+
+
+void CheckAllSame(ZoneVector<ExpressionTypeEntry>& types,
+ Bounds expected_type) {
+ CHECK_TYPES_BEGIN {
+ // function logSum
+ CHECK_EXPR(FunctionLiteral, expected_type) {
+ CHECK_EXPR(FunctionLiteral, expected_type) {
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(start, expected_type);
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(start, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(end, expected_type);
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(end, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(sum, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(p, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(q, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ // for (p = start << 3, q = end << 3;
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(p, expected_type);
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(start, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(q, expected_type);
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(end, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ }
+ // (p|0) < (q|0);
+ CHECK_EXPR(CompareOperation, expected_type) {
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(p, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(q, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+      // p = (p + 8)|0) {
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(p, expected_type);
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(p, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ // sum = sum + +log(values[p>>3]);
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(sum, expected_type);
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(sum, expected_type);
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_EXPR(Call, expected_type) {
+ CHECK_VAR(log, expected_type);
+ CHECK_EXPR(Property, expected_type) {
+ CHECK_VAR(values, expected_type);
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(p, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ }
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ }
+ // return +sum;
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(sum, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ // function geometricMean
+ CHECK_EXPR(FunctionLiteral, expected_type) {
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(start, expected_type);
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(start, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(end, expected_type);
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(end, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ // return +exp(+logSum(start, end) / +((end - start)|0));
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_EXPR(Call, expected_type) {
+ CHECK_VAR(exp, expected_type);
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_EXPR(Call, expected_type) {
+ CHECK_VAR(logSum, expected_type);
+ CHECK_VAR(start, expected_type);
+ CHECK_VAR(end, expected_type);
+ }
+ CHECK_EXPR(Literal, expected_type);
+ }
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_EXPR(BinaryOperation, expected_type) {
+ CHECK_VAR(end, expected_type);
+ CHECK_VAR(start, expected_type);
+ }
+ CHECK_EXPR(Literal, expected_type);
+ }
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ }
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ // "use asm";
+ CHECK_EXPR(Literal, expected_type);
+ // var exp = stdlib.Math.exp;
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(exp, expected_type);
+ CHECK_EXPR(Property, expected_type) {
+ CHECK_EXPR(Property, expected_type) {
+ CHECK_VAR(stdlib, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ // var log = stdlib.Math.log;
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(log, expected_type);
+ CHECK_EXPR(Property, expected_type) {
+ CHECK_EXPR(Property, expected_type) {
+ CHECK_VAR(stdlib, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ CHECK_EXPR(Literal, expected_type);
+ }
+ }
+ // var values = new stdlib.Float64Array(buffer);
+ CHECK_EXPR(Assignment, expected_type) {
+ CHECK_VAR(values, expected_type);
+ CHECK_EXPR(CallNew, expected_type) {
+ CHECK_EXPR(Property, expected_type) {
+ CHECK_VAR(stdlib, expected_type);
+ CHECK_EXPR(Literal, expected_type);
+ }
+ CHECK_VAR(buffer, expected_type);
+ }
+ }
+ // return { geometricMean: geometricMean };
+ CHECK_EXPR(ObjectLiteral, expected_type) {
+ CHECK_VAR(geometricMean, expected_type);
+ }
+ }
+ }
+ CHECK_TYPES_END
+}
+}
+
+
+TEST(ResetTypingInfo) {
+ const char test_function[] =
+ "function GeometricMean(stdlib, foreign, buffer) {\n"
+ " \"use asm\";\n"
+ "\n"
+ " var exp = stdlib.Math.exp;\n"
+ " var log = stdlib.Math.log;\n"
+ " var values = new stdlib.Float64Array(buffer);\n"
+ "\n"
+ " function logSum(start, end) {\n"
+ " start = start|0;\n"
+ " end = end|0;\n"
+ "\n"
+ " var sum = 0.0, p = 0, q = 0;\n"
+ "\n"
+ " // asm.js forces byte addressing of the heap by requiring shifting "
+ "by 3\n"
+ " for (p = start << 3, q = end << 3; (p|0) < (q|0); p = (p + 8)|0) {\n"
+ " sum = sum + +log(values[p>>3]);\n"
+ " }\n"
+ "\n"
+ " return +sum;\n"
+ " }\n"
+ "\n"
+ " function geometricMean(start, end) {\n"
+ " start = start|0;\n"
+ " end = end|0;\n"
+ "\n"
+ " return +exp(+logSum(start, end) / +((end - start)|0));\n"
+ " }\n"
+ "\n"
+ " return { geometricMean: geometricMean };\n"
+ "}\n";
+
+ v8::V8::Initialize();
+ HandleAndZoneScope handles;
+
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+
+ i::Handle<i::String> source_code =
+ factory->NewStringFromUtf8(i::CStrVector(test_function))
+ .ToHandleChecked();
+
+ i::Handle<i::Script> script = factory->NewScript(source_code);
+
+ i::ParseInfo info(handles.main_zone(), script);
+ i::Parser parser(&info);
+ parser.set_allow_harmony_arrow_functions(true);
+ parser.set_allow_harmony_sloppy(true);
+ info.set_global();
+ info.set_lazy(false);
+ info.set_allow_lazy_parsing(false);
+ info.set_toplevel(true);
+
+ CHECK(i::Compiler::ParseAndAnalyze(&info));
+ FunctionLiteral* root =
+ info.scope()->declarations()->at(0)->AsFunctionDeclaration()->fun();
+
+ // Core of the test.
+ ZoneVector<ExpressionTypeEntry> types(handles.main_zone());
+ ExpressionTypeCollector(isolate, handles.main_zone(), root, &types).Run();
+ CheckAllSame(types, Bounds::Unbounded());
+
+ TypeSetter(isolate, handles.main_zone(), root).Run();
+
+ ExpressionTypeCollector(isolate, handles.main_zone(), root, &types).Run();
+ CheckAllSame(types, INT32_TYPE);
+
+ TypingReseter(isolate, handles.main_zone(), root).Run();
+
+ ExpressionTypeCollector(isolate, handles.main_zone(), root, &types).Run();
+ CheckAllSame(types, Bounds::Unbounded());
+}
diff --git a/deps/v8/test/cctest/test-unbound-queue.cc b/deps/v8/test/cctest/test-unbound-queue.cc
index 6da91e6943..a19b783392 100644
--- a/deps/v8/test/cctest/test-unbound-queue.cc
+++ b/deps/v8/test/cctest/test-unbound-queue.cc
@@ -27,10 +27,13 @@
//
// Tests of the unbound queue.
+// TODO(mythria): Remove this define after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include "src/v8.h"
#include "test/cctest/cctest.h"
-#include "src/unbound-queue-inl.h"
+#include "src/profiler/unbound-queue-inl.h"
using i::UnboundQueue;
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index 3a629bdca0..3dd56ee09e 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// TODO(mythria): Remove this define after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include <stdlib.h>
#include <utility>
@@ -11,9 +14,11 @@
#include "src/execution.h"
#include "src/factory.h"
#include "src/global-handles.h"
+#include "src/heap/slots-buffer.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
#include "test/cctest/cctest.h"
+#include "test/cctest/heap-tester.h"
using namespace v8::base;
using namespace v8::internal;
@@ -49,8 +54,10 @@ static Handle<String> MakeName(const char* str, int suffix) {
Handle<JSObject> GetObject(const char* name) {
- return v8::Utils::OpenHandle(
- *v8::Handle<v8::Object>::Cast(CcTest::global()->Get(v8_str(name))));
+ return v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
+ CcTest::global()
+ ->Get(v8::Isolate::GetCurrent()->GetCurrentContext(), v8_str(name))
+ .ToLocalChecked()));
}
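
The GetObject change above follows the same maybe-based lookup rule: Object::Get now takes a context and returns a MaybeLocal. A sketch under that assumption (helper name hypothetical, not from the patch):

    #include "include/v8.h"

    // ToLocalChecked() aborts the process if the lookup threw, which is
    // acceptable in tests; production embedders would branch on IsEmpty().
    v8::Local<v8::Object> GetGlobalObject(v8::Local<v8::Context> context,
                                          v8::Local<v8::String> name) {
      return v8::Local<v8::Object>::Cast(
          context->Global()->Get(context, name).ToLocalChecked());
    }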
@@ -1397,88 +1404,6 @@ TEST(StoreBufferScanOnScavenge) {
}
-static int LenFromSize(int size) {
- return (size - FixedArray::kHeaderSize) / kPointerSize;
-}
-
-
-TEST(WriteBarriersInCopyJSObject) {
- FLAG_max_semi_space_size = 1; // Ensure new space is not growing.
- CcTest::InitializeVM();
- Isolate* isolate = CcTest::i_isolate();
- TestHeap* heap = CcTest::test_heap();
-
- v8::HandleScope scope(CcTest::isolate());
-
-  // The plan: create a JSObject which contains an unboxed double value that
-  // looks like a reference to an object in new space.
-  // Then clone this object (forcing it to go into old space) and check
-  // that the value of the unboxed double property of the cloned object
-  // was not corrupted by GC.
-
-  // Step 1: prepare a map for the object. We add an unboxed double property.
- // Create a map with single inobject property.
- Handle<Map> my_map = Map::Create(isolate, 1);
- Handle<String> name = isolate->factory()->InternalizeUtf8String("foo");
- my_map = Map::CopyWithField(my_map, name, HeapType::Any(isolate), NONE,
- Representation::Double(),
- INSERT_TRANSITION).ToHandleChecked();
-
- int object_size = my_map->instance_size();
-
-  // Step 2: allocate a lot of objects to almost fill new space: we need
-  // just enough room to allocate the JSObject and thus fill the new space.
-
- int allocation_amount =
- Min(FixedArray::kMaxSize, Page::kMaxRegularHeapObjectSize + kPointerSize);
- int allocation_len = LenFromSize(allocation_amount);
- NewSpace* new_space = heap->new_space();
- Address* top_addr = new_space->allocation_top_address();
- Address* limit_addr = new_space->allocation_limit_address();
- while ((*limit_addr - *top_addr) > allocation_amount) {
- CHECK(!heap->always_allocate());
- Object* array = heap->AllocateFixedArray(allocation_len).ToObjectChecked();
- CHECK(new_space->Contains(array));
- }
-
- // Step 3: now allocate fixed array and JSObject to fill the whole new space.
- int to_fill = static_cast<int>(*limit_addr - *top_addr - object_size);
- int fixed_array_len = LenFromSize(to_fill);
- CHECK(fixed_array_len < FixedArray::kMaxLength);
-
- CHECK(!heap->always_allocate());
- Object* array = heap->AllocateFixedArray(fixed_array_len).ToObjectChecked();
- CHECK(new_space->Contains(array));
-
- Object* object = heap->AllocateJSObjectFromMap(*my_map).ToObjectChecked();
- CHECK(new_space->Contains(object));
- JSObject* jsobject = JSObject::cast(object);
- CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
- CHECK_EQ(0, jsobject->properties()->length());
-
- // Construct a double value that looks like a pointer to the new space object
- // and store it into the obj.
- Address fake_object = reinterpret_cast<Address>(array) + kPointerSize;
- double boom_value = bit_cast<double>(fake_object);
- FieldIndex index = FieldIndex::ForDescriptor(*my_map, 0);
- jsobject->RawFastDoublePropertyAtPut(index, boom_value);
-
- CHECK_EQ(0, static_cast<int>(*limit_addr - *top_addr));
-
-  // Step 4: clone jsobject, but force always-allocate first to create a clone
-  // in old pointer space.
- AlwaysAllocateScope aa_scope(isolate);
- Object* clone_obj = heap->CopyJSObject(jsobject).ToObjectChecked();
- Handle<JSObject> clone(JSObject::cast(clone_obj));
- CHECK(heap->old_space()->Contains(clone->address()));
-
- CcTest::heap()->CollectGarbage(NEW_SPACE, "boom");
-
- // The value in cloned object should not be corrupted by GC.
- CHECK_EQ(boom_value, clone->RawFastDoublePropertyAt(index));
-}
-
-
static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
int tagged_descriptor, int double_descriptor,
bool check_tagged_value = true) {
@@ -1545,7 +1470,6 @@ static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
int double_descriptor,
bool check_tagged_value = true) {
if (FLAG_never_compact || !FLAG_incremental_marking) return;
- FLAG_stress_compaction = true;
FLAG_manual_evacuation_candidates_selection = true;
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
diff --git a/deps/v8/test/cctest/test-unique.cc b/deps/v8/test/cctest/test-unique.cc
index 15b800dc05..1e18fd8f97 100644
--- a/deps/v8/test/cctest/test-unique.cc
+++ b/deps/v8/test/cctest/test-unique.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(mythria): Remove this after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include <stdlib.h>
#include "src/v8.h"
diff --git a/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc b/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
index aef2ccf288..acfc4266ae 100644
--- a/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
+++ b/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// TODO(mythria): Remove this after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include <stdlib.h>
#include "src/v8.h"
@@ -23,81 +26,98 @@ TEST(Unscopables) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope handle_scope(isolate);
+ v8::Local<v8::Context> current_context = isolate->GetCurrentContext();
v8::Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
v8::Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
t1->SetHiddenPrototype(true);
- v8::Local<v8::Object> object = t0->GetFunction()->NewInstance();
- v8::Local<v8::Object> hidden_prototype = t1->GetFunction()->NewInstance();
-
- object->SetPrototype(hidden_prototype);
-
- context->Global()->Set(v8_str("object"), object);
- context->Global()->Set(v8_str("hidden_prototype"), hidden_prototype);
-
- CHECK_EQ(1, CompileRun(
- "var result;"
- "var x = 0;"
- "object.x = 1;"
- "with (object) {"
- " result = x;"
- "}"
- "result")->Int32Value());
+ v8::Local<v8::Object> object = t0->GetFunction(current_context)
+ .ToLocalChecked()
+ ->NewInstance(current_context)
+ .ToLocalChecked();
+ v8::Local<v8::Object> hidden_prototype = t1->GetFunction(current_context)
+ .ToLocalChecked()
+ ->NewInstance(current_context)
+ .ToLocalChecked();
+
+ CHECK(object->SetPrototype(current_context, hidden_prototype).FromJust());
+
+ context->Global()
+ ->Set(current_context, v8_str("object"), object)
+ .FromMaybe(false);
+ context->Global()
+ ->Set(current_context, v8_str("hidden_prototype"), hidden_prototype)
+ .FromMaybe(false);
+
+ CHECK_EQ(1, CompileRun("var result;"
+ "var x = 0;"
+ "object.x = 1;"
+ "with (object) {"
+ " result = x;"
+ "}"
+ "result")
+ ->Int32Value(current_context)
+ .FromJust());
Cleanup();
- CHECK_EQ(2, CompileRun(
- "var result;"
- "var x = 0;"
- "hidden_prototype.x = 2;"
- "with (object) {"
- " result = x;"
- "}"
- "result")->Int32Value());
+ CHECK_EQ(2, CompileRun("var result;"
+ "var x = 0;"
+ "hidden_prototype.x = 2;"
+ "with (object) {"
+ " result = x;"
+ "}"
+ "result")
+ ->Int32Value(current_context)
+ .FromJust());
Cleanup();
- CHECK_EQ(0, CompileRun(
- "var result;"
- "var x = 0;"
- "object.x = 3;"
- "object[Symbol.unscopables] = {x: true};"
- "with (object) {"
- " result = x;"
- "}"
- "result")->Int32Value());
+ CHECK_EQ(0, CompileRun("var result;"
+ "var x = 0;"
+ "object.x = 3;"
+ "object[Symbol.unscopables] = {x: true};"
+ "with (object) {"
+ " result = x;"
+ "}"
+ "result")
+ ->Int32Value(current_context)
+ .FromJust());
Cleanup();
- CHECK_EQ(0, CompileRun(
- "var result;"
- "var x = 0;"
- "hidden_prototype.x = 4;"
- "hidden_prototype[Symbol.unscopables] = {x: true};"
- "with (object) {"
- " result = x;"
- "}"
- "result")->Int32Value());
+ CHECK_EQ(0, CompileRun("var result;"
+ "var x = 0;"
+ "hidden_prototype.x = 4;"
+ "hidden_prototype[Symbol.unscopables] = {x: true};"
+ "with (object) {"
+ " result = x;"
+ "}"
+ "result")
+ ->Int32Value(current_context)
+ .FromJust());
Cleanup();
- CHECK_EQ(0, CompileRun(
- "var result;"
- "var x = 0;"
- "object.x = 5;"
- "hidden_prototype[Symbol.unscopables] = {x: true};"
- "with (object) {"
- " result = x;"
- "}"
- "result;")->Int32Value());
+ CHECK_EQ(0, CompileRun("var result;"
+ "var x = 0;"
+ "object.x = 5;"
+ "hidden_prototype[Symbol.unscopables] = {x: true};"
+ "with (object) {"
+ " result = x;"
+ "}"
+ "result;")
+ ->Int32Value(current_context)
+ .FromJust());
Cleanup();
- CHECK_EQ(0, CompileRun(
- "var result;"
- "var x = 0;"
- "hidden_prototype.x = 6;"
- "object[Symbol.unscopables] = {x: true};"
- "with (object) {"
- " result = x;"
- "}"
- "result")->Int32Value());
+ CHECK_EQ(0, CompileRun("var result;"
+ "var x = 0;"
+ "hidden_prototype.x = 6;"
+ "object[Symbol.unscopables] = {x: true};"
+ "with (object) {"
+ " result = x;"
+ "}"
+ "result")
+ ->Int32Value(current_context)
+ .FromJust());
}
}
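
The Unscopables hunks show the two idioms for consuming a v8::Maybe result: FromJust() inside a CHECK asserts success (the SetPrototype call), while FromMaybe(false) discards the result when failure is tolerable (the two Global()->Set calls). A minimal sketch, with a hypothetical helper name:

    #include "include/v8.h"

    // Object::Set returns v8::Maybe<bool>; FromMaybe(false) consumes it
    // without asserting, so a failed set is deliberately ignored here.
    void SetGlobalProperty(v8::Local<v8::Context> context,
                           v8::Local<v8::String> name,
                           v8::Local<v8::Value> value) {
      context->Global()->Set(context, name, value).FromMaybe(false);
    }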
diff --git a/deps/v8/test/cctest/test-utils-arm64.cc b/deps/v8/test/cctest/test-utils-arm64.cc
index b0b77bc97d..f125cc09bf 100644
--- a/deps/v8/test/cctest/test-utils-arm64.cc
+++ b/deps/v8/test/cctest/test-utils-arm64.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(mythria): Remove this after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include "src/v8.h"
#include "src/arm64/utils-arm64.h"
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index 5045b7e591..979c6f251b 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(mythria): Remove this after it is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include <stdlib.h>
#include <vector>
diff --git a/deps/v8/test/cctest/test-version.cc b/deps/v8/test/cctest/test-version.cc
index 50fca16871..77727a03c5 100644
--- a/deps/v8/test/cctest/test-version.cc
+++ b/deps/v8/test/cctest/test-version.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(mythria): Remove this after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include "src/v8.h"
#include "src/version.h"
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 31b812e287..2fd546d526 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(mythria): Remove this after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include <utility>
#include "src/v8.h"
@@ -89,9 +92,9 @@ TEST(Weakness) {
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
Handle<Smi> smi(Smi::FromInt(23), isolate);
int32_t hash = Object::GetOrCreateHash(isolate, key)->value();
- Runtime::WeakCollectionSet(weakmap, key, object, hash);
+ JSWeakCollection::Set(weakmap, key, object, hash);
int32_t object_hash = Object::GetOrCreateHash(isolate, object)->value();
- Runtime::WeakCollectionSet(weakmap, object, smi, object_hash);
+ JSWeakCollection::Set(weakmap, object, smi, object_hash);
}
CHECK_EQ(2, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
@@ -147,7 +150,7 @@ TEST(Shrinking) {
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
Handle<Smi> smi(Smi::FromInt(i), isolate);
int32_t object_hash = Object::GetOrCreateHash(isolate, object)->value();
- Runtime::WeakCollectionSet(weakmap, object, smi, object_hash);
+ JSWeakCollection::Set(weakmap, object, smi, object_hash);
}
}
@@ -196,7 +199,7 @@ TEST(Regress2060a) {
CHECK(!heap->InNewSpace(object->address()));
CHECK(!first_page->Contains(object->address()));
int32_t hash = Object::GetOrCreateHash(isolate, key)->value();
- Runtime::WeakCollectionSet(weakmap, key, object, hash);
+ JSWeakCollection::Set(weakmap, key, object, hash);
}
}
@@ -239,7 +242,7 @@ TEST(Regress2060b) {
for (int i = 0; i < 32; i++) {
Handle<Smi> smi(Smi::FromInt(i), isolate);
int32_t hash = Object::GetOrCreateHash(isolate, keys[i])->value();
- Runtime::WeakCollectionSet(weakmap, keys[i], smi, hash);
+ JSWeakCollection::Set(weakmap, keys[i], smi, hash);
}
// Force compacting garbage collection. The subsequent collections are used
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index 3595af288f..b6f0a78420 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(mythria): Remove this after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include <utility>
#include "src/v8.h"
@@ -90,7 +93,7 @@ TEST(WeakSet_Weakness) {
HandleScope scope(isolate);
Handle<Smi> smi(Smi::FromInt(23), isolate);
int32_t hash = Object::GetOrCreateHash(isolate, key)->value();
- Runtime::WeakCollectionSet(weakset, key, smi, hash);
+ JSWeakCollection::Set(weakset, key, smi, hash);
}
CHECK_EQ(1, ObjectHashTable::cast(weakset->table())->NumberOfElements());
@@ -146,7 +149,7 @@ TEST(WeakSet_Shrinking) {
Handle<JSObject> object = factory->NewJSObjectFromMap(map);
Handle<Smi> smi(Smi::FromInt(i), isolate);
int32_t hash = Object::GetOrCreateHash(isolate, object)->value();
- Runtime::WeakCollectionSet(weakset, object, smi, hash);
+ JSWeakCollection::Set(weakset, object, smi, hash);
}
}
@@ -195,7 +198,7 @@ TEST(WeakSet_Regress2060a) {
CHECK(!heap->InNewSpace(object->address()));
CHECK(!first_page->Contains(object->address()));
int32_t hash = Object::GetOrCreateHash(isolate, key)->value();
- Runtime::WeakCollectionSet(weakset, key, object, hash);
+ JSWeakCollection::Set(weakset, key, object, hash);
}
}
@@ -238,7 +241,7 @@ TEST(WeakSet_Regress2060b) {
for (int i = 0; i < 32; i++) {
Handle<Smi> smi(Smi::FromInt(i), isolate);
int32_t hash = Object::GetOrCreateHash(isolate, keys[i])->value();
- Runtime::WeakCollectionSet(weakset, keys[i], smi, hash);
+ JSWeakCollection::Set(weakset, keys[i], smi, hash);
}
// Force compacting garbage collection. The subsequent collections are used
diff --git a/deps/v8/test/cctest/trace-extension.cc b/deps/v8/test/cctest/trace-extension.cc
index e7f097f86f..5e558cfd05 100644
--- a/deps/v8/test/cctest/trace-extension.cc
+++ b/deps/v8/test/cctest/trace-extension.cc
@@ -25,9 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// TODO(mythria): Remove this after this flag is turned on globally
+#define V8_IMMINENT_DEPRECATION_WARNINGS
+
#include "test/cctest/trace-extension.h"
-#include "src/sampler.h"
+#include "src/profiler/sampler.h"
#include "src/vm-state-inl.h"
#include "test/cctest/cctest.h"
@@ -41,20 +44,35 @@ const char* TraceExtension::kSource =
"native function js_entry_sp_level2();";
-v8::Handle<v8::FunctionTemplate> TraceExtension::GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Handle<v8::String> name) {
- if (name->Equals(v8::String::NewFromUtf8(isolate, "trace"))) {
+v8::Local<v8::FunctionTemplate> TraceExtension::GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name) {
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ if (name->Equals(context, v8::String::NewFromUtf8(isolate, "trace",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
return v8::FunctionTemplate::New(isolate, TraceExtension::Trace);
- } else if (name->Equals(v8::String::NewFromUtf8(isolate, "js_trace"))) {
+ } else if (name->Equals(context,
+ v8::String::NewFromUtf8(isolate, "js_trace",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
return v8::FunctionTemplate::New(isolate, TraceExtension::JSTrace);
- } else if (name->Equals(v8::String::NewFromUtf8(isolate, "js_entry_sp"))) {
+ } else if (name->Equals(context,
+ v8::String::NewFromUtf8(isolate, "js_entry_sp",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
return v8::FunctionTemplate::New(isolate, TraceExtension::JSEntrySP);
- } else if (name->Equals(v8::String::NewFromUtf8(isolate,
- "js_entry_sp_level2"))) {
+ } else if (name->Equals(context,
+ v8::String::NewFromUtf8(isolate, "js_entry_sp_level2",
+ v8::NewStringType::kNormal)
+ .ToLocalChecked())
+ .FromJust()) {
return v8::FunctionTemplate::New(isolate, TraceExtension::JSEntrySPLevel2);
} else {
CHECK(false);
- return v8::Handle<v8::FunctionTemplate>();
+ return v8::Local<v8::FunctionTemplate>();
}
}
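
The trace-extension hunks spell out the full maybe-based string construction at every call site. A small wrapper keeps that readable; the sketch below assumes the same v8 4.7 API (cctest's own v8_str helper is similar; the name here is hypothetical):

    #include "include/v8.h"

    // NewFromUtf8 now takes a NewStringType and returns MaybeLocal<String>;
    // ToLocalChecked() aborts if string allocation failed.
    v8::Local<v8::String> Utf8String(v8::Isolate* isolate, const char* data) {
      return v8::String::NewFromUtf8(isolate, data,
                                     v8::NewStringType::kNormal)
          .ToLocalChecked();
    }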
diff --git a/deps/v8/test/cctest/trace-extension.h b/deps/v8/test/cctest/trace-extension.h
index 919eda5bb5..792e437a89 100644
--- a/deps/v8/test/cctest/trace-extension.h
+++ b/deps/v8/test/cctest/trace-extension.h
@@ -33,12 +33,13 @@
namespace v8 {
namespace internal {
+struct TickSample;
+
class TraceExtension : public v8::Extension {
public:
TraceExtension() : v8::Extension("v8/trace", kSource) { }
- virtual v8::Handle<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate,
- v8::Handle<v8::String> name);
+ virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
+ v8::Isolate* isolate, v8::Local<v8::String> name);
static void Trace(const v8::FunctionCallbackInfo<v8::Value>& args);
static void JSTrace(const v8::FunctionCallbackInfo<v8::Value>& args);
static void JSEntrySP(const v8::FunctionCallbackInfo<v8::Value>& args);
diff --git a/deps/v8/test/cctest/types-fuzz.h b/deps/v8/test/cctest/types-fuzz.h
index 60f054a66e..7aa8d854ee 100644
--- a/deps/v8/test/cctest/types-fuzz.h
+++ b/deps/v8/test/cctest/types-fuzz.h
@@ -39,7 +39,7 @@ template<class Type, class TypeHandle, class Region>
class Types {
public:
Types(Region* region, Isolate* isolate, v8::base::RandomNumberGenerator* rng)
- : region_(region), rng_(rng) {
+ : region_(region), isolate_(isolate), rng_(rng) {
#define DECLARE_TYPE(name, value) \
name = Type::name(region); \
types.push_back(name);
@@ -304,6 +304,20 @@ class Types {
}
return type;
}
+ case 8: { // simd
+ static const int num_simd_types =
+ #define COUNT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) +1
+ SIMD128_TYPES(COUNT_SIMD_TYPE);
+ #undef COUNT_SIMD_TYPE
+ TypeHandle (*simd_constructors[num_simd_types])(Isolate*, Region*) = {
+ #define COUNT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
+ &Type::Name,
+ SIMD128_TYPES(COUNT_SIMD_TYPE)
+ #undef COUNT_SIMD_TYPE
+ };
+ return simd_constructors[rng_->NextInt(num_simd_types)](
+ isolate_, region_);
+ }
default: { // union
int n = rng_->NextInt(10);
TypeHandle type = None;
@@ -321,6 +335,7 @@ class Types {
private:
Region* region_;
+ Isolate* isolate_;
v8::base::RandomNumberGenerator* rng_;
};
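
The num_simd_types computation above relies on the X-macro counting idiom: applying a macro that expands each list entry to "+1" yields the entry count at compile time. A stand-alone illustration with a hypothetical list:

    // COLOR_LIST(COUNT_ENTRY) expands to "+1 +1 +1", so kNumColors == 3.
    #define COLOR_LIST(V) V(Red) V(Green) V(Blue)
    #define COUNT_ENTRY(name) +1
    static const int kNumColors = 0 COLOR_LIST(COUNT_ENTRY);
    #undef COUNT_ENTRY
    #undef COLOR_LIST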
diff --git a/deps/v8/test/intl/intl.status b/deps/v8/test/intl/intl.status
index d48d695165..6e3192b490 100644
--- a/deps/v8/test/intl/intl.status
+++ b/deps/v8/test/intl/intl.status
@@ -35,9 +35,6 @@
# TODO(jochen): The following test is flaky.
'overrides/caching': [PASS, FAIL],
- # BUG(2899): default locale for search fails on mac and on android.
- 'collator/default-locale': [['system == macos or arch == android_arm or arch == android_ia32', FAIL]],
-
# BUG(v8:3454).
'date-format/parse-MMMdy': [FAIL],
'date-format/parse-mdyhms': [FAIL],
diff --git a/deps/v8/test/intl/testcfg.py b/deps/v8/test/intl/testcfg.py
index 9fc087e5f5..51fa1e1a88 100644
--- a/deps/v8/test/intl/testcfg.py
+++ b/deps/v8/test/intl/testcfg.py
@@ -46,7 +46,9 @@ class IntlTestSuite(testsuite.TestSuite):
for filename in files:
if (filename.endswith(".js") and filename != "assert.js" and
filename != "utils.js"):
- testname = os.path.join(dirname[len(self.root) + 1:], filename[:-3])
+ fullpath = os.path.join(dirname, filename)
+ relpath = fullpath[len(self.root) + 1 : -3]
+ testname = relpath.replace(os.path.sep, "/")
test = testcase.TestCase(self, testname)
tests.append(test)
return tests
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index bff5b6536f..911f9f3289 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -9,11 +9,24 @@
"resources": ["base.js"],
"tests": [
{
+ "name": "RestParameters",
+ "path": ["RestParameters"],
+ "main": "run.js",
+ "resources": ["rest.js"],
+ "flags": ["--harmony-rest-parameters"],
+ "run_count": 5,
+ "units": "score",
+ "results_regexp": "^%s\\-RestParameters\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Basic1"}
+ ]
+ },
+ {
"name": "SpreadCalls",
"path": ["SpreadCalls"],
"main": "run.js",
"resources": ["spreadcalls.js"],
- "flags": ["--harmony-spreadcalls"],
+ "flags": ["--harmony-spread_calls"],
"run_count": 5,
"units": "score",
"results_regexp": "^%s\\-SpreadCalls\\(Score\\): (.+)$",
@@ -100,7 +113,6 @@
"path": ["Object"],
"main": "run.js",
"resources": ["assign.js"],
- "flags": ["--harmony-object"],
"results_regexp": "^%s\\-Object\\(Score\\): (.+)$",
"tests": [
{"name": "Assign"}
diff --git a/deps/v8/test/js-perf-test/RestParameters/rest.js b/deps/v8/test/js-perf-test/RestParameters/rest.js
new file mode 100644
index 0000000000..cf52f5f45f
--- /dev/null
+++ b/deps/v8/test/js-perf-test/RestParameters/rest.js
@@ -0,0 +1,30 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite('Basic1', [1000], [
+ new Benchmark('BasicRest1', false, false, 0,
+ BasicRest1, BasicRest1Setup, BasicRest1TearDown)
+]);
+
+// ----------------------------------------------------------------------------
+
+var result;
+
+function basic_rest_fn_1(factor, ...values) {
+ var result = 0;
+ for (var i = 0; i < values.length; ++i) {
+ result += (factor * values[i]);
+ }
+ return result;
+}
+
+function BasicRest1Setup() {}
+
+function BasicRest1() {
+ result = basic_rest_fn_1(10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
+}
+
+function BasicRest1TearDown() {
+ return result == 550;
+}
diff --git a/deps/v8/test/js-perf-test/RestParameters/run.js b/deps/v8/test/js-perf-test/RestParameters/run.js
new file mode 100644
index 0000000000..71d3d43bc2
--- /dev/null
+++ b/deps/v8/test/js-perf-test/RestParameters/run.js
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+load('../base.js');
+load('rest.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-RestParameters(Score): ' + result);
+}
+
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/message/arrow-missing.out b/deps/v8/test/message/arrow-missing.out
index f042a20fad..bad6157a0a 100644
--- a/deps/v8/test/message/arrow-missing.out
+++ b/deps/v8/test/message/arrow-missing.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Expected () to start arrow function, but got ';' instead of '=>'
+*%(basename)s:7: SyntaxError: Unexpected token )
function foo() { return(); }
- ^
-SyntaxError: Expected () to start arrow function, but got ';' instead of '=>'
+ ^
+SyntaxError: Unexpected token )
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index cfe22f15d7..bc73510c09 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -52,7 +52,9 @@ class MessageTestSuite(testsuite.TestSuite):
files.sort()
for filename in files:
if filename.endswith(".js"):
- testname = os.path.join(dirname[len(self.root) + 1:], filename[:-3])
+ fullpath = os.path.join(dirname, filename)
+ relpath = fullpath[len(self.root) + 1 : -3]
+ testname = relpath.replace(os.path.sep, "/")
test = testcase.TestCase(self, testname)
tests.append(test)
return tests
diff --git a/deps/v8/test/mjsunit/array-natives-elements.js b/deps/v8/test/mjsunit/array-natives-elements.js
index a19a931adb..bf884fca47 100644
--- a/deps/v8/test/mjsunit/array-natives-elements.js
+++ b/deps/v8/test/mjsunit/array-natives-elements.js
@@ -30,6 +30,7 @@
// IC and Crankshaft support for smi-only elements in dynamic array literals.
function get(foo) { return foo; } // Used to generate dynamic values.
+var __sequence = 0;
function array_natives_test() {
// Ensure small array literals start in specific element kind mode.
@@ -41,7 +42,6 @@ function array_natives_test() {
// This code exists to eliminate the learning influence of AllocationSites
// on the following tests.
- var __sequence = 0;
function make_array_string(literal) {
this.__sequence = this.__sequence + 1;
return "/* " + this.__sequence + " */ " + literal;
@@ -53,22 +53,22 @@ function array_natives_test() {
// Push
var a0 = make_array("[1, 2, 3]");
assertTrue(%HasFastSmiElements(a0));
- a0.push(4);
+ assertEquals(4, a0.push(4));
assertTrue(%HasFastSmiElements(a0));
- a0.push(1.3);
+ assertEquals(5, a0.push(1.3));
assertTrue(%HasFastDoubleElements(a0));
- a0.push(1.5);
+ assertEquals(6, a0.push(1.5));
assertTrue(%HasFastDoubleElements(a0));
- a0.push({});
+ assertEquals(7, a0.push({}));
assertTrue(%HasFastObjectElements(a0));
- a0.push({});
+ assertEquals(8, a0.push({}));
assertTrue(%HasFastObjectElements(a0));
assertEquals([1,2,3,4,1.3,1.5,{},{}], a0);
// Concat
var a1;
a1 = [1,2,3].concat([]);
- assertTrue(%HasFastSmiElements(a1));
+ //assertTrue(%HasFastSmiElements(a1));
assertEquals([1,2,3], a1);
a1 = [1,2,3].concat([4,5,6]);
assertTrue(%HasFastSmiElements(a1));
@@ -151,28 +151,26 @@ function array_natives_test() {
assertTrue(%HasFastSmiElements(a3));
assertEquals([1], a3r);
assertEquals([2, 2, 3], a3);
-
a3 = [1.1,2,3];
a3r = a3.splice(0, 0);
assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([], a3r);
assertEquals([1.1, 2, 3], a3);
- a3 = [1.1,2,3];
+ a3 = [1.1, 2, 3];
a3r = a3.splice(0, 1);
assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([1.1], a3r);
assertEquals([2, 3], a3);
- a3 = [1.1,2,3];
+ a3 = [1.1, 2, 3];
a3r = a3.splice(0, 0, 2);
- // Commented out since handled in js, which takes the best fit.
- // assertTrue(%HasFastDoubleElements(a3r));
- assertTrue(%HasFastSmiElements(a3r));
+ assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([], a3r);
assertEquals([2, 1.1, 2, 3], a3);
- a3 = [1.1,2,3];
+ a3 = [1.1, 2, 3];
+ assertTrue(%HasFastDoubleElements(a3));
a3r = a3.splice(0, 1, 2);
assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
@@ -180,9 +178,7 @@ function array_natives_test() {
assertEquals([2, 2, 3], a3);
a3 = [1.1,2,3];
a3r = a3.splice(0, 0, 2.1);
- // Commented out since handled in js, which takes the best fit.
- // assertTrue(%HasFastDoubleElements(a3r));
- assertTrue(%HasFastSmiElements(a3r));
+ assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([], a3r);
assertEquals([2.1, 1.1, 2, 3], a3);
@@ -194,9 +190,7 @@ function array_natives_test() {
assertEquals([2.2, 2, 3], a3);
a3 = [1,2,3];
a3r = a3.splice(0, 0, 2.1);
- // Commented out since handled in js, which takes the best fit.
- // assertTrue(%HasFastDoubleElements(a3r));
- assertTrue(%HasFastSmiElements(a3r));
+ assertTrue(%HasFastDoubleElements(a3r));
assertTrue(%HasFastDoubleElements(a3));
assertEquals([], a3r);
assertEquals([2.1, 1, 2, 3], a3);
@@ -206,7 +200,6 @@ function array_natives_test() {
assertTrue(%HasFastDoubleElements(a3));
assertEquals([1], a3r);
assertEquals([2.2, 2, 3], a3);
-
a3 = [{},2,3];
a3r = a3.splice(0, 0);
assertTrue(%HasFastObjectElements(a3r));
@@ -231,7 +224,6 @@ function array_natives_test() {
assertTrue(%HasFastObjectElements(a3));
assertEquals([1], a3r);
assertEquals([{}, 2, 3], a3);
-
a3 = [1.1,2,3];
a3r = a3.splice(0, 0, {});
assertTrue(%HasFastObjectElements(a3r));
@@ -244,48 +236,67 @@ function array_natives_test() {
assertTrue(%HasFastObjectElements(a3));
assertEquals([1.1], a3r);
assertEquals([{}, 2, 3], a3);
+ a3 = [1.1, 2.2, 3.3];
+ a3r = a3.splice(2, 1);
+ assertTrue(%HasFastDoubleElements(a3r));
+ assertTrue(%HasFastDoubleElements(a3));
+ assertEquals([3.3], a3r);
+ //assertTrue(%HasFastDoubleElements(a3r));
+ assertEquals([1.1, 2.2], a3);
+ //assertTrue(%HasFastDoubleElements(a3r));
+ a3r = a3.splice(1, 1, 4.4, 5.5);
+ //assertTrue(%HasFastDoubleElements(a3r));
+ //assertTrue(%HasFastDoubleElements(a3));
+ assertEquals([2.2], a3r);
+ assertEquals([1.1, 4.4, 5.5], a3);
// Pop
var a4 = [1,2,3];
assertEquals(3, a4.pop());
- assertTrue(%HasFastSmiElements(a4));
+ assertEquals([1,2], a4);
+ //assertTrue(%HasFastSmiElements(a4));
a4 = [1.1,2,3];
assertEquals(3, a4.pop());
- assertTrue(%HasFastDoubleElements(a4));
+ assertEquals([1.1,2], a4);
+ //assertTrue(%HasFastDoubleElements(a4));
a4 = [{},2,3];
assertEquals(3, a4.pop());
- assertTrue(%HasFastObjectElements(a4));
+ assertEquals([{},2], a4);
+ //assertTrue(%HasFastObjectElements(a4));
// Shift
var a4 = [1,2,3];
assertEquals(1, a4.shift());
- assertTrue(%HasFastSmiElements(a4));
+ assertEquals([2,3], a4);
+ //assertTrue(%HasFastSmiElements(a4));
a4 = [1.1,2,3];
assertEquals(1.1, a4.shift());
- assertTrue(%HasFastDoubleElements(a4));
+ assertEquals([2,3], a4);
+ //assertTrue(%HasFastDoubleElements(a4));
a4 = [{},2,3];
assertEquals({}, a4.shift());
- assertTrue(%HasFastObjectElements(a4));
+ assertEquals([2,3], a4);
+ //assertTrue(%HasFastObjectElements(a4));
// Unshift
var a4 = [1,2,3];
- a4.unshift(1);
+ assertEquals(4, a4.unshift(1));
assertTrue(%HasFastSmiElements(a4));
assertEquals([1,1,2,3], a4);
a4 = [1,2,3];
- a4.unshift(1.1);
+ assertEquals(4, a4.unshift(1.1));
assertTrue(%HasFastDoubleElements(a4));
assertEquals([1.1,1,2,3], a4);
a4 = [1.1,2,3];
- a4.unshift(1);
+ assertEquals(4, a4.unshift(1));
assertTrue(%HasFastDoubleElements(a4));
assertEquals([1,1.1,2,3], a4);
a4 = [{},2,3];
- a4.unshift(1);
+ assertEquals(4, a4.unshift(1));
assertTrue(%HasFastObjectElements(a4));
assertEquals([1,{},2,3], a4);
a4 = [{},2,3];
- a4.unshift(1.1);
+ assertEquals(4, a4.unshift(1.1));
assertTrue(%HasFastObjectElements(a4));
assertEquals([1.1,{},2,3], a4);
}
diff --git a/deps/v8/test/mjsunit/array-splice.js b/deps/v8/test/mjsunit/array-splice.js
index be2b1064e6..744e95454b 100644
--- a/deps/v8/test/mjsunit/array-splice.js
+++ b/deps/v8/test/mjsunit/array-splice.js
@@ -116,16 +116,41 @@
assertEquals([1, 2, 3, 4, 5, 6, 7], spliced);
array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(-1e100);
+ assertEquals([], array);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
spliced = array.splice(-3);
assertEquals([1, 2, 3, 4], array);
assertEquals([5, 6, 7], spliced);
array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(-3.999999);
+ assertEquals([1, 2, 3, 4], array);
+ assertEquals([5, 6, 7], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(-3.000001);
+ assertEquals([1, 2, 3, 4], array);
+ assertEquals([5, 6, 7], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
spliced = array.splice(4);
assertEquals([1, 2, 3, 4], array);
assertEquals([5, 6, 7], spliced);
array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(4.999999);
+ assertEquals([1, 2, 3, 4], array);
+ assertEquals([5, 6, 7], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(4.000001);
+ assertEquals([1, 2, 3, 4], array);
+ assertEquals([5, 6, 7], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
spliced = array.splice(6);
assertEquals([1, 2, 3, 4, 5, 6], array);
assertEquals([7], spliced);
@@ -146,21 +171,51 @@
assertEquals([], spliced);
array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(1e100);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], array);
+ assertEquals([], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
spliced = array.splice(0, -100);
assertEquals([1, 2, 3, 4, 5, 6, 7], array);
assertEquals([], spliced);
array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, -1e100);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], array);
+ assertEquals([], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
spliced = array.splice(0, -3);
assertEquals([1, 2, 3, 4, 5, 6, 7], array);
assertEquals([], spliced);
array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, -3.999999);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], array);
+ assertEquals([], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, -3.000001);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], array);
+ assertEquals([], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
spliced = array.splice(0, 4);
assertEquals([5, 6, 7], array);
assertEquals([1, 2, 3, 4], spliced);
array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, 4.999999);
+ assertEquals([5, 6, 7], array);
+ assertEquals([1, 2, 3, 4], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, 4.000001);
+ assertEquals([5, 6, 7], array);
+ assertEquals([1, 2, 3, 4], spliced);
+
+ array = [1, 2, 3, 4, 5, 6, 7];
spliced = array.splice(0, 6);
assertEquals([7], array);
assertEquals([1, 2, 3, 4, 5, 6], spliced);
@@ -180,6 +235,11 @@
assertEquals([], array);
assertEquals([1, 2, 3, 4, 5, 6, 7], spliced);
+ array = [1, 2, 3, 4, 5, 6, 7];
+ spliced = array.splice(0, 1e100);
+ assertEquals([], array);
+ assertEquals([1, 2, 3, 4, 5, 6, 7], spliced);
+
// Some exotic cases.
obj = { toString: function() { throw 'Exception'; } };
diff --git a/deps/v8/test/mjsunit/asm/atomics-add.js b/deps/v8/test/mjsunit/asm/atomics-add.js
index 69400c8059..77dd4d8a03 100644
--- a/deps/v8/test/mjsunit/asm/atomics-add.js
+++ b/deps/v8/test/mjsunit/asm/atomics-add.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-atomics --harmony-sharedarraybuffer
+// Flags: --harmony-sharedarraybuffer
function Module(stdlib, foreign, heap) {
"use asm";
diff --git a/deps/v8/test/mjsunit/asm/atomics-and.js b/deps/v8/test/mjsunit/asm/atomics-and.js
index e60f1f6a13..5660f508b0 100644
--- a/deps/v8/test/mjsunit/asm/atomics-and.js
+++ b/deps/v8/test/mjsunit/asm/atomics-and.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-atomics --harmony-sharedarraybuffer
+// Flags: --harmony-sharedarraybuffer
function Module(stdlib, foreign, heap) {
"use asm";
diff --git a/deps/v8/test/mjsunit/asm/atomics-compareexchange.js b/deps/v8/test/mjsunit/asm/atomics-compareexchange.js
index 208a06043c..edcd7f908c 100644
--- a/deps/v8/test/mjsunit/asm/atomics-compareexchange.js
+++ b/deps/v8/test/mjsunit/asm/atomics-compareexchange.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-atomics --harmony-sharedarraybuffer
+// Flags: --harmony-sharedarraybuffer
function Module(stdlib, foreign, heap) {
"use asm";
@@ -12,8 +12,6 @@ function Module(stdlib, foreign, heap) {
var MEMU8 = new stdlib.Uint8Array(heap);
var MEMU16 = new stdlib.Uint16Array(heap);
var MEMU32 = new stdlib.Uint32Array(heap);
- var MEMF32 = new stdlib.Float32Array(heap);
- var MEMF64 = new stdlib.Float64Array(heap);
var compareExchange = stdlib.Atomics.compareExchange;
var fround = stdlib.Math.fround;
@@ -59,20 +57,6 @@ function Module(stdlib, foreign, heap) {
return compareExchange(MEMU32, i, o, n)>>>0;
}
- function compareExchangef32(i, o, n) {
- i = i | 0;
- o = fround(o);
- n = fround(n);
- return fround(compareExchange(MEMF32, i, o, n));
- }
-
- function compareExchangef64(i, o, n) {
- i = i | 0;
- o = +o;
- n = +n;
- return +compareExchange(MEMF64, i, o, n);
- }
-
return {
compareExchangei8: compareExchangei8,
compareExchangei16: compareExchangei16,
@@ -80,8 +64,6 @@ function Module(stdlib, foreign, heap) {
compareExchangeu8: compareExchangeu8,
compareExchangeu16: compareExchangeu16,
compareExchangeu32: compareExchangeu32,
- compareExchangef32: compareExchangef32,
- compareExchangef64: compareExchangef64
};
}
@@ -117,5 +99,3 @@ testElementType(Int32Array, m.compareExchangei32, 0);
testElementType(Uint8Array, m.compareExchangeu8, 0);
testElementType(Uint16Array, m.compareExchangeu16, 0);
testElementType(Uint32Array, m.compareExchangeu32, 0);
-testElementType(Float32Array, m.compareExchangef32, NaN);
-testElementType(Float64Array, m.compareExchangef64, NaN);
diff --git a/deps/v8/test/mjsunit/asm/atomics-exchange.js b/deps/v8/test/mjsunit/asm/atomics-exchange.js
index bb70322c7e..ed2b0fa21b 100644
--- a/deps/v8/test/mjsunit/asm/atomics-exchange.js
+++ b/deps/v8/test/mjsunit/asm/atomics-exchange.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-atomics --harmony-sharedarraybuffer
+// Flags: --harmony-sharedarraybuffer
function Module(stdlib, foreign, heap) {
"use asm";
diff --git a/deps/v8/test/mjsunit/asm/atomics-load.js b/deps/v8/test/mjsunit/asm/atomics-load.js
index 769fb40e2c..4234d22c4e 100644
--- a/deps/v8/test/mjsunit/asm/atomics-load.js
+++ b/deps/v8/test/mjsunit/asm/atomics-load.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-atomics --harmony-sharedarraybuffer
+// Flags: --harmony-sharedarraybuffer
function Module(stdlib, foreign, heap) {
"use asm";
@@ -12,8 +12,6 @@ function Module(stdlib, foreign, heap) {
var MEMU8 = new stdlib.Uint8Array(heap);
var MEMU16 = new stdlib.Uint16Array(heap);
var MEMU32 = new stdlib.Uint32Array(heap);
- var MEMF32 = new stdlib.Float32Array(heap);
- var MEMF64 = new stdlib.Float64Array(heap);
var load = stdlib.Atomics.load;
var fround = stdlib.Math.fround;
@@ -47,16 +45,6 @@ function Module(stdlib, foreign, heap) {
return load(MEMU32, i)>>>0;
}
- function loadf32(i) {
- i = i | 0;
- return fround(load(MEMF32, i));
- }
-
- function loadf64(i) {
- i = i | 0;
- return +load(MEMF64, i);
- }
-
return {
loadi8: loadi8,
loadi16: loadi16,
@@ -64,8 +52,6 @@ function Module(stdlib, foreign, heap) {
loadu8: loadu8,
loadu16: loadu16,
loadu32: loadu32,
- loadf32: loadf32,
- loadf64: loadf64
};
}
@@ -98,5 +84,3 @@ testElementType(Int32Array, m.loadi32, 0);
testElementType(Uint8Array, m.loadu8, 0);
testElementType(Uint16Array, m.loadu16, 0);
testElementType(Uint32Array, m.loadu32, 0);
-testElementType(Float32Array, m.loadf32, NaN);
-testElementType(Float64Array, m.loadf64, NaN);
diff --git a/deps/v8/test/mjsunit/asm/atomics-or.js b/deps/v8/test/mjsunit/asm/atomics-or.js
index df87d24d74..7ea29156e8 100644
--- a/deps/v8/test/mjsunit/asm/atomics-or.js
+++ b/deps/v8/test/mjsunit/asm/atomics-or.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-atomics --harmony-sharedarraybuffer
+// Flags: --harmony-sharedarraybuffer
function Module(stdlib, foreign, heap) {
"use asm";
diff --git a/deps/v8/test/mjsunit/asm/atomics-store.js b/deps/v8/test/mjsunit/asm/atomics-store.js
index 1f7a5f91c7..bd4ab6a267 100644
--- a/deps/v8/test/mjsunit/asm/atomics-store.js
+++ b/deps/v8/test/mjsunit/asm/atomics-store.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-atomics --harmony-sharedarraybuffer
+// Flags: --harmony-sharedarraybuffer
function Module(stdlib, foreign, heap) {
"use asm";
@@ -12,8 +12,6 @@ function Module(stdlib, foreign, heap) {
var MEMU8 = new stdlib.Uint8Array(heap);
var MEMU16 = new stdlib.Uint16Array(heap);
var MEMU32 = new stdlib.Uint32Array(heap);
- var MEMF32 = new stdlib.Float32Array(heap);
- var MEMF64 = new stdlib.Float64Array(heap);
var store = stdlib.Atomics.store;
var fround = stdlib.Math.fround;
@@ -53,18 +51,6 @@ function Module(stdlib, foreign, heap) {
return store(MEMU32, i, x)>>>0;
}
- function storef32(i, x) {
- i = i | 0;
- x = fround(x);
- return fround(store(MEMF32, i, x));
- }
-
- function storef64(i, x) {
- i = i | 0;
- x = +x;
- return +store(MEMF64, i, x);
- }
-
return {
storei8: storei8,
storei16: storei16,
@@ -72,8 +58,6 @@ function Module(stdlib, foreign, heap) {
storeu8: storeu8,
storeu16: storeu16,
storeu32: storeu32,
- storef32: storef32,
- storef64: storef64
};
}
@@ -105,5 +89,3 @@ testElementType(Int32Array, m.storei32, 0);
testElementType(Uint8Array, m.storeu8, 0);
testElementType(Uint16Array, m.storeu16, 0);
testElementType(Uint32Array, m.storeu32, 0);
-testElementType(Float32Array, m.storef32, NaN);
-testElementType(Float64Array, m.storef64, NaN);
diff --git a/deps/v8/test/mjsunit/asm/atomics-sub.js b/deps/v8/test/mjsunit/asm/atomics-sub.js
index f9e56ffa4b..d737811790 100644
--- a/deps/v8/test/mjsunit/asm/atomics-sub.js
+++ b/deps/v8/test/mjsunit/asm/atomics-sub.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-atomics --harmony-sharedarraybuffer
+// Flags: --harmony-sharedarraybuffer
function Module(stdlib, foreign, heap) {
"use asm";
diff --git a/deps/v8/test/mjsunit/asm/atomics-xor.js b/deps/v8/test/mjsunit/asm/atomics-xor.js
index 893ea013fd..66052b3940 100644
--- a/deps/v8/test/mjsunit/asm/atomics-xor.js
+++ b/deps/v8/test/mjsunit/asm/atomics-xor.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-atomics --harmony-sharedarraybuffer
+// Flags: --harmony-sharedarraybuffer
function Module(stdlib, foreign, heap) {
"use asm";
diff --git a/deps/v8/test/mjsunit/builtins.js b/deps/v8/test/mjsunit/builtins.js
index 5035e67309..62989399de 100644
--- a/deps/v8/test/mjsunit/builtins.js
+++ b/deps/v8/test/mjsunit/builtins.js
@@ -25,7 +25,12 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-natives-as=builtins
+// Flags: --allow-natives-syntax --expose-natives-as=builtins
+
+// Verify that the builtin typed arrays have been pretenured.
+assertFalse(%InNewSpace(builtins.kMath));
+assertFalse(%InNewSpace(builtins.rempio2result));
+assertFalse(%InNewSpace(builtins.rngstate));
// Checks that all function properties of the builtin object that are actually
// constructors (recognized by having properties on their .prototype object),
diff --git a/deps/v8/test/mjsunit/compare-known-objects-slow.js b/deps/v8/test/mjsunit/compare-known-objects-slow.js
index afa198fcb3..41e9fb4ad7 100644
--- a/deps/v8/test/mjsunit/compare-known-objects-slow.js
+++ b/deps/v8/test/mjsunit/compare-known-objects-slow.js
@@ -39,6 +39,22 @@ function eq_strict(a, b) {
return a === b;
}
+function le(a, b) {
+ return a <= b;
+}
+
+function lt(a, b) {
+ return a < b;
+}
+
+function ge(a, b) {
+ return a >= b;
+}
+
+function gt(a, b) {
+ return a > b;
+}
+
function test(a, b) {
// Check CompareIC for equality of known objects.
assertTrue(eq(a, a));
@@ -48,6 +64,22 @@ function test(a, b) {
assertTrue(eq_strict(a, a));
assertTrue(eq_strict(b, b));
assertFalse(eq_strict(a, b));
+ // Check CompareIC for less than or equal of known objects.
+ assertTrue(le(a, a));
+ assertTrue(le(a, b));
+ assertTrue(le(b, a));
+ // Check CompareIC for less than of known objects.
+ assertFalse(lt(a, a));
+ assertFalse(lt(a, b));
+ assertFalse(lt(b, a));
+ // Check CompareIC for greater than or equal of known objects.
+ assertTrue(ge(a, a));
+ assertTrue(ge(a, b));
+ assertTrue(ge(b, a));
+ // Check CompareIC for greater than of known objects.
+ assertFalse(gt(a, a));
+ assertFalse(gt(a, b));
+ assertFalse(gt(b, a));
}
// Prepare two objects in slow mode that have the same map.
diff --git a/deps/v8/test/mjsunit/compare-known-objects-tostringtag.js b/deps/v8/test/mjsunit/compare-known-objects-tostringtag.js
new file mode 100644
index 0000000000..81544ca69f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compare-known-objects-tostringtag.js
@@ -0,0 +1,57 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-tostring
+
+function le(a, b) {
+ return a <= b;
+}
+
+function lt(a, b) {
+ return a < b;
+}
+
+function ge(a, b) {
+ return a >= b;
+}
+
+function gt(a, b) {
+ return a > b;
+}
+
+function test(a, b) {
+ // Check CompareIC for less than or equal of known objects.
+ assertThrows(function() {le(a, a)});
+ assertThrows(function() {le(a, b)});
+ assertThrows(function() {le(b, a)});
+ // Check CompareIC for less than of known objects.
+ assertThrows(function() {lt(a, a)});
+ assertThrows(function() {lt(a, b)});
+ assertThrows(function() {lt(b, a)});
+ // Check CompareIC for greater than or equal of known objects.
+ assertThrows(function() {ge(a, a)});
+ assertThrows(function() {ge(a, b)});
+ assertThrows(function() {ge(b, a)});
+ // Check CompareIC for greater than of known objects.
+ assertThrows(function() {gt(a, a)});
+ assertThrows(function() {gt(a, b)});
+ assertThrows(function() {gt(b, a)});
+}
+
+function O() { }
+Object.defineProperty(O.prototype, Symbol.toStringTag, {
+ get: function() { throw "@@toStringTag called!" }
+});
+
+var obj1 = new O;
+var obj2 = new O;
+
+assertTrue(%HaveSameMap(obj1, obj2));
+test(obj1, obj2);
+test(obj1, obj2);
+%OptimizeFunctionOnNextCall(le);
+%OptimizeFunctionOnNextCall(lt);
+%OptimizeFunctionOnNextCall(ge);
+%OptimizeFunctionOnNextCall(gt);
+test(obj1, obj2);
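A sketch of why every relational operator in this new test throws, assuming the --harmony-tostring semantics under test, where Object.prototype.toString consults Symbol.toStringTag (the TypeError and the name `loud` are illustrative):

// a < b performs ToPrimitive(a, hint Number): Object.prototype.valueOf
// returns the object itself, so the operation falls back to toString(),
// and Object.prototype.toString evaluates the throwing @@toStringTag getter.
var loud = {};
Object.defineProperty(loud, Symbol.toStringTag, {
  get: function() { throw new TypeError("@@toStringTag called!"); }
});
try {
  loud < loud;  // getter fires during ToPrimitive, so this throws
} catch (e) {
  // e is the TypeError thrown by the getter above.
}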
diff --git a/deps/v8/test/mjsunit/compare-known-objects.js b/deps/v8/test/mjsunit/compare-known-objects.js
index afffc07014..051b12d700 100644
--- a/deps/v8/test/mjsunit/compare-known-objects.js
+++ b/deps/v8/test/mjsunit/compare-known-objects.js
@@ -39,6 +39,22 @@ function eq_strict(a, b) {
return a === b;
}
+function le(a, b) {
+ return a <= b;
+}
+
+function lt(a, b) {
+ return a < b;
+}
+
+function ge(a, b) {
+ return a >= b;
+}
+
+function gt(a, b) {
+ return a > b;
+}
+
function test(a, b) {
// Check CompareIC for equality of known objects.
assertTrue(eq(a, a));
@@ -48,6 +64,22 @@ function test(a, b) {
assertTrue(eq_strict(a, a));
assertTrue(eq_strict(b, b));
assertFalse(eq_strict(a, b));
+ // Check CompareIC for less than or equal of known objects.
+ assertTrue(le(a, a));
+ assertTrue(le(a, b));
+ assertTrue(le(b, a));
+ // Check CompareIC for less than of known objects.
+ assertFalse(lt(a, a));
+ assertFalse(lt(a, b));
+ assertFalse(lt(b, a));
+ // Check CompareIC for greater than or equal of known objects.
+ assertTrue(ge(a, a));
+ assertTrue(ge(a, b));
+ assertTrue(ge(b, a));
+ // Check CompareIC for greater than of known objects.
+ assertFalse(gt(a, a));
+ assertFalse(gt(a, b));
+ assertFalse(gt(b, a));
}
function O(){};
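For both compare-known-objects variants above, a short sketch of why le/ge always hold while lt/gt always fail for any pair of plain objects (standard relational-comparison semantics):

// Relational comparison converts both operands with ToPrimitive (number
// hint); plain objects have no primitive-returning valueOf, so both sides
// become the string "[object Object]", and equal strings compare as
// neither less nor greater.
var a = {}, b = {};
assertEquals(String(a), String(b));
assertFalse(a < b);  // "x" <  "x" is false
assertTrue(a <= b);  // "x" <= "x" is true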
diff --git a/deps/v8/test/mjsunit/compiler/jsnatives.js b/deps/v8/test/mjsunit/compiler/jsnatives.js
index ab70abc1bb..74d88ba3a6 100644
--- a/deps/v8/test/mjsunit/compiler/jsnatives.js
+++ b/deps/v8/test/mjsunit/compiler/jsnatives.js
@@ -29,5 +29,4 @@
// Test call of JS runtime functions.
-var a = %MakeError(0, "error");
-assertInstanceof(a, Error);
+assertEquals(1, %to_number_fun("1"));
diff --git a/deps/v8/test/mjsunit/compiler/regress-4389-1.js b/deps/v8/test/mjsunit/compiler/regress-4389-1.js
new file mode 100644
index 0000000000..c58ce2da40
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-4389-1.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --dead-code-elimination
+
+function foo(x) { Math.fround(x); }
+foo(1);
+foo(2);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(function() { foo(Symbol()) }, TypeError);
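This file and the five regress-4389 siblings that follow share one shape; a hedged reading of what they guard against (function name illustrative):

// The Math.* result is dead, but the implicit ToNumber conversion of the
// argument is an observable side effect: ToNumber(Symbol()) must throw a
// TypeError. The regression was dead-code elimination dropping the unused
// call together with its conversion, silently swallowing the throw.
function dropsResult(x) { Math.fround(x); }  // result intentionally unused
dropsResult(1);                              // warm up on numbers
assertThrows(function() { dropsResult(Symbol()); }, TypeError);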
diff --git a/deps/v8/test/mjsunit/compiler/regress-4389-2.js b/deps/v8/test/mjsunit/compiler/regress-4389-2.js
new file mode 100644
index 0000000000..3b720a5a95
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-4389-2.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --dead-code-elimination
+
+function foo(x) { Math.sqrt(x); }
+foo(1);
+foo(2);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(function() { foo(Symbol()) }, TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/regress-4389-3.js b/deps/v8/test/mjsunit/compiler/regress-4389-3.js
new file mode 100644
index 0000000000..9aa72d1ac9
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-4389-3.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --dead-code-elimination
+
+function foo(x) { Math.floor(x); }
+foo(1);
+foo(2);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(function() { foo(Symbol()) }, TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/regress-4389-4.js b/deps/v8/test/mjsunit/compiler/regress-4389-4.js
new file mode 100644
index 0000000000..e824973fac
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-4389-4.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --dead-code-elimination
+
+function foo(x) { Math.round(x); }
+foo(1);
+foo(2);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(function() { foo(Symbol()) }, TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/regress-4389-5.js b/deps/v8/test/mjsunit/compiler/regress-4389-5.js
new file mode 100644
index 0000000000..64797bc76c
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-4389-5.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --dead-code-elimination
+
+function foo(x) { Math.abs(x); }
+foo(1);
+foo(2);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(function() { foo(Symbol()) }, TypeError);
diff --git a/deps/v8/test/mjsunit/compiler/regress-4389-6.js b/deps/v8/test/mjsunit/compiler/regress-4389-6.js
new file mode 100644
index 0000000000..fe065707f4
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-4389-6.js
@@ -0,0 +1,11 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --dead-code-elimination
+
+function foo(x) { Math.log(x); }
+foo(1);
+foo(2);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(function() { foo(Symbol()) }, TypeError);
diff --git a/deps/v8/test/mjsunit/constant-fold-control-instructions.js b/deps/v8/test/mjsunit/constant-fold-control-instructions.js
index eb1b0f3c0b..a6f5540cfd 100644
--- a/deps/v8/test/mjsunit/constant-fold-control-instructions.js
+++ b/deps/v8/test/mjsunit/constant-fold-control-instructions.js
@@ -12,12 +12,7 @@ function test() {
assertEquals("function", typeof function() {});
assertEquals("object", typeof null);
assertEquals("object", typeof {});
-
- assertTrue(%_IsObject({}));
- assertTrue(%_IsObject(null));
- assertTrue(%_IsObject(/regex/));
- assertFalse(%_IsObject(0));
- assertFalse(%_IsObject(""));
+ assertEquals("object", typeof /regex/);
assertTrue(%_IsSmi(1));
assertFalse(%_IsSmi(1.1));
diff --git a/deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js b/deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js
index d432f97074..3fe9fdd143 100644
--- a/deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/d8-worker-sharedarraybuffer.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-sharedarraybuffer --harmony-atomics
+// Flags: --harmony-sharedarraybuffer
if (this.Worker) {
diff --git a/deps/v8/test/mjsunit/date.js b/deps/v8/test/mjsunit/date.js
index 0fa23f8de1..1c637f7fe3 100644
--- a/deps/v8/test/mjsunit/date.js
+++ b/deps/v8/test/mjsunit/date.js
@@ -341,6 +341,11 @@ date.getYear();
assertThrows(function() { Date.prototype.getYear.call(""); }, TypeError);
assertUnoptimized(Date.prototype.getYear);
+(function TestDatePrototypeOrdinaryObject() {
+ assertEquals(Object.prototype, Date.prototype.__proto__);
+ assertThrows(function () { Date.prototype.toString() }, TypeError);
+})();
+
delete Date.prototype.getUTCFullYear;
delete Date.prototype.getUTCMonth;
delete Date.prototype.getUTCDate;
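A brief sketch of the ES2015 change that the new TestDatePrototypeOrdinaryObject block pins down (getTime is used here only as another method that requires a [[DateValue]] receiver):

// Date.prototype is now an ordinary object without a [[DateValue]] slot,
// so Date methods applied to it see a non-Date receiver and throw.
assertEquals(Object.prototype, Object.getPrototypeOf(Date.prototype));
assertThrows(function() { Date.prototype.getTime(); }, TypeError);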
diff --git a/deps/v8/test/mjsunit/debug-liveedit-check-stack.js b/deps/v8/test/mjsunit/debug-liveedit-check-stack.js
index d843ca6a60..ba10122585 100644
--- a/deps/v8/test/mjsunit/debug-liveedit-check-stack.js
+++ b/deps/v8/test/mjsunit/debug-liveedit-check-stack.js
@@ -87,7 +87,7 @@ function WrapInCatcher(f, holder) {
function WrapInNativeCall(f) {
return function() {
- return %Call(undefined, f);
+ return %Call(f, undefined);
};
}
diff --git a/deps/v8/test/mjsunit/es6/arguments-iterator.js b/deps/v8/test/mjsunit/es6/arguments-iterator.js
index cf1e1f97ca..602294e9fc 100644
--- a/deps/v8/test/mjsunit/es6/arguments-iterator.js
+++ b/deps/v8/test/mjsunit/es6/arguments-iterator.js
@@ -160,6 +160,21 @@ function TestAssignmentToIterator() {
TestAssignmentToIterator(1, 2, 3, 4, 5);
+// Regression test for crbug.com/521484.
+function TestAssignmentToIterator2() {
+ var i = 0;
+ arguments.__defineGetter__('callee', function(){});
+ arguments.__defineGetter__('length', function(){ return 1 });
+ arguments[Symbol.iterator] = [].entries;
+ for (var entry of arguments) {
+ assertEquals([i, arguments[i]], entry);
+ i++;
+ }
+
+ assertEquals(arguments.length, i);
+}
+TestAssignmentToIterator2(1, 2, 3, 4, 5);
+
function TestArgumentsMutation() {
var i = 0;
for (var x of arguments) {
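A sketch of the machinery behind the new crbug.com/521484 regression test: Array.prototype.entries is generic, so once installed as @@iterator it can drive for-of over any array-like, including a mutated arguments object (function name illustrative):

function pairs() {
  arguments[Symbol.iterator] = [].entries;        // generic array-like iterator
  var seen = [];
  for (var entry of arguments) seen.push(entry);  // entry is [index, value]
  return seen;
}
// pairs('a', 'b') -> [[0, 'a'], [1, 'b']]; iteration is driven by the
// (possibly getter-defined) length property, as the test above relies on.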
diff --git a/deps/v8/test/mjsunit/es6/debug-stepnext-for.js b/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
index fe50fa76b9..001f7053fd 100644
--- a/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
+++ b/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
@@ -106,11 +106,11 @@ var expected = [
// For-of-let: next(), body, next(), ...
"j16","J4","j16","J4","j16","J4","j16",
// For-var: var decl, condition, body, next, condition, body, ...
- "k7","k20","K4","k23","k20","K4","k23","k20","K4","k23","k20",
+ "k7","k20","K4","k26","k20","K4","k26","k20","K4","k26","k20",
// For: init, condition, body, next, condition, body, ...
- "l7","l16","L4","l19","l16","L4","l19","l16","L4","l19","l16",
+ "l7","l16","L4","l22","l16","L4","l22","l16","L4","l22","l16",
// For-let: init, condition, body, next, condition, body, ...
- "m7","m20","M4","m23","m20","M4","m23","m20","M4","m23","m20",
+ "m7","m20","M4","m26","m20","M4","m26","m20","M4","m26","m20",
// Exit.
"y0","z0",
]
diff --git a/deps/v8/test/mjsunit/harmony/object-assign.js b/deps/v8/test/mjsunit/es6/object-assign.js
index 448ce9a0e0..d56cb0d1cf 100644
--- a/deps/v8/test/mjsunit/harmony/object-assign.js
+++ b/deps/v8/test/mjsunit/es6/object-assign.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-object
-
// Based on Mozilla Object.assign() tests
function checkDataProperty(object, propertyKey, value, writable, enumerable, configurable) {
diff --git a/deps/v8/test/mjsunit/es6/object-literals-property-shorthand.js b/deps/v8/test/mjsunit/es6/object-literals-property-shorthand.js
index 29ce66d270..f0ddd68d42 100644
--- a/deps/v8/test/mjsunit/es6/object-literals-property-shorthand.js
+++ b/deps/v8/test/mjsunit/es6/object-literals-property-shorthand.js
@@ -10,6 +10,14 @@
})();
+(function TestBasicsGetSet() {
+ var get = 1, set = 2;
+ var object = {get, set};
+ assertEquals(1, object.get);
+ assertEquals(2, object.set);
+})();
+
+
(function TestDescriptor() {
var x = 1;
var object = {x};
diff --git a/deps/v8/test/mjsunit/es6/promises.js b/deps/v8/test/mjsunit/es6/promises.js
index 19239b601b..405da02b53 100644
--- a/deps/v8/test/mjsunit/es6/promises.js
+++ b/deps/v8/test/mjsunit/es6/promises.js
@@ -62,7 +62,15 @@ function clearProp(o, name) {
// Find intrinsics and null them out.
var globals = Object.getOwnPropertyNames(this)
-var whitelist = {Promise: true, TypeError: true}
+var whitelist = {
+ Promise: true,
+ TypeError: true,
+ String: true,
+ JSON: true,
+ Error: true,
+ MjsUnitAssertionError: true
+};
+
for (var i in globals) {
var name = globals[i]
if (name in whitelist || name[0] === name[0].toLowerCase()) delete globals[i]
@@ -97,7 +105,6 @@ function assertAsyncDone(iteration) {
});
}
-
(function() {
assertThrows(function() { Promise(function() {}) }, TypeError)
})();
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-468661.js b/deps/v8/test/mjsunit/es6/regress/regress-468661.js
index e569474d23..543a87c1df 100644
--- a/deps/v8/test/mjsunit/es6/regress/regress-468661.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-468661.js
@@ -63,8 +63,8 @@ var sum = 0;
debugger; // Break 0.
for (let i=0; // Break 1.
- i < 1; // Break 2. // Break 5. // Break 6.
- i++) {
+ i < 1; // Break 2. // Break 6.
+ i++) { // Break 5.
let key = i; // Break 3.
sum += key; // Break 4.
}
diff --git a/deps/v8/test/mjsunit/es6/toMethod.js b/deps/v8/test/mjsunit/es6/toMethod.js
deleted file mode 100644
index c18251b2dc..0000000000
--- a/deps/v8/test/mjsunit/es6/toMethod.js
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --allow-natives-syntax
-
-
-(function TestSingleClass() {
- function f(x) {
- var a = [0, 1, 2]
- return a[x];
- }
-
- function ClassD() { }
-
- assertEquals(1, f(1));
- var g = %ToMethod(f, ClassD.prototype);
- assertEquals(1, g(1));
- assertEquals(undefined, f[%HomeObjectSymbol()]);
- assertEquals(ClassD.prototype, g[%HomeObjectSymbol()]);
-}());
-
-
-(function TestClassHierarchy() {
- function f(x) {
- return function g(y) { x++; return x + y; };
- }
-
- function Base() {}
- function Derived() { }
- Derived.prototype = Object.create(Base.prototype);
-
- var q = f(0);
- assertEquals(2, q(1));
- assertEquals(3, q(1));
- var g = %ToMethod(q, Derived.prototype);
- assertFalse(g === q);
- assertEquals(4, g(1));
- assertEquals(5, q(1));
-}());
-
-
-(function TestPrototypeChain() {
- var o = {};
- var o1 = {};
- function f() { }
-
- function g() { }
-
- var fMeth = %ToMethod(f, o);
- assertEquals(o, fMeth[%HomeObjectSymbol()]);
- g.__proto__ = fMeth;
- assertEquals(undefined, g[%HomeObjectSymbol()]);
- var gMeth = %ToMethod(g, o1);
- assertEquals(fMeth, gMeth.__proto__);
- assertEquals(o, fMeth[%HomeObjectSymbol()]);
- assertEquals(o1, gMeth[%HomeObjectSymbol()]);
-}());
-
-
-(function TestBoundFunction() {
- var o = {};
- var p = {};
-
-
- function f(x, y, z, w) {
- assertEquals(o, this);
- assertEquals(1, x);
- assertEquals(2, y);
- assertEquals(3, z);
- assertEquals(4, w);
- return x+y+z+w;
- }
-
- var fBound = f.bind(o, 1, 2, 3);
- var fMeth = %ToMethod(fBound, p);
- assertEquals(10, fMeth(4));
- assertEquals(10, fMeth.call(p, 4));
- var fBound1 = fBound.bind(o, 4);
- assertEquals(10, fBound1());
- var fMethBound = fMeth.bind(o, 4);
- assertEquals(10, fMethBound());
-}());
-
-(function TestOptimized() {
- function f(o) {
- return o.x;
- }
- var o = {x : 15};
- assertEquals(15, f(o));
- assertEquals(15, f(o));
- %OptimizeFunctionOnNextCall(f);
- assertEquals(15, f(o));
- var g = %ToMethod(f, {});
- var o1 = {y : 1024, x : "abc"};
- assertEquals("abc", f(o1));
- assertEquals("abc", g(o1));
-} ());
-
-(function TestExtensibility() {
- function f() {}
- Object.preventExtensions(f);
- assertFalse(Object.isExtensible(f));
- var m = %ToMethod(f, {});
- assertTrue(Object.isExtensible(m));
-}());
diff --git a/deps/v8/test/mjsunit/harmony/array-length.js b/deps/v8/test/mjsunit/harmony/array-length.js
new file mode 100644
index 0000000000..df488196ff
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/array-length.js
@@ -0,0 +1,208 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-tolength
+
+// Test array functions do not cause infinite loops when length is negative,
+// Number.MAX_VALUE, etc.
+
+// ArrayToString
+
+var o = { length: Number.MIN_VALUE };
+var result = Array.prototype.toString.call(o);
+assertEquals("[object Object]", result);
+
+// ArrayToLocaleString
+
+var o = { length: Number.MIN_VALUE };
+var result = Array.prototype.toLocaleString.call(o);
+assertEquals("", result);
+
+// ArrayJoin
+
+var o = { length: Number.MIN_VALUE };
+var result = Array.prototype.join.call(o);
+assertEquals(0, result.length);
+
+// ArrayPush
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.push.call(o, 1);
+assertEquals(1, o.length);
+assertEquals(1, o[0]);
+
+var o = { length: Number.MAX_VALUE };
+Array.prototype.push.call(o, 1);
+assertEquals(o.length, Number.MAX_SAFE_INTEGER + 1);
+assertEquals(1, o[Number.MAX_SAFE_INTEGER]);
+
+Array.prototype.push.call(o, 2);
+assertEquals(o.length, Number.MAX_SAFE_INTEGER + 1);
+assertEquals(2, o[Number.MAX_SAFE_INTEGER]);
+
+// ArrayPop
+
+var o = { length: 0 };
+Array.prototype.pop.call(o);
+assertEquals(0, o.length);
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.pop.call(o);
+assertEquals(0, o.length);
+
+var o = { length: Number.MAX_VALUE };
+Array.prototype.pop.call(o);
+assertEquals(o.length, Number.MAX_SAFE_INTEGER - 1);
+
+// ArrayReverse
+
+var o = { 0: 'foo', length: Number.MIN_VALUE }
+var result = Array.prototype.reverse.call(o);
+assertEquals('object', typeof(result));
+assertEquals(Number.MIN_VALUE, result.length);
+assertEquals(Number.MIN_VALUE, o.length);
+
+// ArrayShift
+
+var o = { 0: "foo", length: Number.MIN_VALUE }
+var result = Array.prototype.shift.call(o);
+assertEquals(undefined, result);
+assertEquals(0, o.length);
+
+// ArrayUnshift
+
+var o = { length: 0 };
+Array.prototype.unshift.call(o);
+assertEquals(0, o.length);
+
+var o = { length: 0 };
+Array.prototype.unshift.call(o, 'foo');
+assertEquals('foo', o[0]);
+assertEquals(1, o.length);
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.unshift.call(o);
+assertEquals(0, o.length);
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.unshift.call(o, 'foo');
+assertEquals('foo', o[0]);
+assertEquals(1, o.length);
+
+// ArraySplice
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.splice.call(o);
+assertEquals(0, o.length);
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.splice.call(o, 0, 10, ['foo']);
+assertEquals(['foo'], o[0]);
+assertEquals(1, o.length);
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.splice.call(o, -1);
+assertEquals(0, o.length);
+
+var o = { length: Number.MAX_SAFE_INTEGER };
+Array.prototype.splice.call(o, -1);
+assertEquals(Number.MAX_SAFE_INTEGER - 1, o.length);
+
+// ArraySlice
+
+var o = { length: Number.MIN_VALUE };
+var result = Array.prototype.slice.call(o);
+assertEquals(0, result.length);
+
+var o = { length: Number.MIN_VALUE };
+var result = Array.prototype.slice.call(o, Number.MAX_VALUE);
+assertEquals(0, result.length);
+
+var o = { length: Number.MAX_VALUE };
+var result = Array.prototype.slice.call(o, Number.MAX_VALUE - 1);
+assertEquals(0, result.length);
+
+// ArrayIndexOf
+
+var o = { length: Number.MIN_VALUE };
+var result = Array.prototype.indexOf.call(o);
+assertEquals(-1, result);
+
+var o = { length: Number.MAX_SAFE_INTEGER }
+o[Number.MAX_SAFE_INTEGER - 1] = "foo"
+var result = Array.prototype.indexOf.call(o,
+ "foo", Number.MAX_SAFE_INTEGER - 2);
+assertEquals(Number.MAX_SAFE_INTEGER - 1, result);
+
+var o = { length: Number.MAX_SAFE_INTEGER };
+o[Number.MAX_SAFE_INTEGER - 1] = "foo";
+var result = Array.prototype.indexOf.call(o, "foo", -1);
+assertEquals(Number.MAX_SAFE_INTEGER - 1, result);
+
+// ArrayLastIndexOf
+
+var o = { length: Number.MIN_VALUE };
+var result = Array.prototype.lastIndexOf.call(o);
+assertEquals(-1, result);
+
+var o = { length: Number.MAX_SAFE_INTEGER }
+o[Number.MAX_SAFE_INTEGER - 1] = "foo"
+var result = Array.prototype.lastIndexOf.call(o,
+ "foo", Number.MAX_SAFE_INTEGER);
+assertEquals(Number.MAX_SAFE_INTEGER - 1, result);
+
+var o = { length: Number.MAX_SAFE_INTEGER };
+o[Number.MAX_SAFE_INTEGER - 1] = "foo";
+var result = Array.prototype.lastIndexOf.call(o, "foo", -1);
+assertEquals(Number.MAX_SAFE_INTEGER - 1, result);
+
+// ArrayFilter
+
+var func = function(v) { return v; }
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.filter.call(o, func);
+assertEquals(Number.MIN_VALUE, o.length);
+
+// ArrayForEach
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.forEach.call(o, func);
+assertEquals(Number.MIN_VALUE, o.length);
+
+// ArraySome
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.some.call(o, func);
+assertEquals(Number.MIN_VALUE, o.length);
+
+// ArrayEvery
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.every.call(o, func);
+assertEquals(Number.MIN_VALUE, o.length);
+
+// ArrayMap
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.map.call(o, func);
+assertEquals(Number.MIN_VALUE, o.length);
+
+// ArrayReduce
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.reduce.call(o, func, 0);
+assertEquals(Number.MIN_VALUE, o.length);
+
+// ArrayReduceRight
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.reduceRight.call(o, func, 0);
+assertEquals(Number.MIN_VALUE, o.length);
+
+// ArrayFill
+
+var o = { length: Number.MIN_VALUE };
+Array.prototype.fill(o, 0);
+assertEquals(Number.MIN_VALUE, o.length);
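The whole file leans on one abstract operation; a minimal sketch of ToLength under the --harmony-tolength semantics being tested (the helper name is mine):

// Lengths are coerced into the integer range [0, 2^53 - 1].
function toLength(v) {
  var n = Math.trunc(Number(v)) || 0;  // ToInteger; NaN and -0 become 0
  return Math.min(Math.max(n, 0), Math.pow(2, 53) - 1);
}
// toLength(Number.MIN_VALUE) === 0          (5e-324 truncates to 0)
// toLength(Number.MAX_VALUE) === 2^53 - 1   (Number.MAX_SAFE_INTEGER)
// Hence push on { length: Number.MAX_VALUE } writes o[MAX_SAFE_INTEGER] and
// saturates: every later push re-writes that same slot, as asserted above.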
diff --git a/deps/v8/test/mjsunit/harmony/arrow-rest-params-lazy-parsing.js b/deps/v8/test/mjsunit/harmony/arrow-rest-params-lazy-parsing.js
new file mode 100644
index 0000000000..6e760fcb17
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/arrow-rest-params-lazy-parsing.js
@@ -0,0 +1,145 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rest-parameters --harmony-arrow-functions
+// Flags: --min-preparse-length=0
+
+(function testRestIndex() {
+ assertEquals(5, ((...args) => args.length)(1,2,3,4,5));
+ assertEquals(4, ((a, ...args) => args.length)(1,2,3,4,5));
+ assertEquals(3, ((a, b, ...args) => args.length)(1,2,3,4,5));
+ assertEquals(2, ((a, b, c, ...args) => args.length)(1,2,3,4,5));
+ assertEquals(1, ((a, b, c, d, ...args) => args.length)(1,2,3,4,5));
+ assertEquals(0, ((a, b, c, d, e, ...args) => args.length)(1,2,3,4,5));
+})();
+
+// strictTest and sloppyTest should be called with descending natural
+// numbers, as in:
+//
+// strictTest(6,5,4,3,2,1)
+//
+var strictTest = (function() {
+ "use strict";
+ return (a, b, ...c) => {
+ assertEquals(Array, c.constructor);
+ assertTrue(Array.isArray(c));
+
+ var expectedLength = (a === undefined) ? 0 : a - 2;
+ assertEquals(expectedLength, c.length);
+
+ for (var i = 2; i < a; ++i) {
+ assertEquals(c[i - 2], a - i);
+ }
+ };
+})();
+
+var sloppyTest = (a, b, ...c) => {
+ assertEquals(Array, c.constructor);
+ assertTrue(Array.isArray(c));
+
+ var expectedLength = (a === undefined) ? 0 : a - 2;
+ assertEquals(expectedLength, c.length);
+
+ for (var i = 2; i < a; ++i) {
+ assertEquals(c[i - 2], a - i);
+ }
+};
+
+
+var O = {
+ strict: strictTest,
+ sloppy: sloppyTest
+};
+
+(function testStrictRestParamArity() {
+ assertEquals(2, strictTest.length);
+ assertEquals(2, O.strict.length);
+})();
+
+
+(function testRestParamsStrictMode() {
+ strictTest();
+ strictTest(2, 1);
+ strictTest(6, 5, 4, 3, 2, 1);
+ strictTest(3, 2, 1);
+ O.strict();
+ O.strict(2, 1);
+ O.strict(6, 5, 4, 3, 2, 1);
+ O.strict(3, 2, 1);
+})();
+
+
+(function testRestParamsStrictModeApply() {
+ strictTest.apply(null, []);
+ strictTest.apply(null, [2, 1]);
+ strictTest.apply(null, [6, 5, 4, 3, 2, 1]);
+ strictTest.apply(null, [3, 2, 1]);
+ O.strict.apply(O, []);
+ O.strict.apply(O, [2, 1]);
+ O.strict.apply(O, [6, 5, 4, 3, 2, 1]);
+ O.strict.apply(O, [3, 2, 1]);
+})();
+
+
+(function testRestParamsStrictModeCall() {
+ strictTest.call(null);
+ strictTest.call(null, 2, 1);
+ strictTest.call(null, 6, 5, 4, 3, 2, 1);
+ strictTest.call(null, 3, 2, 1);
+ O.strict.call(O);
+ O.strict.call(O, 2, 1);
+ O.strict.call(O, 6, 5, 4, 3, 2, 1);
+ O.strict.call(O, 3, 2, 1);
+})();
+
+
+(function testsloppyRestParamArity() {
+ assertEquals(2, sloppyTest.length);
+ assertEquals(2, O.sloppy.length);
+})();
+
+
+(function testRestParamsSloppyMode() {
+ sloppyTest();
+ sloppyTest(2, 1);
+ sloppyTest(6, 5, 4, 3, 2, 1);
+ sloppyTest(3, 2, 1);
+ O.sloppy();
+ O.sloppy(2, 1);
+ O.sloppy(6, 5, 4, 3, 2, 1);
+ O.sloppy(3, 2, 1);
+})();
+
+
+(function testRestParamssloppyModeApply() {
+ sloppyTest.apply(null, []);
+ sloppyTest.apply(null, [2, 1]);
+ sloppyTest.apply(null, [6, 5, 4, 3, 2, 1]);
+ sloppyTest.apply(null, [3, 2, 1]);
+ O.sloppy.apply(O, []);
+ O.sloppy.apply(O, [2, 1]);
+ O.sloppy.apply(O, [6, 5, 4, 3, 2, 1]);
+ O.sloppy.apply(O, [3, 2, 1]);
+})();
+
+
+(function testRestParamssloppyModeCall() {
+ sloppyTest.call(null);
+ sloppyTest.call(null, 2, 1);
+ sloppyTest.call(null, 6, 5, 4, 3, 2, 1);
+ sloppyTest.call(null, 3, 2, 1);
+ O.sloppy.call(O);
+ O.sloppy.call(O, 2, 1);
+ O.sloppy.call(O, 6, 5, 4, 3, 2, 1);
+ O.sloppy.call(O, 3, 2, 1);
+})();
+
+
+(function testUnmappedArguments() {
+ // Normal functions make their arguments object unmapped, but arrow
+ // functions don't have an arguments object anyway. Check that the
+ // right thing happens for arguments in arrow functions with rest
+ // parameters.
+ assertSame(arguments, ((...rest) => arguments)());
+})();
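A small sketch of the lexical-arguments point made by testUnmappedArguments, under the same flags as the file above (function name illustrative):

// Arrow functions have no own 'arguments' binding; the identifier resolves
// lexically to the enclosing non-arrow function's arguments object.
function outer() {
  var viaArrow = ((...rest) => arguments)();
  return viaArrow === arguments;  // true: the same object, not a rest copy
}
// outer(1, 2) -> true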
diff --git a/deps/v8/test/mjsunit/harmony/arrow-rest-params.js b/deps/v8/test/mjsunit/harmony/arrow-rest-params.js
index b1e8dcc1b9..0ee77390ed 100644
--- a/deps/v8/test/mjsunit/harmony/arrow-rest-params.js
+++ b/deps/v8/test/mjsunit/harmony/arrow-rest-params.js
@@ -18,18 +18,20 @@
//
// strictTest(6,5,4,3,2,1)
//
-var strictTest = (a, b, ...c) => {
+var strictTest = (() => {
"use strict";
- assertEquals(Array, c.constructor);
- assertTrue(Array.isArray(c));
+ return (a, b, ...c) => {
+ assertEquals(Array, c.constructor);
+ assertTrue(Array.isArray(c));
- var expectedLength = (a === undefined) ? 0 : a - 2;
- assertEquals(expectedLength, c.length);
+ var expectedLength = (a === undefined) ? 0 : a - 2;
+ assertEquals(expectedLength, c.length);
- for (var i = 2; i < a; ++i) {
- assertEquals(c[i - 2], a - i);
- }
-}
+ for (var i = 2; i < a; ++i) {
+ assertEquals(c[i - 2], a - i);
+ }
+ };
+})();
var sloppyTest = (a, b, ...c) => {
assertEquals(Array, c.constructor);
diff --git a/deps/v8/test/mjsunit/harmony/atomics.js b/deps/v8/test/mjsunit/harmony/atomics.js
index bff9f95a81..dbd372fa6d 100644
--- a/deps/v8/test/mjsunit/harmony/atomics.js
+++ b/deps/v8/test/mjsunit/harmony/atomics.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-atomics --harmony-sharedarraybuffer
+// Flags: --harmony-sharedarraybuffer
//
function toRangeWrapped(value) {
@@ -38,11 +38,6 @@ var IntegerTypedArrayConstructors = [
makeConstructorObject(Uint32Array, 0, 0xffffffff, toRangeWrapped),
];
-var TypedArrayConstructors = IntegerTypedArrayConstructors.concat([
- {constr: Float32Array},
- {constr: Float64Array},
-]);
-
(function TestBadArray() {
var ab = new ArrayBuffer(16);
var u32a = new Uint32Array(16);
@@ -50,8 +45,8 @@ var TypedArrayConstructors = IntegerTypedArrayConstructors.concat([
var sf32a = new Float32Array(sab);
var sf64a = new Float64Array(sab);
- // Atomic ops required shared typed arrays
- [undefined, 1, 'hi', 3.4, ab, u32a, sab].forEach(function(o) {
+ // Atomic ops require integer shared typed arrays
+ [undefined, 1, 'hi', 3.4, ab, u32a, sab, sf32a, sf64a].forEach(function(o) {
assertThrows(function() { Atomics.compareExchange(o, 0, 0, 0); },
TypeError);
assertThrows(function() { Atomics.load(o, 0); }, TypeError);
@@ -63,16 +58,6 @@ var TypedArrayConstructors = IntegerTypedArrayConstructors.concat([
assertThrows(function() { Atomics.xor(o, 0, 0); }, TypeError);
assertThrows(function() { Atomics.exchange(o, 0, 0); }, TypeError);
});
-
- // Arithmetic atomic ops require integer shared arrays
- [sab, sf32a, sf64a].forEach(function(o) {
- assertThrows(function() { Atomics.add(o, 0, 0); }, TypeError);
- assertThrows(function() { Atomics.sub(o, 0, 0); }, TypeError);
- assertThrows(function() { Atomics.and(o, 0, 0); }, TypeError);
- assertThrows(function() { Atomics.or(o, 0, 0); }, TypeError);
- assertThrows(function() { Atomics.xor(o, 0, 0); }, TypeError);
- assertThrows(function() { Atomics.exchange(o, 0, 0); }, TypeError);
- });
})();
function testAtomicOp(op, ia, index, expectedIndex, name) {
@@ -163,7 +148,7 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
})();
(function TestCompareExchange() {
- TypedArrayConstructors.forEach(function(t) {
+ IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
var name = Object.prototype.toString.call(sta);
@@ -177,32 +162,10 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
assertEquals(50, sta[i], name);
}
});
-
- // * Exact float values should be OK
- // * Infinity, -Infinity should be OK (has exact representation)
- // * NaN is not OK, it has many representations, cannot ensure successful CAS
- // because it does a bitwise compare
- [1.5, 4.25, -1e8, -Infinity, Infinity].forEach(function(v) {
- var sab = new SharedArrayBuffer(10 * Float32Array.BYTES_PER_ELEMENT);
- var sf32a = new Float32Array(sab);
- sf32a[0] = 0;
- assertEquals(0, Atomics.compareExchange(sf32a, 0, 0, v));
- assertEquals(v, sf32a[0]);
- assertEquals(v, Atomics.compareExchange(sf32a, 0, v, 0));
- assertEquals(0, sf32a[0]);
-
- var sab2 = new SharedArrayBuffer(10 * Float64Array.BYTES_PER_ELEMENT);
- var sf64a = new Float64Array(sab2);
- sf64a[0] = 0;
- assertEquals(0, Atomics.compareExchange(sf64a, 0, 0, v));
- assertEquals(v, sf64a[0]);
- assertEquals(v, Atomics.compareExchange(sf64a, 0, v, 0));
- assertEquals(0, sf64a[0]);
- });
})();
(function TestLoad() {
- TypedArrayConstructors.forEach(function(t) {
+ IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
var name = Object.prototype.toString.call(sta);
@@ -216,7 +179,7 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
})();
(function TestStore() {
- TypedArrayConstructors.forEach(function(t) {
+ IntegerTypedArrayConstructors.forEach(function(t) {
var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
var sta = new t.constr(sab);
var name = Object.prototype.toString.call(sta);
@@ -228,20 +191,6 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
assertEquals(100, sta[i], name);
}
});
-
- [1.5, 4.25, -1e8, -Infinity, Infinity, NaN].forEach(function(v) {
- var sab = new SharedArrayBuffer(10 * Float32Array.BYTES_PER_ELEMENT);
- var sf32a = new Float32Array(sab);
- sf32a[0] = 0;
- assertEquals(v, Atomics.store(sf32a, 0, v));
- assertEquals(v, sf32a[0]);
-
- var sab2 = new SharedArrayBuffer(10 * Float64Array.BYTES_PER_ELEMENT);
- var sf64a = new Float64Array(sab2);
- sf64a[0] = 0;
- assertEquals(v, Atomics.store(sf64a, 0, v));
- assertEquals(v, sf64a[0]);
- });
})();
(function TestAdd() {
@@ -348,7 +297,7 @@ function testAtomicOp(op, ia, index, expectedIndex, name) {
// Sizes that aren't equal to a typedarray BYTES_PER_ELEMENT always return
// false.
var validSizes = {};
- TypedArrayConstructors.forEach(function(t) {
+ IntegerTypedArrayConstructors.forEach(function(t) {
validSizes[t.constr.BYTES_PER_ELEMENT] = true;
});
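A summary sketch of the contract after this change: every Atomics operation now requires an integer shared typed array, and the float paths are gone entirely (variable names illustrative):

var sab = new SharedArrayBuffer(16);
var ia = new Int32Array(sab);    // accepted by all Atomics operations
var fa = new Float64Array(sab);  // now rejected by all Atomics operations
Atomics.store(ia, 0, 42);
assertEquals(42, Atomics.load(ia, 0));
assertThrows(function() { Atomics.load(fa, 0); }, TypeError);
// Rationale carried over from the removed comment: compareExchange compares
// bitwise, and NaN has many bit patterns, so float CAS was unreliable.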
diff --git a/deps/v8/test/mjsunit/harmony/block-let-contextual-sloppy.js b/deps/v8/test/mjsunit/harmony/block-let-contextual-sloppy.js
new file mode 100644
index 0000000000..9b3cc44c0d
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-let-contextual-sloppy.js
@@ -0,0 +1,64 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-sloppy --harmony-sloppy-let --harmony-destructuring
+
+
+{
+ assertThrows(function() { return let; }, ReferenceError);
+ let let;
+
+ let = 5;
+ assertEquals(5, let);
+
+ { let let = 1; assertEquals(1, let); }
+ assertEquals(5, let);
+}
+
+assertThrows(function() { return let; }, ReferenceError);
+
+(function() {
+ var let, sum = 0;
+ for (let in [1, 2, 3, 4]) sum += Number(let);
+ assertEquals(6, sum);
+
+ for (let let of [4, 5]) sum += let;
+ assertEquals(15, sum);
+
+ for (let let in [6]) sum += Number([6][let]);
+ assertEquals(21, sum);
+
+ for (let = 7; let < 8; let++) sum += let;
+ assertEquals(28, sum);
+ assertEquals(8, let);
+
+ for (let let = 8; let < 9; let++) sum += let;
+ assertEquals(36, sum);
+ assertEquals(8, let);
+})()
+
+assertThrows(function() { return let; }, ReferenceError);
+
+{
+ let obj = {};
+ let {let} = {let() { return obj; }};
+ let().x = 1;
+ assertEquals(1, obj.x);
+}
+
+{
+ let obj = {};
+ let [let] = [function() { return obj; }];
+ let().x = 1;
+ assertEquals(1, obj.x);
+}
+
+(function() {
+ function let() {
+ return 1;
+ }
+ assertEquals(1, let());
+})()
+
+assertThrows('for (let of []) {}', SyntaxError);
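A compact sketch of the contextual-keyword rule this new file probes (sloppy mode assumed throughout):

// 'let' is an ordinary identifier in sloppy mode except where it could
// begin a lexical declaration head.
var let = 1;                // a legal sloppy-mode binding
for (let in { a: 0 }) {}    // 'let' is the for-in loop variable here
// 'for (let of []) {}' is reserved for a let-of declaration head, so it
// is a SyntaxError -- exactly the final assertThrows above.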
diff --git a/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js b/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js
index 3d529fc36d..8ec1eeacd0 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js
+++ b/deps/v8/test/mjsunit/harmony/block-let-semantics-sloppy.js
@@ -127,8 +127,9 @@ function f() {
}
f();
-// Test that a function declaration introduces a block scoped variable.
-TestAll('{ function k() { return 0; } }; k(); ');
+// Test that a function declaration introduces a block-scoped variable
+// and that function hoisting is suppressed when there is a conflict.
+TestFunctionLocal('{ function k() { return 0; } }; k(); let k;');
// Test that a function declaration sees the scope it resides in.
function f2() {
diff --git a/deps/v8/test/mjsunit/harmony/block-sloppy-function.js b/deps/v8/test/mjsunit/harmony/block-sloppy-function.js
new file mode 100644
index 0000000000..a17a4c0799
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-sloppy-function.js
@@ -0,0 +1,203 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-legacy-const --harmony-sloppy --harmony-sloppy-let
+// Flags: --harmony-sloppy-function --harmony-destructuring
+// Flags: --harmony-rest-parameters
+
+// Test Annex B 3.3 semantics for functions declared in blocks in sloppy mode.
+// http://www.ecma-international.org/ecma-262/6.0/#sec-block-level-function-declarations-web-legacy-compatibility-semantics
+
+(function overridingLocalFunction() {
+ var x = [];
+ assertEquals('function', typeof f);
+ function f() {
+ x.push(1);
+ }
+ f();
+ {
+ f();
+ function f() {
+ x.push(2);
+ }
+ f();
+ }
+ f();
+ {
+ f();
+ function f() {
+ x.push(3);
+ }
+ f();
+ }
+ f();
+ assertArrayEquals([1, 2, 2, 2, 3, 3, 3], x);
+})();
+
+(function newFunctionBinding() {
+ var x = [];
+ assertEquals('undefined', typeof f);
+ {
+ f();
+ function f() {
+ x.push(2);
+ }
+ f();
+ }
+ f();
+ {
+ f();
+ function f() {
+ x.push(3);
+ }
+ f();
+ }
+ f();
+ assertArrayEquals([2, 2, 2, 3, 3, 3], x);
+})();
+
+(function shadowingLetDoesntBind() {
+ let f = 1;
+ assertEquals(1, f);
+ {
+ let y = 3;
+ function f() {
+ y = 2;
+ }
+ f();
+ assertEquals(2, y);
+ }
+ assertEquals(1, f);
+})();
+
+(function shadowingClassDoesntBind() {
+ class f { }
+ assertEquals('class f { }', f.toString());
+ {
+ let y = 3;
+ function f() {
+ y = 2;
+ }
+ f();
+ assertEquals(2, y);
+ }
+ assertEquals('class f { }', f.toString());
+})();
+
+(function shadowingConstDoesntBind() {
+ const f = 1;
+ assertEquals(1, f);
+ {
+ let y = 3;
+ function f() {
+ y = 2;
+ }
+ f();
+ assertEquals(2, y);
+ }
+ assertEquals(1, f);
+})();
+
+(function shadowingVarBinds() {
+ var f = 1;
+ assertEquals(1, f);
+ {
+ let y = 3;
+ function f() {
+ y = 2;
+ }
+ f();
+ assertEquals(2, y);
+ }
+ assertEquals('function', typeof f);
+})();
+
+(function conditional() {
+ if (true) {
+ function f() { return 1; }
+ } else {
+ function f() { return 2; }
+ }
+ assertEquals(1, f());
+
+ if (false) {
+ function g() { return 1; }
+ } else {
+ function g() { return 2; }
+ }
+ assertEquals(2, g());
+})();
+
+(function skipExecution() {
+ {
+ function f() { return 1; }
+ }
+ assertEquals(1, f());
+ {
+ function f() { return 2; }
+ }
+ assertEquals(2, f());
+ L: {
+ assertEquals(3, f());
+ break L;
+ function f() { return 3; }
+ }
+ assertEquals(2, f());
+})();
+
+// Test that hoisting from blocks doesn't happen in global scope
+function globalUnhoisted() { return 0; }
+{
+ function globalUnhoisted() { return 1; }
+}
+assertEquals(0, globalUnhoisted());
+
+// Test that shadowing arguments is fine
+(function shadowArguments(x) {
+ assertArrayEquals([1], arguments);
+ {
+ assertEquals('function', typeof arguments);
+ function arguments() {}
+ assertEquals('function', typeof arguments);
+ }
+ assertEquals('function', typeof arguments);
+})(1);
+
+// Shadow function parameter
+(function shadowParameter(x) {
+ assertEquals(1, x);
+ {
+ function x() {}
+ }
+ assertEquals('function', typeof x);
+})(1);
+
+// Shadow function parameter
+(function shadowDefaultParameter(x = 0) {
+ assertEquals(1, x);
+ {
+ function x() {}
+ }
+ // TODO(littledan): Once destructured parameters are no longer
+ // let-bound, enable this assertion. This is the core of the test.
+ // assertEquals('function', typeof x);
+})(1);
+
+(function shadowRestParameter(...x) {
+ assertArrayEquals([1], x);
+ {
+ function x() {}
+ }
+ // TODO(littledan): Once destructured parameters are no longer
+ // let-bound, enable this assertion. This is the core of the test.
+ // assertEquals('function', typeof x);
+})(1);
+
+assertThrows(function notInDefaultScope(x = y) {
+ {
+ function y() {}
+ }
+ assertEquals('function', typeof y);
+ assertEquals(x, undefined);
+}, ReferenceError);
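A hedged sketch of the Annex B 3.3 "two bindings" model these tests walk through, mirroring newFunctionBinding above (sloppy mode, inside a function body; per globalUnhoisted, this patch does not apply the hoisting at global scope):

// A block-level function declaration creates a lexical binding inside the
// block, and *evaluating* it also copies the function into a function-scoped
// var binding -- unless a conflicting let/const/class binding exists outside.
(function sketch() {
  assertEquals('undefined', typeof f);  // hoisted var f exists, not yet copied
  {
    function f() { return 'inner'; }
    // evaluation point: the outer var f now refers to this function
  }
  assertEquals('inner', f());           // visible after the block
})();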
diff --git a/deps/v8/test/mjsunit/harmony/default-parameters.js b/deps/v8/test/mjsunit/harmony/default-parameters.js
index 43a7acd1c6..b3a79a49a4 100644
--- a/deps/v8/test/mjsunit/harmony/default-parameters.js
+++ b/deps/v8/test/mjsunit/harmony/default-parameters.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --harmony-default-parameters --harmony-arrow-functions
-// Flags: --harmony-rest-parameters
+// Flags: --harmony-rest-parameters --harmony-destructuring
(function TestDefaults() {
@@ -81,109 +81,290 @@
})();
-(function TestParameterScoping() {
- // TODO(rossberg): Add checks for variable declarations in defaults.
+(function TestParameterScopingSloppy() {
var x = 1;
function f1(a = x) { var x = 2; return a; }
assertEquals(1, f1());
function f2(a = x) { function x() {}; return a; }
assertEquals(1, f2());
- function f3(a = x) { 'use strict'; let x = 2; return a; }
+ function f3(a = eval("x")) { var x; return a; }
assertEquals(1, f3());
- function f4(a = x) { 'use strict'; const x = 2; return a; }
+ function f31(a = eval("'use strict'; x")) { var x; return a; }
+ assertEquals(1, f31());
+ function f4(a = function() { return x }) { var x; return a(); }
assertEquals(1, f4());
- function f5(a = x) { 'use strict'; function x() {}; return a; }
+ function f5(a = () => x) { var x; return a(); }
assertEquals(1, f5());
- function f6(a = eval("x")) { var x; return a; }
+ function f6(a = () => eval("x")) { var x; return a(); }
assertEquals(1, f6());
- function f61(a = eval("x")) { 'use strict'; var x; return a; }
+ function f61(a = () => { 'use strict'; return eval("x") }) { var x; return a(); }
assertEquals(1, f61());
- function f62(a = eval("'use strict'; x")) { var x; return a; }
+ function f62(a = () => eval("'use strict'; x")) { var x; return a(); }
assertEquals(1, f62());
- function f7(a = function() { return x }) { var x; return a(); }
- assertEquals(1, f7());
- function f8(a = () => x) { var x; return a(); }
- assertEquals(1, f8());
- function f9(a = () => eval("x")) { var x; return a(); }
- assertEquals(1, f9());
- function f91(a = () => eval("x")) { 'use strict'; var x; return a(); }
- assertEquals(1, f91());
- function f92(a = () => { 'use strict'; return eval("x") }) { var x; return a(); }
- assertEquals(1, f92());
- function f93(a = () => eval("'use strict'; x")) { var x; return a(); }
- assertEquals(1, f93());
var g1 = (a = x) => { var x = 2; return a; };
assertEquals(1, g1());
var g2 = (a = x) => { function x() {}; return a; };
assertEquals(1, g2());
- var g3 = (a = x) => { 'use strict'; let x = 2; return a; };
+ var g3 = (a = eval("x")) => { var x; return a; };
assertEquals(1, g3());
- var g4 = (a = x) => { 'use strict'; const x = 2; return a; };
+ var g31 = (a = eval("'use strict'; x")) => { var x; return a; };
+ assertEquals(1, g31());
+ var g4 = (a = function() { return x }) => { var x; return a(); };
assertEquals(1, g4());
- var g5 = (a = x) => { 'use strict'; function x() {}; return a; };
+ var g5 = (a = () => x) => { var x; return a(); };
assertEquals(1, g5());
- var g6 = (a = eval("x")) => { var x; return a; };
+ var g6 = (a = () => eval("x")) => { var x; return a(); };
assertEquals(1, g6());
- var g61 = (a = eval("x")) => { 'use strict'; var x; return a; };
+ var g61 = (a = () => { 'use strict'; return eval("x") }) => { var x; return a(); };
assertEquals(1, g61());
- var g62 = (a = eval("'use strict'; x")) => { var x; return a; };
+ var g62 = (a = () => eval("'use strict'; x")) => { var x; return a(); };
assertEquals(1, g62());
- var g7 = (a = function() { return x }) => { var x; return a(); };
- assertEquals(1, g7());
- var g8 = (a = () => x) => { var x; return a(); };
- assertEquals(1, g8());
- var g9 = (a = () => eval("x")) => { var x; return a(); };
- assertEquals(1, g9());
- var g91 = (a = () => eval("x")) => { 'use strict'; var x; return a(); };
- assertEquals(1, g91());
- var g92 = (a = () => { 'use strict'; return eval("x") }) => { var x; return a(); };
- assertEquals(1, g92());
- var g93 = (a = () => eval("'use strict'; x")) => { var x; return a(); };
- assertEquals(1, g93());
var f11 = function f(x = f) { var f; return x; }
assertSame(f11, f11());
var f12 = function f(x = f) { function f() {}; return x; }
assertSame(f12, f12());
- var f13 = function f(x = f) { 'use strict'; let f; return x; }
- assertSame(f13, f13());
- var f14 = function f(x = f) { 'use strict'; const f = 0; return x; }
- assertSame(f14, f14());
- var f15 = function f(x = f) { 'use strict'; function f() {}; return x; }
- assertSame(f15, f15());
- var f16 = function f(f = 7, x = f) { return x; }
- assertSame(7, f16());
+ var f13 = function f(f = 7, x = f) { return x; }
+ assertSame(7, f13());
var o1 = {f: function(x = this) { return x; }};
assertSame(o1, o1.f());
assertSame(1, o1.f(1));
})();
+(function TestParameterScopingStrict() {
+ "use strict";
+ var x = 1;
+
+ function f1(a = x) { let x = 2; return a; }
+ assertEquals(1, f1());
+ function f2(a = x) { const x = 2; return a; }
+ assertEquals(1, f2());
+ function f3(a = x) { function x() {}; return a; }
+ assertEquals(1, f3());
+ function f4(a = eval("x")) { var x; return a; }
+ assertEquals(1, f4());
+ function f5(a = () => eval("x")) { var x; return a(); }
+ assertEquals(1, f5());
+
+ var g1 = (a = x) => { let x = 2; return a; };
+ assertEquals(1, g1());
+ var g2 = (a = x) => { const x = 2; return a; };
+ assertEquals(1, g2());
+ var g3 = (a = x) => { function x() {}; return a; };
+ assertEquals(1, g3());
+ var g4 = (a = eval("x")) => { var x; return a; };
+ assertEquals(1, g4());
+ var g5 = (a = () => eval("x")) => { var x; return a(); };
+ assertEquals(1, g5());
+
+ var f11 = function f(x = f) { let f; return x; }
+ assertSame(f11, f11());
+ var f12 = function f(x = f) { const f = 0; return x; }
+ assertSame(f12, f12());
+ var f13 = function f(x = f) { function f() {}; return x; }
+ assertSame(f13, f13());
+})();
+
+(function TestSloppyEvalScoping() {
+ var x = 1;
+
+ function f1(y = eval("var x = 2")) { with ({}) { return x; } }
+ assertEquals(1, f1());
+ function f2(y = eval("var x = 2"), z = x) { return z; }
+ assertEquals(1, f2());
+ assertEquals(1, f2(0));
+ function f3(y = eval("var x = 2"), z = eval("x")) { return z; }
+ assertEquals(1, f3());
+ assertEquals(1, f3(0));
+ function f41({[eval("var x = 2; 'a'")]: w}, z = x) { return z; }
+ assertEquals(1, f41({}));
+ assertEquals(1, f41({a: 0}));
+ function f42({[eval("var x = 2; 'a'")]: w}, z = eval("x")) { return z; }
+ assertEquals(1, f42({}));
+ assertEquals(1, f42({a: 0}));
+ function f43({a: w = eval("var x = 2")}, z = x) { return z; }
+ assertEquals(1, f43({}));
+ assertEquals(1, f43({a: 0}));
+ function f44({a: w = eval("var x = 2")}, z = eval("x")) { return z; }
+ assertEquals(1, f44({}));
+ assertEquals(1, f44({a: 0}));
+
+ function f5({a = eval("var x = 2"), b = x}) { return b; }
+ assertEquals(2, f5({}));
+ assertEquals(1, f5({a: 0}));
+ function f6({a = eval("var x = 2"), b = eval("x")}) { return b; }
+ assertEquals(2, f6({}));
+ assertEquals(1, f6({a: 0}));
+ function f71({[eval("var x = 2; 'a'")]: w, b = x}) { return b; }
+ assertEquals(2, f71({}));
+ assertEquals(2, f71({a: 0}));
+ function f72({[eval("var x = 2; 'a'")]: w, b = eval("x")}) { return b; }
+ assertEquals(2, f72({}));
+ assertEquals(2, f72({a: 0}));
+ function f73({a: w = eval("var x = 2"), b = x}) { return b; }
+ assertEquals(2, f73({}));
+ assertEquals(1, f73({a: 0}));
+ function f74({a: w = eval("var x = 2"), b = eval("x")}) { return b; }
+ assertEquals(2, f74({}));
+ assertEquals(1, f74({a: 0}));
+ function f8(y = (eval("var x = 2"), x)) { return y; }
+ assertEquals(2, f8());
+ assertEquals(0, f8(0));
+
+ function f11(z = eval("var y = 2")) { return y; }
+ assertThrows(f11, ReferenceError);
+ function f12(z = eval("var y = 2"), b = y) {}
+ assertThrows(f12, ReferenceError);
+ function f13(z = eval("var y = 2"), b = eval("y")) {}
+ assertThrows(f13, ReferenceError);
+
+ function f21(f = () => x) { eval("var x = 2"); return f() }
+ assertEquals(1, f21());
+ assertEquals(3, f21(() => 3));
+ function f22(f = () => eval("x")) { eval("var x = 2"); return f() }
+ assertEquals(1, f22());
+ assertEquals(3, f22(() => 3));
+
+ var g1 = (y = eval("var x = 2")) => { with ({}) { return x; } };
+ assertEquals(1, g1());
+ var g2 = (y = eval("var x = 2"), z = x) => { return z; };
+ assertEquals(1, g2());
+ assertEquals(1, g2(0));
+ var g3 = (y = eval("var x = 2"), z = eval("x")) => { return z; };
+ assertEquals(1, g3());
+ assertEquals(1, g3(0));
+ var g41 = ({[eval("var x = 2; 'a'")]: w}, z = x) => { return z; };
+ assertEquals(1, g41({}));
+ assertEquals(1, g41({a: 0}));
+ var g42 = ({[eval("var x = 2; 'a'")]: w}, z = eval("x")) => { return z; };
+ assertEquals(1, g42({}));
+ assertEquals(1, g42({a: 0}));
+ var g43 = ({a: w = eval("var x = 2")}, z = x) => { return z; };
+ assertEquals(1, g43({}));
+ assertEquals(1, g43({a: 0}));
+ var g44 = ({a: w = eval("var x = 2")}, z = eval("x")) => { return z; };
+ assertEquals(1, g44({}));
+ assertEquals(1, g44({a: 0}));
+
+ var g5 = ({a = eval("var x = 2"), b = x}) => { return b; };
+ assertEquals(2, g5({}));
+ assertEquals(1, g5({a: 0}));
+ var g6 = ({a = eval("var x = 2"), b = eval("x")}) => { return b; };
+ assertEquals(2, g6({}));
+ assertEquals(1, g6({a: 0}));
+ var g71 = ({[eval("var x = 2; 'a'")]: w, b = x}) => { return b; };
+ assertEquals(2, g71({}));
+ assertEquals(2, g71({a: 0}));
+ var g72 = ({[eval("var x = 2; 'a'")]: w, b = eval("x")}) => { return b; };
+ assertEquals(2, g72({}));
+ assertEquals(2, g72({a: 0}));
+ var g73 = ({a: w = eval("var x = 2"), b = x}) => { return b; };
+ assertEquals(2, g73({}));
+ assertEquals(1, g73({a: 0}));
+ var g74 = ({a: w = eval("var x = 2"), b = eval("x")}) => { return b; };
+ assertEquals(2, g74({}));
+ assertEquals(1, g74({a: 0}));
+ var g8 = (y = (eval("var x = 2"), x)) => { return y; };
+ assertEquals(2, g8());
+ assertEquals(0, g8(0));
+
+ var g11 = (z = eval("var y = 2")) => { return y; };
+ assertThrows(g11, ReferenceError);
+ var g12 = (z = eval("var y = 2"), b = y) => {};
+ assertThrows(g12, ReferenceError);
+ var g13 = (z = eval("var y = 2"), b = eval("y")) => {};
+ assertThrows(g13, ReferenceError);
+
+ var g21 = (f = () => x) => { eval("var x = 2"); return f() };
+ assertEquals(1, g21());
+ assertEquals(3, g21(() => 3));
+ var g22 = (f = () => eval("x")) => { eval("var x = 2"); return f() };
+ assertEquals(1, g22());
+ assertEquals(3, g22(() => 3));
+})();
+
+
+(function TestStrictEvalScoping() {
+ 'use strict';
+ var x = 1;
+
+ function f1(y = eval("var x = 2")) { return x; }
+ assertEquals(1, f1());
+ function f2(y = eval("var x = 2"), z = x) { return z; }
+ assertEquals(1, f2());
+ assertEquals(1, f2(0));
+ function f3(y = eval("var x = 2"), z = eval("x")) { return z; }
+ assertEquals(1, f3());
+ assertEquals(1, f3(0));
+ function f41({[eval("var x = 2; 'a'")]: w}, z = x) { return z; }
+ assertEquals(1, f41({}));
+ assertEquals(1, f41({a: 0}));
+ function f42({[eval("var x = 2; 'a'")]: w}, z = eval("x")) { return z; }
+ assertEquals(1, f42({}));
+ assertEquals(1, f42({a: 0}));
+ function f43({a: w = eval("var x = 2")}, z = x) { return z; }
+ assertEquals(1, f43({}));
+ assertEquals(1, f43({a: 0}));
+ function f44({a: w = eval("var x = 2")}, z = eval("x")) { return z; }
+ assertEquals(1, f44({}));
+ assertEquals(1, f44({a: 0}));
+
+ function f5({a = eval("var x = 2"), b = x}) { return b; }
+ assertEquals(1, f5({}));
+ assertEquals(1, f5({a: 0}));
+ function f6({a = eval("var x = 2"), b = eval("x")}) { return b; }
+ assertEquals(1, f6({}));
+ assertEquals(1, f6({a: 0}));
+ function f71({[eval("var x = 2; 'a'")]: w, b = x}) { return b; }
+ assertEquals(1, f71({}));
+ assertEquals(1, f71({a: 0}));
+ function f72({[eval("var x = 2; 'a'")]: w, b = eval("x")}) { return b; }
+ assertEquals(1, f72({}));
+ assertEquals(1, f72({a: 0}));
+ function f73({a: w = eval("var x = 2"), b = x}) { return b; }
+ assertEquals(1, f73({}));
+ assertEquals(1, f73({a: 0}));
+ function f74({a: w = eval("var x = 2"), b = eval("x")}) { return b; }
+ assertEquals(1, f74({}));
+ assertEquals(1, f74({a: 0}));
+ function f8(y = (eval("var x = 2"), x)) { return y; }
+ assertEquals(1, f8());
+ assertEquals(0, f8(0));
+
+ function f11(z = eval("var y = 2")) { return y; }
+ assertThrows(f11, ReferenceError);
+ function f12(z = eval("var y = 2"), b = y) {}
+ assertThrows(f12, ReferenceError);
+ function f13(z = eval("var y = 2"), b = eval("y")) {}
+ assertThrows(f13, ReferenceError);
+
+ function f21(f = () => x) { eval("var x = 2"); return f() }
+ assertEquals(1, f21());
+ assertEquals(3, f21(() => 3));
+ function f22(f = () => eval("x")) { eval("var x = 2"); return f() }
+ assertEquals(1, f22());
+ assertEquals(3, f22(() => 3));
+})();
-(function TestParameterTDZ() {
+(function TestParameterTDZSloppy() {
function f1(a = x, x) { return a }
assertThrows(() => f1(undefined, 4), ReferenceError);
assertEquals(4, f1(4, 5));
function f2(a = eval("x"), x) { return a }
assertThrows(() => f2(undefined, 4), ReferenceError);
assertEquals(4, f2(4, 5));
- function f3(a = eval("x"), x) { 'use strict'; return a }
+ function f3(a = eval("'use strict'; x"), x) { return a }
assertThrows(() => f3(undefined, 4), ReferenceError);
assertEquals(4, f3(4, 5));
- function f4(a = eval("'use strict'; x"), x) { return a }
- assertThrows(() => f4(undefined, 4), ReferenceError);
- assertEquals(4, f4(4, 5));
-
- function f5(a = () => x, x) { return a() }
+ function f4(a = () => x, x) { return a() }
+ assertEquals(4, f4(() => 4, 5));
+ function f5(a = () => eval("x"), x) { return a() }
assertEquals(4, f5(() => 4, 5));
- function f6(a = () => eval("x"), x) { return a() }
+ function f6(a = () => eval("'use strict'; x"), x) { return a() }
assertEquals(4, f6(() => 4, 5));
- function f7(a = () => eval("x"), x) { 'use strict'; return a() }
- assertEquals(4, f7(() => 4, 5));
- function f8(a = () => eval("'use strict'; x"), x) { return a() }
- assertEquals(4, f8(() => 4, 5));
function f11(a = x, x = 2) { return a }
assertThrows(() => f11(), ReferenceError);
@@ -195,36 +376,49 @@
assertThrows(() => f12(undefined), ReferenceError);
assertThrows(() => f12(undefined, 4), ReferenceError);
assertEquals(4, f12(4, 5));
- function f13(a = eval("x"), x = 2) { 'use strict'; return a }
+ function f13(a = eval("'use strict'; x"), x = 2) { return a }
assertThrows(() => f13(), ReferenceError);
assertThrows(() => f13(undefined), ReferenceError);
assertThrows(() => f13(undefined, 4), ReferenceError);
assertEquals(4, f13(4, 5));
- function f14(a = eval("'use strict'; x"), x = 2) { return a }
- assertThrows(() => f14(), ReferenceError);
- assertThrows(() => f14(undefined), ReferenceError);
- assertThrows(() => f14(undefined, 4), ReferenceError);
- assertEquals(4, f14(4, 5));
-
- function f34(x = function() { return a }, ...a) { return x()[0] }
- assertEquals(4, f34(undefined, 4));
- function f35(x = () => a, ...a) { return x()[0] }
- assertEquals(4, f35(undefined, 4));
- function f36(x = () => eval("a"), ...a) { return x()[0] }
- assertEquals(4, f36(undefined, 4));
- function f37(x = () => eval("a"), ...a) { 'use strict'; return x()[0] }
- assertEquals(4, f37(undefined, 4));
- function f38(x = () => { 'use strict'; return eval("a") }, ...a) { return x()[0] }
- assertEquals(4, f38(undefined, 4));
- function f39(x = () => eval("'use strict'; a"), ...a) { return x()[0] }
- assertEquals(4, f39(undefined, 4));
-
- var g34 = (x = function() { return a }, ...a) => { return x()[0] };
- assertEquals(4, g34(undefined, 4));
- var g35 = (x = () => a, ...a) => { return x()[0] };
- assertEquals(4, g35(undefined, 4));
+
+ function f21(x = function() { return a }, ...a) { return x()[0] }
+ assertEquals(4, f21(undefined, 4));
+ function f22(x = () => a, ...a) { return x()[0] }
+ assertEquals(4, f22(undefined, 4));
+ function f23(x = () => eval("a"), ...a) { return x()[0] }
+ assertEquals(4, f23(undefined, 4));
+ function f24(x = () => {'use strict'; return eval("a") }, ...a) {
+ return x()[0]
+ }
+ assertEquals(4, f24(undefined, 4));
+ function f25(x = () => eval("'use strict'; a"), ...a) { return x()[0] }
+ assertEquals(4, f25(undefined, 4));
+
+ var g1 = (x = function() { return a }, ...a) => { return x()[0] };
+ assertEquals(4, g1(undefined, 4));
+ var g2 = (x = () => a, ...a) => { return x()[0] };
+ assertEquals(4, g2(undefined, 4));
})();
+(function TestParameterTDZStrict() {
+ "use strict";
+
+ function f1(a = eval("x"), x) { return a }
+ assertThrows(() => f1(undefined, 4), ReferenceError);
+ assertEquals(4, f1(4, 5));
+ function f2(a = () => eval("x"), x) { return a() }
+ assertEquals(4, f2(() => 4, 5));
+
+ function f11(a = eval("x"), x = 2) { return a }
+ assertThrows(() => f11(), ReferenceError);
+ assertThrows(() => f11(undefined), ReferenceError);
+ assertThrows(() => f11(undefined, 4), ReferenceError);
+ assertEquals(4, f11(4, 5));
+
+ function f21(x = () => eval("a"), ...a) { return x()[0] }
+ assertEquals(4, f21(undefined, 4));
+})();
(function TestArgumentsForNonSimpleParameters() {
function f1(x = 900) { arguments[0] = 1; return x }
@@ -237,15 +431,29 @@
(function TestFunctionLength() {
- // TODO(rossberg): Fix arity.
- // assertEquals(0, (function(x = 1) {}).length);
- // assertEquals(0, (function(x = 1, ...a) {}).length);
- // assertEquals(1, (function(x, y = 1) {}).length);
- // assertEquals(1, (function(x, y = 1, ...a) {}).length);
- // assertEquals(2, (function(x, y, z = 1) {}).length);
- // assertEquals(2, (function(x, y, z = 1, ...a) {}).length);
- // assertEquals(1, (function(x, y = 1, z) {}).length);
- // assertEquals(1, (function(x, y = 1, z, ...a) {}).length);
- // assertEquals(1, (function(x, y = 1, z, v = 2) {}).length);
- // assertEquals(1, (function(x, y = 1, z, v = 2, ...a) {}).length);
+ assertEquals(0, (function(x = 1) {}).length);
+ assertEquals(0, (function(x = 1, ...a) {}).length);
+ assertEquals(1, (function(x, y = 1) {}).length);
+ assertEquals(1, (function(x, y = 1, ...a) {}).length);
+ assertEquals(2, (function(x, y, z = 1) {}).length);
+ assertEquals(2, (function(x, y, z = 1, ...a) {}).length);
+ assertEquals(1, (function(x, y = 1, z) {}).length);
+ assertEquals(1, (function(x, y = 1, z, ...a) {}).length);
+ assertEquals(1, (function(x, y = 1, z, v = 2) {}).length);
+ assertEquals(1, (function(x, y = 1, z, v = 2, ...a) {}).length);
+})();
+
+(function TestDirectiveThrows() {
+ "use strict";
+
+ assertThrows(function(){ eval("function(x=1){'use strict';}") }, SyntaxError);
+ assertThrows(function(){ eval("(x=1) => {'use strict';}") }, SyntaxError);
+ assertThrows(
+ function(){ eval("(class{foo(x=1) {'use strict';}});") }, SyntaxError);
+
+ assertThrows(
+ function(){ eval("function(a, x=1){'use strict';}") }, SyntaxError);
+ assertThrows(function(){ eval("(a, x=1) => {'use strict';}") }, SyntaxError);
+ assertThrows(
+ function(){ eval("(class{foo(a, x=1) {'use strict';}});") }, SyntaxError);
})();
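A short sketch of the rule behind the new TestDirectiveThrows block: a 'use strict' directive is a SyntaxError in any function whose parameter list is non-simple (defaults, rest, or destructuring), since the directive would retroactively change how its own parameters parse:

assertThrows(function() {
  eval("(function(x = 1) { 'use strict'; })");
}, SyntaxError);
eval("(function(x) { 'use strict'; })");  // a simple list is still fine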
diff --git a/deps/v8/test/mjsunit/harmony/destructuring.js b/deps/v8/test/mjsunit/harmony/destructuring.js
index 69e144b26f..f1e2210a2a 100644
--- a/deps/v8/test/mjsunit/harmony/destructuring.js
+++ b/deps/v8/test/mjsunit/harmony/destructuring.js
@@ -6,9 +6,11 @@
// Flags: --harmony-default-parameters --harmony-rest-parameters
(function TestObjectLiteralPattern() {
- var { x : x, y : y } = { x : 1, y : 2 };
+ var { x : x, y : y, get, set } = { x : 1, y : 2, get: 3, set: 4 };
assertEquals(1, x);
assertEquals(2, y);
+ assertEquals(3, get);
+ assertEquals(4, set);
var {z} = { z : 3 };
assertEquals(3, z);
@@ -741,16 +743,22 @@
assertEquals(1, f1({}));
function f2({a = x}) { function x() {}; return a; }
assertEquals(1, f2({}));
- function f3({a = x}) { 'use strict'; let x = 2; return a; }
- assertEquals(1, f3({}));
- function f4({a = x}) { 'use strict'; const x = 2; return a; }
- assertEquals(1, f4({}));
- function f5({a = x}) { 'use strict'; function x() {}; return a; }
- assertEquals(1, f5({}));
+ (function() {
+ 'use strict';
+ function f3({a = x}) { let x = 2; return a; }
+ assertEquals(1, f3({}));
+ function f4({a = x}) { const x = 2; return a; }
+ assertEquals(1, f4({}));
+ function f5({a = x}) { function x() {}; return a; }
+ assertEquals(1, f5({}));
+ })();
function f6({a = eval("x")}) { var x; return a; }
assertEquals(1, f6({}));
- function f61({a = eval("x")}) { 'use strict'; var x; return a; }
- assertEquals(1, f61({}));
+ (function() {
+ 'use strict';
+ function f61({a = eval("x")}) { var x; return a; }
+ assertEquals(1, f61({}));
+ })();
function f62({a = eval("'use strict'; x")}) { var x; return a; }
assertEquals(1, f62({}));
function f7({a = function() { return x }}) { var x; return a(); }
@@ -759,8 +767,11 @@
assertEquals(1, f8({}));
function f9({a = () => eval("x")}) { var x; return a(); }
assertEquals(1, f9({}));
- function f91({a = () => eval("x")}) { 'use strict'; var x; return a(); }
- assertEquals(1, f91({}));
+ (function TestInitializedWithEvalArrowStrict() {
+ 'use strict';
+ function f91({a = () => eval("x")}) { var x; return a(); }
+ assertEquals(1, f91({}));
+ })();
function f92({a = () => { 'use strict'; return eval("x") }}) { var x; return a(); }
assertEquals(1, f92({}));
function f93({a = () => eval("'use strict'; x")}) { var x; return a(); }
@@ -770,16 +781,22 @@
assertEquals(1, g1({}));
var g2 = ({a = x}) => { function x() {}; return a; };
assertEquals(1, g2({}));
- var g3 = ({a = x}) => { 'use strict'; let x = 2; return a; };
- assertEquals(1, g3({}));
- var g4 = ({a = x}) => { 'use strict'; const x = 2; return a; };
- assertEquals(1, g4({}));
- var g5 = ({a = x}) => { 'use strict'; function x() {}; return a; };
- assertEquals(1, g5({}));
+ (function() {
+ 'use strict';
+ var g3 = ({a = x}) => { let x = 2; return a; };
+ assertEquals(1, g3({}));
+ var g4 = ({a = x}) => { const x = 2; return a; };
+ assertEquals(1, g4({}));
+ var g5 = ({a = x}) => { function x() {}; return a; };
+ assertEquals(1, g5({}));
+ })();
var g6 = ({a = eval("x")}) => { var x; return a; };
assertEquals(1, g6({}));
- var g61 = ({a = eval("x")}) => { 'use strict'; var x; return a; };
- assertEquals(1, g61({}));
+ (function() {
+ 'use strict';
+ var g61 = ({a = eval("x")}) => { var x; return a; };
+ assertEquals(1, g61({}));
+ })();
var g62 = ({a = eval("'use strict'; x")}) => { var x; return a; };
assertEquals(1, g62({}));
var g7 = ({a = function() { return x }}) => { var x; return a(); };
@@ -788,10 +805,13 @@
assertEquals(1, g8({}));
var g9 = ({a = () => eval("x")}) => { var x; return a(); };
assertEquals(1, g9({}));
- var g91 = ({a = () => eval("x")}) => { 'use strict'; var x; return a(); };
- assertEquals(1, g91({}));
- var g92 = ({a = () => { 'use strict'; return eval("x") }}) => { var x; return a(); };
- assertEquals(1, g92({}));
+ (function() {
+ 'use strict';
+ var g91 = ({a = () => eval("x")}) => { var x; return a(); };
+ assertEquals(1, g91({}));
+ var g92 = ({a = () => { return eval("x") }}) => { var x; return a(); };
+ assertEquals(1, g92({}));
+ })();
var g93 = ({a = () => eval("'use strict'; x")}) => { var x; return a(); };
assertEquals(1, g93({}));
@@ -799,12 +819,15 @@
assertSame(f11, f11({}));
var f12 = function f({x = f}) { function f() {}; return x; }
assertSame(f12, f12({}));
- var f13 = function f({x = f}) { 'use strict'; let f; return x; }
- assertSame(f13, f13({}));
- var f14 = function f({x = f}) { 'use strict'; const f = 0; return x; }
- assertSame(f14, f14({}));
- var f15 = function f({x = f}) { 'use strict'; function f() {}; return x; }
- assertSame(f15, f15({}));
+ (function() {
+ 'use strict';
+ var f13 = function f({x = f}) { let f; return x; }
+ assertSame(f13, f13({}));
+ var f14 = function f({x = f}) { const f = 0; return x; }
+ assertSame(f14, f14({}));
+ var f15 = function f({x = f}) { function f() {}; return x; }
+ assertSame(f15, f15({}));
+ })();
var f16 = function f({f = 7, x = f}) { return x; }
assertSame(7, f16({}));
@@ -827,9 +850,12 @@
function f2({a = eval("x")}, x) { return a }
assertThrows(() => f2({}, 4), ReferenceError);
assertEquals(4, f2({a: 4}, 5));
- function f3({a = eval("x")}, x) { 'use strict'; return a }
- assertThrows(() => f3({}, 4), ReferenceError);
- assertEquals(4, f3({a: 4}, 5));
+ (function() {
+ 'use strict';
+ function f3({a = eval("x")}, x) { return a }
+ assertThrows(() => f3({}, 4), ReferenceError);
+ assertEquals(4, f3({a: 4}, 5));
+ })();
function f4({a = eval("'use strict'; x")}, x) { return a }
assertThrows(() => f4({}, 4), ReferenceError);
assertEquals(4, f4({a: 4}, 5));
@@ -838,8 +864,11 @@
assertEquals(4, f5({a: () => 4}, 5));
function f6({a = () => eval("x")}, x) { return a() }
assertEquals(4, f6({a: () => 4}, 5));
- function f7({a = () => eval("x")}, x) { 'use strict'; return a() }
- assertEquals(4, f7({a: () => 4}, 5));
+ (function() {
+ 'use strict';
+ function f7({a = () => eval("x")}, x) { return a() }
+ assertEquals(4, f7({a: () => 4}, 5));
+ })();
function f8({a = () => eval("'use strict'; x")}, x) { return a() }
assertEquals(4, f8({a: () => 4}, 5));
@@ -849,9 +878,12 @@
function f12({a = eval("b")}, {b}) { return a }
assertThrows(() => f12({}, {b: 4}), ReferenceError);
assertEquals(4, f12({a: 4}, {b: 5}));
- function f13({a = eval("b")}, {b}) { 'use strict'; return a }
- assertThrows(() => f13({}, {b: 4}), ReferenceError);
- assertEquals(4, f13({a: 4}, {b: 5}));
+ (function() {
+ 'use strict';
+ function f13({a = eval("b")}, {b}) { return a }
+ assertThrows(() => f13({}, {b: 4}), ReferenceError);
+ assertEquals(4, f13({a: 4}, {b: 5}));
+ })();
function f14({a = eval("'use strict'; b")}, {b}) { return a }
assertThrows(() => f14({}, {b: 4}), ReferenceError);
assertEquals(4, f14({a: 4}, {b: 5}));
@@ -860,8 +892,11 @@
assertEquals(4, f15({a: () => 4}, {b: 5}));
function f16({a = () => eval("b")}, {b}) { return a() }
assertEquals(4, f16({a: () => 4}, {b: 5}));
- function f17({a = () => eval("b")}, {b}) { 'use strict'; return a() }
- assertEquals(4, f17({a: () => 4}, {b: 5}));
+ (function() {
+ 'use strict';
+ function f17({a = () => eval("b")}, {b}) { return a() }
+ assertEquals(4, f17({a: () => 4}, {b: 5}));
+ })();
function f18({a = () => eval("'use strict'; b")}, {b}) { return a() }
assertEquals(4, f18({a: () => 4}, {b: 5}));
@@ -885,8 +920,11 @@
assertEquals(4, f35({}, 4));
function f36({x = () => eval("a")}, ...a) { return x()[0] }
assertEquals(4, f36({}, 4));
- function f37({x = () => eval("a")}, ...a) { 'use strict'; return x()[0] }
- assertEquals(4, f37({}, 4));
+ (function() {
+ 'use strict';
+ function f37({x = () => eval("a")}, ...a) { return x()[0] }
+ assertEquals(4, f37({}, 4));
+ })();
function f38({x = () => { 'use strict'; return eval("a") }}, ...a) { return x()[0] }
assertEquals(4, f38({}, 4));
function f39({x = () => eval("'use strict'; a")}, ...a) { return x()[0] }
@@ -953,3 +991,35 @@
assertThrows("'use strict'; let x = {}; for (let [x, y] in {x});", ReferenceError);
assertThrows("'use strict'; let x = {}; for (let [y, x] in {x});", ReferenceError);
}());
+
+
+(function TestFunctionLength() {
+ assertEquals(1, (function({}) {}).length);
+ assertEquals(1, (function([]) {}).length);
+ assertEquals(1, (function({x}) {}).length);
+ assertEquals(1, (function({}, ...a) {}).length);
+ assertEquals(1, (function({x}, {y} = {}) {}).length);
+ assertEquals(1, (function({x}, {y} = {}, ...a) {}).length);
+ assertEquals(2, (function(x, {y}, {z} = {}) {}).length);
+ assertEquals(2, (function({x}, {}, {z} = {}, ...a) {}).length);
+ assertEquals(1, (function(x, {y} = {}, {z}) {}).length);
+ assertEquals(1, (function({x}, {y} = {}, {z}, ...a) {}).length);
+ assertEquals(1, (function(x, {y} = {}, {z}, {v} = {}) {}).length);
+ assertEquals(1, (function({x}, {y} = {}, {z}, {v} = {}, ...a) {}).length);
+})();
+
+
+(function TestDirectiveThrows() {
+ "use strict";
+
+ assertThrows(function(){ eval("function({}){'use strict';}") }, SyntaxError);
+ assertThrows(function(){ eval("({}) => {'use strict';}") }, SyntaxError);
+ assertThrows(
+ function(){ eval("(class{foo({}) {'use strict';}});") }, SyntaxError);
+
+ assertThrows(
+ function(){ eval("function(a, {}){'use strict';}") }, SyntaxError);
+ assertThrows(function(){ eval("(a, {}) => {'use strict';}") }, SyntaxError);
+ assertThrows(
+ function(){ eval("(class{foo(a, {}) {'use strict';}});") }, SyntaxError);
+})();
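
The repeated rewrites in this file, hoisting 'use strict' out of functions like f3 or g91 into an enclosing IIFE, and the new TestDirectiveThrows block both follow the rule these tests enforce: a function whose parameter list is non-simple (destructuring, defaults, or rest) may not itself contain a 'use strict' directive. A minimal sketch of the workaround the diff applies, assuming only standard JavaScript:

// SyntaxError, per the rule tested above:
//   function f({a = 1}) { 'use strict'; return a; }

// Legal: make the enclosing scope strict instead, as the diff does.
var f = (function() {
  'use strict';
  return function({a = 1}) { return a; };  // inherits strictness
})();
// f({}) === 1
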
diff --git a/deps/v8/test/mjsunit/harmony/futex.js b/deps/v8/test/mjsunit/harmony/futex.js
index c7e1f5ce2a..06c0a25ba1 100644
--- a/deps/v8/test/mjsunit/harmony/futex.js
+++ b/deps/v8/test/mjsunit/harmony/futex.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony-atomics --harmony-sharedarraybuffer
+// Flags: --allow-natives-syntax --harmony-sharedarraybuffer
(function TestFailsWithNonSharedArray() {
var ab = new ArrayBuffer(16);
diff --git a/deps/v8/test/mjsunit/harmony/proxies-function.js b/deps/v8/test/mjsunit/harmony/proxies-function.js
index c024cef948..3c36a4f204 100644
--- a/deps/v8/test/mjsunit/harmony/proxies-function.js
+++ b/deps/v8/test/mjsunit/harmony/proxies-function.js
@@ -106,10 +106,10 @@ function TestCall(isStrict, callTrap) {
assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
assertSame(o, receiver)
receiver = 333
- assertEquals(42, %Call(o, 11, 31, f))
+ assertEquals(42, %Call(f, o, 11, 31));
assertSame(o, receiver)
receiver = 333
- assertEquals(42, %Call(null, 11, 31, f))
+ assertEquals(42, %Call(f, null, 11, 31));
assertSame(isStrict ? null : global_object, receiver)
receiver = 333
assertEquals(42, %Apply(f, o, [11, 31], 0, 2))
@@ -136,10 +136,10 @@ function TestCall(isStrict, callTrap) {
assertEquals(32, Function.prototype.apply.call(ff, {}, [20]))
assertSame(o, receiver)
receiver = 333
- assertEquals(23, %Call({}, 11, ff))
+ assertEquals(23, %Call(ff, {}, 11));
assertSame(o, receiver)
receiver = 333
- assertEquals(23, %Call({}, 11, 3, ff))
+ assertEquals(23, %Call(ff, {}, 11, 3));
assertSame(o, receiver)
receiver = 333
assertEquals(24, %Apply(ff, {}, [12, 13], 0, 1))
@@ -166,10 +166,10 @@ function TestCall(isStrict, callTrap) {
assertEquals(42, Function.prototype.apply.call(fff, {}))
assertSame(o, receiver)
receiver = 333
- assertEquals(42, %Call({}, fff))
+ assertEquals(42, %Call(fff, {}));
assertSame(o, receiver)
receiver = 333
- assertEquals(42, %Call({}, 11, 3, fff))
+  assertEquals(42, %Call(fff, {}, 11, 3));
assertSame(o, receiver)
receiver = 333
assertEquals(42, %Apply(fff, {}, [], 0, 0))
@@ -211,7 +211,7 @@ function TestCall(isStrict, callTrap) {
assertEquals(32, Function.prototype.apply.call(f, o, [17, 15]))
assertSame(o, receiver)
receiver = 333
- assertEquals(23, %Call(o, 11, 12, f))
+  assertEquals(23, %Call(f, o, 11, 12));
assertSame(o, receiver)
receiver = 333
assertEquals(27, %Apply(f, o, [12, 13, 14], 1, 2))
@@ -280,8 +280,8 @@ function TestCallThrow(callTrap) {
assertThrows(function(){ ({x: f})["x"](11) }, "myexn")
assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
- assertThrows(function(){ %Call({}, f) }, "myexn")
- assertThrows(function(){ %Call({}, 1, 2, f) }, "myexn")
+ assertThrows(function(){ %Call(f, {}) }, "myexn")
+ assertThrows(function(){ %Call(f, {}, 1, 2) }, "myexn")
assertThrows(function(){ %Apply({}, f, [], 3, 0) }, "myexn")
assertThrows(function(){ %Apply({}, f, [3, 4], 0, 1) }, "myexn")
assertThrows(function(){ %_CallFunction({}, f) }, "myexn")
@@ -293,8 +293,8 @@ function TestCallThrow(callTrap) {
assertThrows(function(){ ({x: f})["x"](11) }, "myexn")
assertThrows(function(){ Function.prototype.call.call(f, {}, 2) }, "myexn")
assertThrows(function(){ Function.prototype.apply.call(f, {}, [1]) }, "myexn")
- assertThrows(function(){ %Call({}, f) }, "myexn")
- assertThrows(function(){ %Call({}, 1, 2, f) }, "myexn")
+ assertThrows(function(){ %Call(f, {}) }, "myexn")
+ assertThrows(function(){ %Call(f, {}, 1, 2) }, "myexn")
assertThrows(function(){ %Apply({}, f, [], 3, 0) }, "myexn")
assertThrows(function(){ %Apply({}, f, [3, 4], 0, 1) }, "myexn")
assertThrows(function(){ %_CallFunction({}, f) }, "myexn")
@@ -698,7 +698,7 @@ function TestCalls() {
function(f, x, y, o) { return Function.prototype.call.call(f, o, x, y) },
function(f, x, y, o) { return Function.prototype.apply.call(f, o, [x, y]) },
function(f, x, y, o) { return %_CallFunction(o, x, y, f) },
- function(f, x, y, o) { return %Call(o, x, y, f) },
+ function(f, x, y, o) { return %Call(f, o, x, y) },
function(f, x, y, o) { return %Apply(f, o, [null, x, y, null], 1, 2) },
function(f, x, y, o) { return %Apply(f, o, arguments, 2, 2) },
function(f, x, y, o) { if (typeof o == "object") return o.f(x, y) },
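
The hunks above track a change in the argument order of V8's internal %Call runtime function: the callee moved from the last position to the first, so %Call(o, 11, 31, f) became %Call(f, o, 11, 31), i.e. callee, then receiver, then arguments. A hedged sketch of the equivalence; %Call is V8-internal and only available in d8 with --allow-natives-syntax, so the runnable line uses the standard equivalent:

function add(a, b) { return this.base + a + b; }
var o = { base: 0 };

// New order (d8 with --allow-natives-syntax only):
//   %Call(add, o, 11, 31)  // === 42
// Standard equivalent with identical semantics:
add.call(o, 11, 31);        // === 42
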
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4211.js b/deps/v8/test/mjsunit/harmony/regress/regress-4211.js
new file mode 100644
index 0000000000..8affc7344a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-4211.js
@@ -0,0 +1,12 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-arrow-functions --harmony-rest-parameters
+
+assertThrows("()=>{}()", SyntaxError);
+assertThrows("x=>{}()", SyntaxError);
+assertThrows("(...x)=>{}()", SyntaxError);
+assertThrows("(x)=>{}()", SyntaxError);
+assertThrows("(x,y)=>{}()", SyntaxError);
+assertThrows("(x,y,...z)=>{}()", SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-4400.js b/deps/v8/test/mjsunit/harmony/regress/regress-4400.js
new file mode 100644
index 0000000000..7c42e4f557
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-4400.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-default-parameters --min-preparse-length=0
+
+function borked(a = [], b = {}, c) {}
+borked();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-508074.js b/deps/v8/test/mjsunit/harmony/regress/regress-508074.js
new file mode 100644
index 0000000000..d2864bb956
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-508074.js
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-rest-parameters --harmony-arrow-functions
+// Flags: --allow-natives-syntax
+
+var f = (a, b, ...c) => {
+ print(a);
+ print(b);
+ print(c);
+ assertEquals(6, a);
+ assertEquals(5, b);
+ assertEquals([4, 3, 2, 1], c);
+};
+
+function g() {
+ f(6, 5, 4, 3, 2, 1);
+};
+
+g();
+g();
+g();
+
+%OptimizeFunctionOnNextCall(g);
+g();
diff --git a/deps/v8/test/mjsunit/harmony/rest-params.js b/deps/v8/test/mjsunit/harmony/rest-params.js
index fe7ee2adb8..6ceb87e331 100644
--- a/deps/v8/test/mjsunit/harmony/rest-params.js
+++ b/deps/v8/test/mjsunit/harmony/rest-params.js
@@ -16,18 +16,22 @@
return args.length; })(1,2,3,4,5));
})();
-function strictTest(a, b, ...c) {
+
+var strictTest = (function() {
"use strict";
- assertEquals(Array, c.constructor);
- assertTrue(Array.isArray(c));
+ return function strictTest(a, b, ...c) {
+ assertEquals(Array, c.constructor);
+ assertTrue(Array.isArray(c));
- var expectedLength = arguments.length >= 3 ? arguments.length - 2 : 0;
- assertEquals(expectedLength, c.length);
+ var expectedLength = arguments.length >= 3 ? arguments.length - 2 : 0;
+ assertEquals(expectedLength, c.length);
+
+ for (var i = 2, j = 0; i < arguments.length; ++i) {
+ assertEquals(c[j++], arguments[i]);
+ }
+ };
+})();
- for (var i = 2, j = 0; i < arguments.length; ++i) {
- assertEquals(c[j++], arguments[i]);
- }
-}
function sloppyTest(a, b, ...c) {
assertEquals(Array, c.constructor);
@@ -144,14 +148,15 @@ var O = {
(function testNoAliasArgumentsStrict() {
- function strictF(a, ...rest) {
+ ((function() {
"use strict";
- arguments[0] = 1;
- assertEquals(3, a);
- arguments[1] = 2;
- assertArrayEquals([4, 5], rest);
- }
- strictF(3, 4, 5);
+ return (function strictF(a, ...rest) {
+ arguments[0] = 1;
+ assertEquals(3, a);
+ arguments[1] = 2;
+ assertArrayEquals([4, 5], rest);
+ });
+ })())(3, 4, 5);
})();
@@ -166,22 +171,6 @@ var O = {
})();
-/* TODO(caitp): support arrow functions (blocked on spread operator support)
-(function testRestParamsArrowFunctions() {
- "use strict";
-
- var fn = (a, b, ...c) => c;
- assertEquals([], fn());
- assertEquals([], fn(1, 2));
- assertEquals([3], fn(1, 2, 3));
- assertEquals([3, 4], fn(1, 2, 3, 4));
- assertEquals([3, 4, 5], fn(1, 2, 3, 4, 5));
- assertThrows("var x = ...y => y;", SyntaxError);
- assertEquals([], ((...args) => args)());
- assertEquals([1,2,3], ((...args) => args)(1,2,3));
-})();*/
-
-
(function testRestParamsWithNewTarget() {
"use strict";
class Base {
@@ -212,3 +201,21 @@ var O = {
assertEquals([1, 2, 3], c.child);
assertEquals([1, 2, 3], c.base);
})();
+
+(function TestDirectiveThrows() {
+ "use strict";
+
+ assertThrows(
+ function(){ eval("function(...rest){'use strict';}") }, SyntaxError);
+ assertThrows(function(){ eval("(...rest) => {'use strict';}") }, SyntaxError);
+ assertThrows(
+ function(){ eval("(class{foo(...rest) {'use strict';}});") }, SyntaxError);
+
+ assertThrows(
+ function(){ eval("function(a, ...rest){'use strict';}") }, SyntaxError);
+ assertThrows(
+ function(){ eval("(a, ...rest) => {'use strict';}") }, SyntaxError);
+ assertThrows(
+ function(){ eval("(class{foo(a, ...rest) {'use strict';}});") },
+ SyntaxError);
+})();
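
testNoAliasArgumentsStrict above (and its sloppy sibling) rest on the rule that any function with a non-simple parameter list, here a rest parameter, gets an unmapped arguments object, so writes to arguments[i] never alias the named parameters regardless of mode. A minimal illustration in standard JavaScript:

function noAlias(a, ...rest) {  // non-simple list: arguments is unmapped
  arguments[0] = 99;            // does not write through to a
  return a;
}
// noAlias(3, 4, 5) === 3

function aliased(a) {           // sloppy, simple list: arguments is mapped
  arguments[0] = 99;            // writes through to a
  return a;
}
// aliased(3) === 99
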
diff --git a/deps/v8/test/mjsunit/harmony/simd.js b/deps/v8/test/mjsunit/harmony/simd.js
index 0c52072646..fe4b0358f9 100644
--- a/deps/v8/test/mjsunit/harmony/simd.js
+++ b/deps/v8/test/mjsunit/harmony/simd.js
@@ -45,9 +45,9 @@ function isValidSimdString(string, value, type, lanes) {
}
-var simdTypeNames = ['Float32x4', 'Int32x4', 'Bool32x4',
- 'Int16x8', 'Bool16x8',
- 'Int8x16', 'Bool8x16'];
+var simdTypeNames = ['Float32x4', 'Int32x4', 'Uint32x4', 'Bool32x4',
+ 'Int16x8', 'Uint16x8', 'Bool16x8',
+ 'Int8x16', 'Uint8x16', 'Bool8x16'];
var nonSimdValues = [347, 1.275, NaN, "string", null, undefined, {},
function() {}];
@@ -212,6 +212,24 @@ function TestCoercions(type, lanes) {
test(4294967296, 0);
test(4294967297, 1);
break;
+ case 'Uint32x4':
+ test(Infinity, 0);
+ test(-Infinity, 0);
+ test(NaN, 0);
+ test(0, 0);
+ test(-0, 0);
+ test(Number.MIN_VALUE, 0);
+ test(-Number.MIN_VALUE, 0);
+ test(0.1, 0);
+ test(-0.1, 0);
+ test(1, 1);
+ test(1.1, 1);
+ test(-1, 4294967295);
+ test(-1.6, 4294967295);
+ test(4294967295, 4294967295);
+ test(4294967296, 0);
+ test(4294967297, 1);
+ break;
case 'Int16x8':
test(Infinity, 0);
test(-Infinity, 0);
@@ -233,6 +251,24 @@ function TestCoercions(type, lanes) {
test(65536, 0);
test(65537, 1);
break;
+ case 'Uint16x8':
+ test(Infinity, 0);
+ test(-Infinity, 0);
+ test(NaN, 0);
+ test(0, 0);
+ test(-0, 0);
+ test(Number.MIN_VALUE, 0);
+ test(-Number.MIN_VALUE, 0);
+ test(0.1, 0);
+ test(-0.1, 0);
+ test(1, 1);
+ test(1.1, 1);
+ test(-1, 65535);
+ test(-1.6, 65535);
+ test(65535, 65535);
+ test(65536, 0);
+ test(65537, 1);
+ break;
case 'Int8x16':
test(Infinity, 0);
test(-Infinity, 0);
@@ -254,6 +290,24 @@ function TestCoercions(type, lanes) {
test(256, 0);
test(257, 1);
break;
+ case 'Uint8x16':
+ test(Infinity, 0);
+ test(-Infinity, 0);
+ test(NaN, 0);
+ test(0, 0);
+ test(-0, 0);
+ test(Number.MIN_VALUE, 0);
+ test(-Number.MIN_VALUE, 0);
+ test(0.1, 0);
+ test(-0.1, 0);
+ test(1, 1);
+ test(1.1, 1);
+ test(-1, 255);
+ test(-1.6, 255);
+ test(255, 255);
+ test(256, 0);
+ test(257, 1);
+ break;
case 'Bool32x4':
case 'Bool16x8':
case 'Bool8x16':
@@ -330,8 +384,11 @@ function TestEquality(type, lanes) {
test(NaN, NaN);
break;
case 'Int32x4':
+ case 'Uint32x4':
case 'Int16x8':
+ case 'Uint16x8':
case 'Int8x16':
+ case 'Uint8x16':
test(1, 2);
test(1, 1);
test(1, -1);
@@ -381,8 +438,11 @@ function TestSameValue(type, lanes) {
test(NaN, NaN);
break;
case 'Int32x4':
+ case 'Uint32x4':
case 'Int16x8':
+ case 'Uint16x8':
case 'Int8x16':
+ case 'Uint8x16':
test(1, 2);
test(1, 1);
test(1, -1);
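
All of the new Uint32x4/Uint16x8/Uint8x16 coercion cases above follow one pattern: non-finite values become 0, the value is truncated toward zero, and the result wraps modulo 2^bits, which is why -1 maps to 4294967295, 65535 and 255 in the respective lane widths. A small sketch of that coercion (plain JavaScript, not the engine's code):

// ToUintN as exercised by the lane-coercion tests above.
function toUintN(bits, x) {
  if (!isFinite(x)) return 0;      // NaN, Infinity, -Infinity
  var t = Math.trunc(x);           // 1.1 -> 1, -1.6 -> -1
  var m = Math.pow(2, bits);
  return ((t % m) + m) % m;        // wrap: -1 -> 2^bits - 1
}
// toUintN(8, -1) === 255
// toUintN(16, 65536) === 0
// toUintN(32, 4294967297) === 1
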
diff --git a/deps/v8/test/mjsunit/harmony/spread-call-new-class.js b/deps/v8/test/mjsunit/harmony/spread-call-new-class.js
index fcd0a21bc9..ed95e6cad7 100644
--- a/deps/v8/test/mjsunit/harmony/spread-call-new-class.js
+++ b/deps/v8/test/mjsunit/harmony/spread-call-new-class.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-spreadcalls --harmony-sloppy --harmony-rest-parameters
+// Flags: --harmony-spread-calls --harmony-sloppy --harmony-rest-parameters
(function testConstructClassStrict() {
diff --git a/deps/v8/test/mjsunit/harmony/spread-call-new.js b/deps/v8/test/mjsunit/harmony/spread-call-new.js
index 78f873e14b..4ae115e791 100644
--- a/deps/v8/test/mjsunit/harmony/spread-call-new.js
+++ b/deps/v8/test/mjsunit/harmony/spread-call-new.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-spreadcalls
+// Flags: --harmony-spread-calls
(function testNonConstructorStrict() {
"use strict";
diff --git a/deps/v8/test/mjsunit/harmony/spread-call-super-property.js b/deps/v8/test/mjsunit/harmony/spread-call-super-property.js
index cdf6f2e242..cc6cf849bd 100644
--- a/deps/v8/test/mjsunit/harmony/spread-call-super-property.js
+++ b/deps/v8/test/mjsunit/harmony/spread-call-super-property.js
@@ -2,19 +2,30 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-spreadcalls --harmony-sloppy --harmony-rest-parameters
+// Flags: --harmony-spread-calls --harmony-sloppy --harmony-rest-parameters
-(function testCallSuperProperty() {
+(function testCallSuperPropertyStrict() {
+ "use strict";
class BaseClass {
- strict_method(...args) { "use strict"; return [this].concat(args); }
- sloppy_method(...args) { return [this].concat(args); }
+ method(...args) { return [this].concat(args); }
}
class SubClass extends BaseClass {
- strict_m(...args) { return super.strict_method(...args); }
- sloppy_m(...args) { return super.sloppy_method(...args); }
+ method(...args) { return super.method(...args); }
}
var c = new SubClass();
- assertEquals([c, 1, 2, 3, 4, 5], c.strict_m(1, 2, 3, 4, 5));
- assertEquals([c, 1, 2, 3, 4, 5], c.sloppy_m(1, 2, 3, 4, 5));
+ assertEquals([c, 1, 2, 3, 4, 5], c.method(1, 2, 3, 4, 5));
+})();
+
+
+(function testCallSuperPropertySloppy() {
+ class BaseClass {
+ method(...args) { return [this].concat(args); }
+ }
+ class SubClass extends BaseClass {
+ method(...args) { return super.method(...args); }
+ }
+
+ var c = new SubClass();
+ assertEquals([c, 1, 2, 3, 4, 5], c.method(1, 2, 3, 4, 5));
})();
diff --git a/deps/v8/test/mjsunit/harmony/spread-call.js b/deps/v8/test/mjsunit/harmony/spread-call.js
index e10a3ec793..7c19ec1ef1 100644
--- a/deps/v8/test/mjsunit/harmony/spread-call.js
+++ b/deps/v8/test/mjsunit/harmony/spread-call.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-spreadcalls
+// Flags: --harmony-spread-calls
(function testSpreadCallsStrict() {
"use strict"
diff --git a/deps/v8/test/mjsunit/harmony/super.js b/deps/v8/test/mjsunit/harmony/super.js
index 601addaa0e..d74484ea04 100644
--- a/deps/v8/test/mjsunit/harmony/super.js
+++ b/deps/v8/test/mjsunit/harmony/super.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --harmony-arrow-functions --allow-natives-syntax
-// Flags: --harmony-spreadcalls --harmony-destructuring
+// Flags: --harmony-spread-calls --harmony-destructuring
// Flags: --harmony-rest-parameters --harmony-sloppy
(function TestSuperNamedLoads() {
@@ -81,39 +81,6 @@
}());
-(function TestSuperNumericKeyedLoads() {
- var x = 1;
- var derivedDataProperty = 2;
- var f = 3;
-
- function Base() { }
- function fBase() { return "Base " + this.toString(); }
- Base.prototype[f] = %ToMethod(fBase, Base.prototype);
- Base.prototype[x] = 15;
- Base.prototype.toString = function() { return "this is Base"; };
-
- function Derived() {
- this[derivedDataProperty] = "xxx";
- }
- Derived.prototype = {
- __proto__: Base.prototype,
- toString() { return "this is Derived"; },
- 1: 27,
- 3() {
- assertEquals("Base this is Derived", super[f]());
- var a = super[x];
- assertEquals(15, a);
- assertEquals(15, super[x]);
- assertEquals(27, this[x]);
- return "Derived";
- }
- };
-
- assertEquals("Base this is Base", new Base()[f]());
- assertEquals("Derived", new Derived()[f]());
-}());
-
-
(function TestSuperKeywordNonMethod() {
'use strict';
diff --git a/deps/v8/test/mjsunit/harmony/to-name.js b/deps/v8/test/mjsunit/harmony/to-name.js
new file mode 100644
index 0000000000..6d5d64e5e4
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/to-name.js
@@ -0,0 +1,54 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+assertEquals("1", %ToName(1));
+assertEquals("1", %_ToName(1));
+
+assertEquals("0.5", %ToName(.5));
+assertEquals("0.5", %_ToName(.5));
+
+assertEquals("null", %ToName(null));
+assertEquals("null", %_ToName(null));
+
+assertEquals("true", %ToName(true));
+assertEquals("true", %_ToName(true));
+
+assertEquals("false", %ToName(false));
+assertEquals("false", %_ToName(false));
+
+assertEquals("undefined", %ToName(undefined));
+assertEquals("undefined", %_ToName(undefined));
+
+assertEquals("random text", %ToName("random text"));
+assertEquals("random text", %_ToName("random text"));
+
+assertEquals(Symbol.toPrimitive, %ToName(Symbol.toPrimitive));
+assertEquals(Symbol.toPrimitive, %_ToName(Symbol.toPrimitive));
+
+var a = { toString: function() { return "xyz" }};
+assertEquals("xyz", %ToName(a));
+assertEquals("xyz", %_ToName(a));
+
+var b = { valueOf: function() { return 42 }};
+assertEquals("[object Object]", %ToName(b));
+assertEquals("[object Object]", %_ToName(b));
+
+var c = {
+ toString: function() { return "x"},
+ valueOf: function() { return 123 }
+};
+assertEquals("x", %ToName(c));
+assertEquals("x", %_ToName(c));
+
+var d = {
+ [Symbol.toPrimitive]: function(hint) { return hint }
+};
+assertEquals("string", %ToName(d));
+assertEquals("string", %_ToName(d));
+
+var e = new Date(0);
+assertEquals(e.toString(), %ToName(e));
+assertEquals(e.toString(), %_ToName(e));
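
The assertions above pin down ToName: symbols pass through as-is (they are already property keys), and everything else goes through ToString, so a plain object prefers toString over valueOf and a Symbol.toPrimitive hook sees the "string" hint. A rough user-land approximation, offered as a sketch rather than the engine's implementation:

function toNameLike(x) {
  if (typeof x === "symbol") return x;  // symbols are property keys already
  return String(x);                     // everything else via ToString
}
// toNameLike(0.5) === "0.5"
// toNameLike(Symbol.toPrimitive) === Symbol.toPrimitive
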
diff --git a/deps/v8/test/mjsunit/harmony/to-number.js b/deps/v8/test/mjsunit/harmony/to-number.js
new file mode 100644
index 0000000000..6dc4db59a2
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/to-number.js
@@ -0,0 +1,61 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+assertEquals(1, %ToNumber(1));
+assertEquals(1, %_ToNumber(1));
+
+assertEquals(.5, %ToNumber(.5));
+assertEquals(.5, %_ToNumber(.5));
+
+assertEquals(0, %ToNumber(null));
+assertEquals(0, %_ToNumber(null));
+
+assertEquals(1, %ToNumber(true));
+assertEquals(1, %_ToNumber(true));
+
+assertEquals(0, %ToNumber(false));
+assertEquals(0, %_ToNumber(false));
+
+assertEquals(NaN, %ToNumber(undefined));
+assertEquals(NaN, %_ToNumber(undefined));
+
+assertEquals(-1, %ToNumber("-1"));
+assertEquals(-1, %_ToNumber("-1"));
+assertEquals(123, %ToNumber("123"));
+assertEquals(123, %_ToNumber("123"));
+assertEquals(NaN, %ToNumber("random text"));
+assertEquals(NaN, %_ToNumber("random text"));
+
+assertThrows(function() { %ToNumber(Symbol.toPrimitive) }, TypeError);
+assertThrows(function() { %_ToNumber(Symbol.toPrimitive) }, TypeError);
+
+var a = { toString: function() { return 54321 }};
+assertEquals(54321, %ToNumber(a));
+assertEquals(54321, %_ToNumber(a));
+
+var b = { valueOf: function() { return 42 }};
+assertEquals(42, %ToNumber(b));
+assertEquals(42, %_ToNumber(b));
+
+var c = {
+ toString: function() { return "x"},
+ valueOf: function() { return 123 }
+};
+assertEquals(123, %ToNumber(c));
+assertEquals(123, %_ToNumber(c));
+
+var d = {
+ [Symbol.toPrimitive]: function(hint) {
+ assertEquals("number", hint);
+ return 987654321;
+ }
+};
+assertEquals(987654321, %ToNumber(d));
+assertEquals(987654321, %_ToNumber(d));
+
+var e = new Date(0);
+assertEquals(0, %ToNumber(e));
+assertEquals(0, %_ToNumber(e));
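
For objects, the ToNumber cases above go through ToPrimitive with the "number" hint: a Symbol.toPrimitive method wins outright, otherwise valueOf is tried before toString, which is why c (which has both) yields 123 while the Date yields 0. A simplified user-land sketch of that ordering (edge cases such as non-callable or object-returning hooks are glossed over):

function toNumberLike(o) {
  if (o === null || (typeof o !== "object" && typeof o !== "function")) {
    return Number(o);                          // already primitive
  }
  var exotic = o[Symbol.toPrimitive];
  if (exotic !== undefined) {
    return Number(exotic.call(o, "number"));   // hint "number"
  }
  // OrdinaryToPrimitive, hint "number": valueOf before toString.
  var v = o.valueOf();
  if (typeof v === "object") v = o.toString();
  return Number(v);
}
// toNumberLike({ valueOf: function() { return 42 } }) === 42
// toNumberLike(new Date(0)) === 0
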
diff --git a/deps/v8/test/mjsunit/harmony/to-primitive.js b/deps/v8/test/mjsunit/harmony/to-primitive.js
new file mode 100644
index 0000000000..09280bf1ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/to-primitive.js
@@ -0,0 +1,106 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+assertEquals(1, %ToPrimitive(1));
+assertEquals(1, %ToPrimitive_Number(1));
+assertEquals(1, %ToPrimitive_String(1));
+assertEquals(1, %_ToPrimitive(1));
+assertEquals(1, %_ToPrimitive_Number(1));
+assertEquals(1, %_ToPrimitive_String(1));
+
+assertEquals(.5, %ToPrimitive(.5));
+assertEquals(.5, %ToPrimitive_Number(.5));
+assertEquals(.5, %ToPrimitive_String(.5));
+assertEquals(.5, %_ToPrimitive(.5));
+assertEquals(.5, %_ToPrimitive_Number(.5));
+assertEquals(.5, %_ToPrimitive_String(.5));
+
+assertEquals(null, %ToPrimitive(null));
+assertEquals(null, %ToPrimitive_Number(null));
+assertEquals(null, %ToPrimitive_String(null));
+assertEquals(null, %_ToPrimitive(null));
+assertEquals(null, %_ToPrimitive_Number(null));
+assertEquals(null, %_ToPrimitive_String(null));
+
+assertEquals(true, %ToPrimitive(true));
+assertEquals(true, %ToPrimitive_Number(true));
+assertEquals(true, %ToPrimitive_String(true));
+assertEquals(true, %_ToPrimitive(true));
+assertEquals(true, %_ToPrimitive_Number(true));
+assertEquals(true, %_ToPrimitive_String(true));
+
+assertEquals(false, %ToPrimitive(false));
+assertEquals(false, %ToPrimitive_Number(false));
+assertEquals(false, %ToPrimitive_String(false));
+assertEquals(false, %_ToPrimitive(false));
+assertEquals(false, %_ToPrimitive_Number(false));
+assertEquals(false, %_ToPrimitive_String(false));
+
+assertEquals(undefined, %ToPrimitive(undefined));
+assertEquals(undefined, %ToPrimitive_Number(undefined));
+assertEquals(undefined, %ToPrimitive_String(undefined));
+assertEquals(undefined, %_ToPrimitive(undefined));
+assertEquals(undefined, %_ToPrimitive_Number(undefined));
+assertEquals(undefined, %_ToPrimitive_String(undefined));
+
+assertEquals("random text", %ToPrimitive("random text"));
+assertEquals("random text", %ToPrimitive_Number("random text"));
+assertEquals("random text", %ToPrimitive_String("random text"));
+assertEquals("random text", %_ToPrimitive("random text"));
+assertEquals("random text", %_ToPrimitive_Number("random text"));
+assertEquals("random text", %_ToPrimitive_String("random text"));
+
+assertEquals(Symbol.toPrimitive, %ToPrimitive(Symbol.toPrimitive));
+assertEquals(Symbol.toPrimitive, %ToPrimitive_Number(Symbol.toPrimitive));
+assertEquals(Symbol.toPrimitive, %ToPrimitive_String(Symbol.toPrimitive));
+assertEquals(Symbol.toPrimitive, %_ToPrimitive(Symbol.toPrimitive));
+assertEquals(Symbol.toPrimitive, %_ToPrimitive_Number(Symbol.toPrimitive));
+assertEquals(Symbol.toPrimitive, %_ToPrimitive_String(Symbol.toPrimitive));
+
+var a = { toString: function() { return "xyz" }};
+assertEquals("xyz", %ToPrimitive(a));
+assertEquals("xyz", %ToPrimitive_Number(a));
+assertEquals("xyz", %ToPrimitive_String(a));
+assertEquals("xyz", %_ToPrimitive(a));
+assertEquals("xyz", %_ToPrimitive_Number(a));
+assertEquals("xyz", %_ToPrimitive_String(a));
+
+var b = { valueOf: function() { return 42 }};
+assertEquals(42, %ToPrimitive(b));
+assertEquals(42, %ToPrimitive_Number(b));
+assertEquals("[object Object]", %ToPrimitive_String(b));
+assertEquals(42, %_ToPrimitive(b));
+assertEquals(42, %_ToPrimitive_Number(b));
+assertEquals("[object Object]", %_ToPrimitive_String(b));
+
+var c = {
+ toString: function() { return "x"},
+ valueOf: function() { return 123 }
+};
+assertEquals(123, %ToPrimitive(c));
+assertEquals(123, %ToPrimitive_Number(c));
+assertEquals("x", %ToPrimitive_String(c));
+assertEquals(123, %_ToPrimitive(c));
+assertEquals(123, %_ToPrimitive_Number(c));
+assertEquals("x", %_ToPrimitive_String(c));
+
+var d = {
+ [Symbol.toPrimitive]: function(hint) { return hint }
+};
+assertEquals("default", %ToPrimitive(d));
+assertEquals("number", %ToPrimitive_Number(d));
+assertEquals("string", %ToPrimitive_String(d));
+assertEquals("default", %_ToPrimitive(d));
+assertEquals("number", %_ToPrimitive_Number(d));
+assertEquals("string", %_ToPrimitive_String(d));
+
+var e = new Date(0);
+assertEquals(e.toString(), %ToPrimitive(e));
+assertEquals(0, %ToPrimitive_Number(e));
+assertEquals(e.toString(), %ToPrimitive_String(e));
+assertEquals(e.toString(), %_ToPrimitive(e));
+assertEquals(0, %_ToPrimitive_Number(e));
+assertEquals(e.toString(), %_ToPrimitive_String(e));
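
The triplets above differ only in the hint passed to ToPrimitive: the plain form uses "default", which ordinary objects treat like "number" (hence 42 and 123 for b and c), while Date treats "default" like "string", which is why %ToPrimitive(e) matches e.toString() but %ToPrimitive_Number(e) is 0. The hints are observable in standard JavaScript through a Symbol.toPrimitive hook; a small probe:

var probe = { [Symbol.toPrimitive]: function(hint) { return hint; } };
String(probe);         // "string": string hint
+probe;                // NaN: the hook returned "number" (number hint)
probe == "default";    // true: loose == passes the "default" hint
// Date is the exotic case: new Date(0) + "" yields the date string
// ("default" behaves like "string" there), while +new Date(0) yields 0.
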
diff --git a/deps/v8/test/mjsunit/harmony/to-string.js b/deps/v8/test/mjsunit/harmony/to-string.js
new file mode 100644
index 0000000000..103ba89d1d
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/to-string.js
@@ -0,0 +1,54 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+assertEquals("1", %ToString(1));
+assertEquals("1", %_ToString(1));
+
+assertEquals("0.5", %ToString(.5));
+assertEquals("0.5", %_ToString(.5));
+
+assertEquals("null", %ToString(null));
+assertEquals("null", %_ToString(null));
+
+assertEquals("true", %ToString(true));
+assertEquals("true", %_ToString(true));
+
+assertEquals("false", %ToString(false));
+assertEquals("false", %_ToString(false));
+
+assertEquals("undefined", %ToString(undefined));
+assertEquals("undefined", %_ToString(undefined));
+
+assertEquals("random text", %ToString("random text"));
+assertEquals("random text", %_ToString("random text"));
+
+assertThrows(function() { %ToString(Symbol.toPrimitive) }, TypeError);
+assertThrows(function() { %_ToString(Symbol.toPrimitive) }, TypeError);
+
+var a = { toString: function() { return "xyz" }};
+assertEquals("xyz", %ToString(a));
+assertEquals("xyz", %_ToString(a));
+
+var b = { valueOf: function() { return 42 }};
+assertEquals("[object Object]", %ToString(b));
+assertEquals("[object Object]", %_ToString(b));
+
+var c = {
+ toString: function() { return "x"},
+ valueOf: function() { return 123 }
+};
+assertEquals("x", %ToString(c));
+assertEquals("x", %_ToString(c));
+
+var d = {
+ [Symbol.toPrimitive]: function(hint) { return hint }
+};
+assertEquals("string", %ToString(d));
+assertEquals("string", %_ToString(d));
+
+var e = new Date(0);
+assertEquals(e.toString(), %ToString(e));
+assertEquals(e.toString(), %_ToString(e));
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index 45443c75d1..b8c3114d36 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -76,7 +76,8 @@ test(function() {
// kCannotConvertToPrimitive
test(function() {
- [].join(Object(Symbol(1)));
+ var o = { toString: function() { return this } };
+ [].join(o);
}, "Cannot convert object to primitive value", TypeError);
// kCircularStructure
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 0b333f10c9..f549718628 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -75,10 +75,12 @@
'bit-not': [PASS, NO_VARIANTS],
'json2': [PASS, NO_VARIANTS],
'packed-elements': [PASS, NO_VARIANTS],
+ 'string-indexof-1': [PASS, NO_VARIANTS],
'unbox-double-arrays': [PASS, NO_VARIANTS],
'unicode-test': [PASS, NO_VARIANTS],
'whitespaces': [PASS, NO_VARIANTS],
'compiler/osr-assert': [PASS, NO_VARIANTS],
+ 'es6/string-fromcodepoint': [PASS, NO_VARIANTS],
'regress/regress-2185-2': [PASS, NO_VARIANTS],
'regress/regress-2612': [PASS, NO_VARIANTS],
@@ -182,8 +184,8 @@
'array-constructor': [PASS, TIMEOUT],
# Very slow on ARM and MIPS, contains no architecture dependent code.
- 'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', TIMEOUT]],
- 'regress/regress-3976': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', SKIP]],
+ 'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips', TIMEOUT]],
+ 'regress/regress-3976': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips64 or arch == mips', SKIP]],
'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch == arm or arch == arm64 or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', SKIP]],
##############################################################################
@@ -249,13 +251,6 @@
# BUG(chromium:508074). Remove this once the issue is fixed.
'harmony/arrow-rest-params': [PASS, NO_VARIANTS],
'harmony/rest-params': [PASS, ['no_snap == True', NO_VARIANTS]],
-
- # BUG(v8:4378).
- 'regress/regress-crbug-501711': [PASS, ['isolates', SKIP]],
- 'regress/regress-4279': [PASS, ['isolates', SKIP]],
-
- # BUG(chromium:518748)
- 'regress/regress-crbug-518748': [SKIP],
}], # ALWAYS
['novfp3 == True', {
@@ -285,6 +280,7 @@
'regress/regress-crbug-137689': [SKIP],
'regress/regress-165637': [SKIP],
'regress/regress-2249': [SKIP],
+ 'regress/regress-4121': [SKIP],
# Tests taking too long
'debug-stepout-scope-part8': [SKIP],
'mirror-object': [SKIP],
@@ -308,6 +304,9 @@
'unicodelctest': [PASS, NO_VARIANTS],
'unicodelctest-no-optimization': [PASS, NO_VARIANTS],
+ # TODO(jkummerow): Doesn't work correctly in GC stress.
+ 'regress/regress-crbug-500497': [SKIP],
+
# Too slow for gc stress.
'asm/embenchen/box2d': [SKIP],
@@ -325,6 +324,12 @@
}], # 'gc_stress == True'
##############################################################################
+['no_i18n == True and mode == debug', {
+  # Tests too slow for no_i18n debug.
+ 'regress/regress-1200351': [SKIP],
+}], # 'no_i18n == True and mode == debug'
+
+##############################################################################
['byteorder == big', {
# Emscripten requires little-endian, skip all tests on big endian platforms.
'asm/embenchen/*': [SKIP],
@@ -538,7 +543,7 @@
}], # 'arch == mips'
##############################################################################
-['arch == mips64el', {
+['arch == mips64el or arch == mips64', {
# Slow tests which times out in debug mode.
'try': [PASS, ['mode == debug', SKIP]],
@@ -586,7 +591,7 @@
# Currently always deopt on minus zero
'math-floor-of-div-minus-zero': [SKIP],
-}], # 'arch == mips64el'
+}], # 'arch == mips64el or arch == mips64'
##############################################################################
['system == windows', {
@@ -674,6 +679,13 @@
}], # 'deopt_fuzzer == True'
##############################################################################
+['predictable == True', {
+
+ # Skip tests that are known to be non-deterministic.
+ 'd8-worker-sharedarraybuffer': [SKIP],
+}], # 'predictable == True'
+
+##############################################################################
['arch == ppc and simulator_run == True or arch == ppc64 and simulator_run == True', {
# take too long with the simulator.
diff --git a/deps/v8/test/mjsunit/regress/regress-105.js b/deps/v8/test/mjsunit/regress/regress-105.js
index 8b8030ffec..877cb82317 100644
--- a/deps/v8/test/mjsunit/regress/regress-105.js
+++ b/deps/v8/test/mjsunit/regress/regress-105.js
@@ -26,12 +26,12 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var custom_valueOf = function() {
- assertEquals(Number, custom_valueOf.caller);
+ assertEquals(null, custom_valueOf.caller);
return 2;
}
var custom_toString = function() {
- assertEquals(String, custom_toString.caller);
+ assertEquals(null, custom_toString.caller);
return "I used to be an adventurer like you";
}
diff --git a/deps/v8/test/mjsunit/regress/regress-165637.js b/deps/v8/test/mjsunit/regress/regress-165637.js
index 84c9041216..6e28726356 100644
--- a/deps/v8/test/mjsunit/regress/regress-165637.js
+++ b/deps/v8/test/mjsunit/regress/regress-165637.js
@@ -51,7 +51,7 @@ assertTrue(do_slices() < (3 * 1000));
// Make sure that packed and unpacked array slices are still properly handled
var holey_array = [1, 2, 3, 4, 5,,,,,,];
-assertFalse(%HasFastHoleyElements(holey_array.slice(6, 1)));
-assertEquals(undefined, holey_array.slice(6, 7)[0])
-assertFalse(%HasFastHoleyElements(holey_array.slice(2, 1)));
-assertEquals(3, holey_array.slice(2, 3)[0])
+assertEquals([undefined], holey_array.slice(6, 7));
+assertEquals(undefined, holey_array.slice(6, 7)[0]);
+assertEquals([], holey_array.slice(2, 1));
+assertEquals(3, holey_array.slice(2, 3)[0]);
diff --git a/deps/v8/test/mjsunit/regress/regress-3926.js b/deps/v8/test/mjsunit/regress/regress-3926.js
new file mode 100644
index 0000000000..4720c1b908
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3926.js
@@ -0,0 +1,87 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-sloppy --harmony-sloppy-let
+
+// See: http://code.google.com/p/v8/issues/detail?id=3926
+
+// Switch statements should disable hole check elimination
+
+// Ensure that both reads and writes encounter the hole check
+// FullCodeGen had an issue on reads; TurboFan had an issue on writes
+function f(x) {
+ var z;
+ switch (x) {
+ case 1:
+ let y = 1;
+ case 2:
+ y = 2;
+ case 3:
+ z = y;
+ }
+ return z;
+}
+assertEquals(2, f(1));
+assertThrows(function() {f(2)}, ReferenceError);
+assertThrows(function() {f(3)}, ReferenceError);
+
+// Ensure that hole checks are done even in subordinate scopes
+assertThrows(function() {
+ switch (1) {
+ case 0:
+ let x = 2;
+ case 1:
+ { // this block, plus the let below, adds another linear lexical scope
+ let y = 3;
+ x;
+ }
+ }
+}, ReferenceError);
+
+// Ensure that inner functions and eval don't skip hole checks
+
+function g(x) {
+ switch (x) {
+ case 1:
+ let z;
+ case 2:
+ return function() { z = 1; }
+ case 3:
+ return function() { return z; }
+ case 4:
+ return eval("z = 1");
+ case 5:
+ return eval("z");
+ }
+}
+
+assertEquals(undefined, g(1)());
+assertThrows(g(2), ReferenceError);
+assertThrows(g(3), ReferenceError);
+assertThrows(function () {g(4)}, ReferenceError);
+assertThrows(function () {g(5)}, ReferenceError);
+
+// Ensure the same in strict mode, with different eval and function semantics
+
+function h(x) {
+ 'use strict'
+ switch (x) {
+ case 1:
+ let z;
+ case 2:
+ return function() { z = 1; }
+ case 3:
+ return function() { return z; }
+ case 4:
+ return eval("z = 1");
+ case 5:
+ return eval("z");
+ }
+}
+
+assertEquals(undefined, h(1)());
+assertThrows(h(2), ReferenceError);
+assertThrows(h(3), ReferenceError);
+assertThrows(function () {h(4)}, ReferenceError);
+assertThrows(function () {h(5)}, ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/regress-4121.js b/deps/v8/test/mjsunit/regress/regress-4121.js
index bef0b47ee5..a175ed9fd2 100644
--- a/deps/v8/test/mjsunit/regress/regress-4121.js
+++ b/deps/v8/test/mjsunit/regress/regress-4121.js
@@ -4,55 +4,45 @@
// Flags: --allow-natives-syntax
-function Migrator(o) {
- return o.foo;
+function literals_sharing_test(warmup, optimize) {
+ function closure() {
+ // Ensure small array literals start in specific element kind mode.
+ assertTrue(%HasFastSmiElements([]));
+ assertTrue(%HasFastSmiElements([1]));
+ assertTrue(%HasFastSmiElements([1,2]));
+ assertTrue(%HasFastDoubleElements([1.1]));
+ assertTrue(%HasFastDoubleElements([1.1,2]));
+
+ var a = [1, 2, 3];
+ if (warmup) {
+ // Transition elements kind during warmup...
+ assertTrue(%HasFastSmiElements(a));
+ assertEquals(4, a.push(1.3));
+ }
+ // ... and ensure that the information about transitioning is
+ // propagated to the next closure.
+ assertTrue(%HasFastDoubleElements(a));
+ };
+ if (optimize) %OptimizeFunctionOnNextCall(closure);
+ closure();
}
-function Loader(o) {
- return o[0];
-}
-
-var first_smi_array = [1];
-var second_smi_array = [2];
-var first_object_array = ["first"];
-var second_object_array = ["string"];
-
-assertTrue(%HasFastSmiElements(first_smi_array));
-assertTrue(%HasFastSmiElements(second_smi_array));
-assertTrue(%HasFastObjectElements(first_object_array));
-assertTrue(%HasFastObjectElements(second_object_array));
-
-// Prepare identical transition chains for smi and object arrays.
-first_smi_array.foo = 0;
-second_smi_array.foo = 0;
-first_object_array.foo = 0;
-second_object_array.foo = 0;
-
-// Collect type feedback for not-yet-deprecated original object array map.
-for (var i = 0; i < 3; i++) Migrator(second_object_array);
-// Blaze a migration trail for smi array maps.
-// This marks the migrated smi array map as a migration target.
-first_smi_array.foo = 0.5;
-print(second_smi_array.foo);
-// Deprecate original object array map.
-// Use TryMigrate from deferred optimized code to migrate second object array.
-first_object_array.foo = 0.5;
-%OptimizeFunctionOnNextCall(Migrator);
-Migrator(second_object_array);
-
-// |second_object_array| now erroneously has a smi map.
-// Optimized code assuming smi elements will expose this.
-
-for (var i = 0; i < 3; i++) Loader(second_smi_array);
-%OptimizeFunctionOnNextCall(Loader);
-assertEquals("string", Loader(second_object_array));
+function test() {
+ var warmup = true;
+ for (var i = 0; i < 3; i++) {
+ print("iter: " + i + ", warmup: "+ warmup);
+ literals_sharing_test(warmup, false);
+ warmup = false;
+ }
+ print("iter: " + i + ", opt: true");
+ literals_sharing_test(warmup, true);
+}
-// Any of the following checks will also fail:
-assertTrue(%HasFastObjectElements(second_object_array));
-assertFalse(%HasFastSmiElements(second_object_array));
-assertTrue(%HaveSameMap(first_object_array, second_object_array));
-assertFalse(%HaveSameMap(first_smi_array, second_object_array));
-%ClearFunctionTypeFeedback(Loader);
-%ClearFunctionTypeFeedback(Migrator);
+function stress_opt_test() {}
+stress_opt_test();
+if (%GetOptimizationStatus(stress_opt_test) == 2) {
+ // This test is not suitable for --always-opt mode.
+ test();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-4173.js b/deps/v8/test/mjsunit/regress/regress-4173.js
new file mode 100644
index 0000000000..bef0b47ee5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4173.js
@@ -0,0 +1,58 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function Migrator(o) {
+ return o.foo;
+}
+function Loader(o) {
+ return o[0];
+}
+
+var first_smi_array = [1];
+var second_smi_array = [2];
+var first_object_array = ["first"];
+var second_object_array = ["string"];
+
+assertTrue(%HasFastSmiElements(first_smi_array));
+assertTrue(%HasFastSmiElements(second_smi_array));
+assertTrue(%HasFastObjectElements(first_object_array));
+assertTrue(%HasFastObjectElements(second_object_array));
+
+// Prepare identical transition chains for smi and object arrays.
+first_smi_array.foo = 0;
+second_smi_array.foo = 0;
+first_object_array.foo = 0;
+second_object_array.foo = 0;
+
+// Collect type feedback for not-yet-deprecated original object array map.
+for (var i = 0; i < 3; i++) Migrator(second_object_array);
+
+// Blaze a migration trail for smi array maps.
+// This marks the migrated smi array map as a migration target.
+first_smi_array.foo = 0.5;
+print(second_smi_array.foo);
+
+// Deprecate original object array map.
+// Use TryMigrate from deferred optimized code to migrate second object array.
+first_object_array.foo = 0.5;
+%OptimizeFunctionOnNextCall(Migrator);
+Migrator(second_object_array);
+
+// |second_object_array| now erroneously has a smi map.
+// Optimized code assuming smi elements will expose this.
+
+for (var i = 0; i < 3; i++) Loader(second_smi_array);
+%OptimizeFunctionOnNextCall(Loader);
+assertEquals("string", Loader(second_object_array));
+
+// Any of the following checks will also fail:
+assertTrue(%HasFastObjectElements(second_object_array));
+assertFalse(%HasFastSmiElements(second_object_array));
+assertTrue(%HaveSameMap(first_object_array, second_object_array));
+assertFalse(%HaveSameMap(first_smi_array, second_object_array));
+
+%ClearFunctionTypeFeedback(Loader);
+%ClearFunctionTypeFeedback(Migrator);
diff --git a/deps/v8/test/mjsunit/regress/regress-4374.js b/deps/v8/test/mjsunit/regress/regress-4374.js
new file mode 100644
index 0000000000..afae71c595
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4374.js
@@ -0,0 +1,15 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --function-context-specialization
+// Flags: --turbo-filter=f --turbo-inlining
+
+var f = (function() {
+ var max = Math.max;
+ return function f() { return max(0, -1); };
+})();
+
+assertEquals(0, f());
+%OptimizeFunctionOnNextCall(f);
+assertEquals(0, f());
diff --git a/deps/v8/test/mjsunit/regress/regress-4376-1.js b/deps/v8/test/mjsunit/regress/regress-4376-1.js
new file mode 100644
index 0000000000..edb97ee2a9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4376-1.js
@@ -0,0 +1,12 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Bar() { }
+function Baz() { }
+Baz.prototype = { __proto__: new Bar() }
+var x = new Baz();
+function foo(y) { return y instanceof Bar; }
+assertTrue(foo(x));
+Baz.prototype.__proto__ = null;
+assertFalse(foo(x));
diff --git a/deps/v8/test/mjsunit/regress/regress-4376-2.js b/deps/v8/test/mjsunit/regress/regress-4376-2.js
new file mode 100644
index 0000000000..2a37204eab
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4376-2.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Foo() {}
+var x = new Foo();
+Foo.prototype = 1;
+function foo() { return x instanceof Foo; }
+assertThrows(foo, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-4376-3.js b/deps/v8/test/mjsunit/regress/regress-4376-3.js
new file mode 100644
index 0000000000..3240cf0cd9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4376-3.js
@@ -0,0 +1,10 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Foo() {}
+var x = new Foo();
+function foo() { return x instanceof Foo; }
+assertTrue(foo());
+Foo.prototype = 1;
+assertThrows(foo, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-4377.js b/deps/v8/test/mjsunit/regress/regress-4377.js
new file mode 100644
index 0000000000..3c4278ac0b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4377.js
@@ -0,0 +1,45 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// See: http://code.google.com/p/v8/issues/detail?id=4377
+
+// Switch statements should introduce their own lexical scope
+
+'use strict';
+
+switch (1) { case 1: let x = 2; }
+
+assertThrows(function() { return x; }, ReferenceError);
+
+{
+ let result;
+ let x = 1;
+ switch (x) {
+ case 1:
+ let x = 2;
+ result = x;
+ break;
+ default:
+ result = 0;
+ break;
+ }
+ assertEquals(1, x);
+ assertEquals(2, result);
+}
+
+{
+ let result;
+ let x = 1;
+ switch (eval('x')) {
+ case 1:
+ let x = 2;
+ result = x;
+ break;
+ default:
+ result = 0;
+ break;
+ }
+ assertEquals(1, x);
+ assertEquals(2, result);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-4380.js b/deps/v8/test/mjsunit/regress/regress-4380.js
new file mode 100644
index 0000000000..8a83def6e2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4380.js
@@ -0,0 +1,20 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function bar(a) {
+ var x = a[0];
+ return x == undefined;
+}
+
+// Make the keyed load be polymorphic on holey smi and holey fast.
+bar([, 2, 3]);
+bar([, 'two', 'three']);
+bar([, 2, 3]);
+
+%OptimizeFunctionOnNextCall(bar);
+bar([, 2, 3]);
+// Verify that loading the hole doesn't cause deoptimization.
+assertOptimized(bar);
diff --git a/deps/v8/test/mjsunit/regress/regress-4388.js b/deps/v8/test/mjsunit/regress/regress-4388.js
new file mode 100644
index 0000000000..908bcccb4e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4388.js
@@ -0,0 +1,35 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-filter=test*
+
+// Tests that TurboFan emits a dynamic hole-check for the temporal dead zone at
+// non-initializing assignments to a {let} variable.
+function test_hole_check_for_let(a) {
+ 'use strict';
+ { switch (a) {
+ case 0: let x;
+ case 1: x = 9;
+ }
+ }
+}
+assertDoesNotThrow("test_hole_check_for_let(0)");
+assertThrows("test_hole_check_for_let(1)", ReferenceError);
+%OptimizeFunctionOnNextCall(test_hole_check_for_let)
+assertThrows("test_hole_check_for_let(1)", ReferenceError);
+
+// Tests that TurboFan emits a dynamic hole-check for the temporal dead zone at
+// non-initializing assignments to a {const} variable.
+function test_hole_check_for_const(a) {
+ 'use strict';
+ { switch (a) {
+ case 0: const x = 3;
+ case 1: x = 2;
+ }
+ }
+}
+assertThrows("test_hole_check_for_const(0)", TypeError);
+assertThrows("test_hole_check_for_const(1)", ReferenceError);
+%OptimizeFunctionOnNextCall(test_hole_check_for_const)
+assertThrows("test_hole_check_for_const(1)", ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/regress-4399.js b/deps/v8/test/mjsunit/regress/regress-4399.js
new file mode 100644
index 0000000000..c76c0c83b5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4399.js
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test that a switch statement produces the appropriate completion value for eval
+
+assertEquals("foo", eval('switch(1) { case 1: "foo" }'));
+assertEquals("foo", eval('{ switch(1) { case 1: "foo" } }'));
+assertEquals("foo", eval('switch(1) { case 1: { "foo" } }'));
+assertEquals("foo", eval('switch(1) { case 1: "foo"; break; case 2: "bar"; break }'));
+assertEquals("bar", eval('switch(2) { case 1: "foo"; break; case 2: "bar"; break }'));
+assertEquals("bar", eval('switch(1) { case 1: "foo"; case 2: "bar"; break }'));
+
+// The tag is not the value if no case produces a value
+
+assertEquals(undefined, eval('switch (1) {}'));
+assertEquals(undefined, eval('switch (1) { case 1: {} }'));
diff --git a/deps/v8/test/mjsunit/regress/regress-520029.js b/deps/v8/test/mjsunit/regress/regress-520029.js
new file mode 100644
index 0000000000..299dd75017
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-520029.js
@@ -0,0 +1,29 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-sloppy-let --harmony-sloppy
+
+// Test that hoisting a function out of a lexical scope does not
+// lead to a parsing error
+
+// This used to cause a crash in the parser
+function f(one) { class x { } { class x { } function g() { one; x; } g() } } f()
+
+// This used to lead to a ReferenceError
+function g() { var x = 1; { let x = 2; function g() { x; } g(); } }
+assertEquals(undefined, g());
+
+// This used to cause a crash in the parser
+function __f_4(one) {
+ var __v_10 = one + 1;
+ {
+ let __v_10 = one + 3;
+ function __f_6() {
+ one;
+ __v_10;
+ }
+ __f_6();
+ }
+}
+__f_4();
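The sloppy-mode hoisting involved here follows Annex B: a function declared
inside a block is also var-hoisted to the enclosing function, yet closures
created in the block must still see the block's own lexical bindings. A
standalone sketch (illustrative only, not part of the patch):

  function outer() {
    var x = 1;
    {
      let x = 2;
      function g() { return x; }  // closes over the block's let x
      console.log(g());           // 2
    }
    console.log(x);               // 1 -- the outer var is untouched
  }
  outer();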
diff --git a/deps/v8/test/mjsunit/regress/regress-539875.js b/deps/v8/test/mjsunit/regress/regress-539875.js
new file mode 100644
index 0000000000..b100c3bcf0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-539875.js
@@ -0,0 +1,37 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function testSeal() {
+ var sloppy = arguments;
+ var sym = Symbol();
+ sloppy[sym] = 123;
+ Object.seal(sloppy);
+ assertTrue(Object.isSealed(sloppy));
+ var desc = Object.getOwnPropertyDescriptor(sloppy, sym);
+ assertEquals(123, desc.value);
+ assertFalse(desc.configurable);
+ assertTrue(desc.writable);
+})();
+
+
+(function testFreeze() {
+ var sloppy = arguments;
+ var sym = Symbol();
+ sloppy[sym] = 123;
+ Object.freeze(sloppy);
+ assertTrue(Object.isFrozen(sloppy));
+ var desc = Object.getOwnPropertyDescriptor(sloppy, sym);
+ assertEquals(123, desc.value);
+ assertFalse(desc.configurable);
+ assertFalse(desc.writable);
+})();
+
+
+(function testIsFrozenAndIsSealed() {
+ var sym = Symbol();
+ var obj = { [sym]: 123 };
+ Object.preventExtensions(obj);
+ assertFalse(Object.isFrozen(obj));
+ assertFalse(Object.isSealed(obj));
+})();
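The behavior under test is standard ES6: seal and freeze must cover
symbol-keyed own properties too. A minimal sketch (illustrative only, not
part of the patch):

  // Object.seal makes every own property non-configurable; Object.freeze
  // additionally makes data properties non-writable -- including
  // symbol-keyed ones.
  var sym = Symbol();
  var obj = { [sym]: 123 };
  Object.freeze(obj);
  var desc = Object.getOwnPropertyDescriptor(obj, sym);
  console.log(desc.configurable, desc.writable);  // false false
  obj[sym] = 456;                                 // silently ignored (sloppy mode)
  console.log(obj[sym]);                          // 123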
diff --git a/deps/v8/test/mjsunit/regress/regress-544991.js b/deps/v8/test/mjsunit/regress/regress-544991.js
new file mode 100644
index 0000000000..dc09fae6a4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-544991.js
@@ -0,0 +1,15 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var typedArray = new Int8Array(1);
+var saved;
+var called;
+typedArray.constructor = function(x) { called = true; saved = x };
+typedArray.constructor.prototype = Int8Array.prototype;
+typedArray.map(function(){});
+
+// To meet the spec, the constructor shouldn't be called directly; but while
+// it is still called, the argument should be an Array.
+assertTrue(called); // Will fail later; when it does, delete this test.
+assertEquals("Array", saved.constructor.name);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-500497.js b/deps/v8/test/mjsunit/regress/regress-crbug-500497.js
index 2d3d40f0f7..9117440c2c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-500497.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-500497.js
@@ -13,9 +13,6 @@ function Ctor() {
}
for (var i = 0; i < 120; i++) {
- // This print() is important! Without it, in --gc-stress mode, the function
- // Ctor is optimized too early. No idea why.
- print(i);
// Make the "a" property long-lived, while everything else is short-lived.
global.push(Ctor().a);
(function FillNewSpace() { new Array(10000); })();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-501809.js b/deps/v8/test/mjsunit/regress/regress-crbug-501809.js
index c3abadfab5..855b36a054 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-501809.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-501809.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-sharedarraybuffer --harmony-atomics
+// Flags: --harmony-sharedarraybuffer
var sab = new SharedArrayBuffer(8);
var ta = new Int32Array(sab);
ta.__defineSetter__('length', function() {;});
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-518748.js b/deps/v8/test/mjsunit/regress/regress-crbug-518748.js
deleted file mode 100644
index cccbc26c24..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-518748.js
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-if (this.Worker) {
- var workersToCreate = 1000;
- var workers = [];
- assertThrows(function() {
- for (var i = 0; i < workersToCreate; i++) {
- workers.push(new Worker(''));
- }
- });
- print('#workers: ', workers.length);
-}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-522895.js b/deps/v8/test/mjsunit/regress/regress-crbug-522895.js
new file mode 100644
index 0000000000..f28f3a1cb9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-522895.js
@@ -0,0 +1,22 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr
+
+var body =
+ "function bar1( ) {" +
+ " var i = 35; " +
+ " while (i-- > 31) {" +
+ " %OptimizeOsr(); " +
+ " j = 9; " +
+ " while (j-- > 7);" +
+ " } " +
+ " return i; " +
+ "}";
+
+function gen() {
+ return eval("(" + body + ")");
+}
+
+gen()();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-523213.js b/deps/v8/test/mjsunit/regress/regress-crbug-523213.js
new file mode 100644
index 0000000000..15b16bb4f9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-523213.js
@@ -0,0 +1,21 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var v1 = [];
+var v2 = [];
+v1.__proto__ = v2;
+
+function f(){
+ var a = [];
+ for(var i=0; i<2; i++){
+ a.push([]);
+ a = v2;
+ }
+}
+
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-523307.js b/deps/v8/test/mjsunit/regress/regress-crbug-523307.js
new file mode 100644
index 0000000000..f2909675b2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-523307.js
@@ -0,0 +1,16 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(x) {
+ var c = x * x << 366;
+ var a = c + c;
+ return a;
+}
+
+f(1);
+f(1);
+%OptimizeFunctionOnNextCall(f);
+f(1);
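The oversized shift count above is well-defined: the right operand of << is
converted with ToUint32 and masked to its low five bits, so << 366 behaves
exactly like << 14. A quick sketch (illustrative only, not part of the patch):

  console.log(1 << 366);  // 16384, i.e. 1 << (366 & 31) === 1 << 14
  console.log(1 << 32);   // 1, i.e. 1 << 0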
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-523919.js b/deps/v8/test/mjsunit/regress/regress-crbug-523919.js
new file mode 100644
index 0000000000..4b2a8fe93e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-523919.js
@@ -0,0 +1,31 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --cache=code
+
+[1.5,
+[2.5,
+[3.5,
+[4.5,
+[5.5,
+[6.5,
+[7.5,
+[8.5,
+[9.5,
+[10.5,
+[11.5,
+[12.5,
+[13.5,
+[14.5,
+[15.5,
+[16.5,
+[17.5,
+[18.5,
+[19.5,
+[20.5,
+[21.5,
+[22.5,
+[23.5,
+[24.5,
+[25.5]]]]]]]]]]]]]]]]]]]]]]]]];
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-527364.js b/deps/v8/test/mjsunit/regress/regress-crbug-527364.js
new file mode 100644
index 0000000000..914bed01ab
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-527364.js
@@ -0,0 +1,26 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100 --allow-natives-syntax
+
+function module() {
+ "use asm";
+ var abs = Math.abs;
+ function f() {
+ return +abs();
+ }
+ return { f:f };
+}
+
+function run_close_to_stack_limit(f) {
+ try {
+ run_close_to_stack_limit(f);
+ f();
+ } catch(e) {
+ }
+}
+
+var boom = module().f;
+%OptimizeFunctionOnNextCall(boom)
+run_close_to_stack_limit(boom);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-530598.js b/deps/v8/test/mjsunit/regress/regress-crbug-530598.js
new file mode 100644
index 0000000000..f38552377f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-530598.js
@@ -0,0 +1,25 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-inlining
+
+var f1 = (function() {
+ "use asm";
+ function g() { throw 0; }
+ function f() { return g(); }
+ return f;
+})();
+assertThrows("f1()");
+%OptimizeFunctionOnNextCall(f1);
+assertThrows("f1()");
+
+var f2 = (function() {
+ "use asm";
+ function g() { for (;;); }
+ function f(a) { return a || g(); }
+ return f;
+})();
+assertTrue(f2(true));
+%OptimizeFunctionOnNextCall(f2);
+assertTrue(f2(true));
diff --git a/deps/v8/test/mjsunit/regress/regress-osr-context.js b/deps/v8/test/mjsunit/regress/regress-osr-context.js
index 8ceb79119a..a73954156c 100644
--- a/deps/v8/test/mjsunit/regress/regress-osr-context.js
+++ b/deps/v8/test/mjsunit/regress/regress-osr-context.js
@@ -2,7 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --context-specialization --turbo-filter=f
+// Flags: --allow-natives-syntax --function-context-specialization
+// Flags: --turbo-filter=f
(function() {
"use strict";
diff --git a/deps/v8/test/mjsunit/stack-traces-2.js b/deps/v8/test/mjsunit/stack-traces-2.js
index 3bec963701..a54bb45ff5 100644
--- a/deps/v8/test/mjsunit/stack-traces-2.js
+++ b/deps/v8/test/mjsunit/stack-traces-2.js
@@ -82,6 +82,3 @@ testTraceNativeConstructor(Date); // Does ToNumber on argument.
testNotOmittedBuiltin(function(){ [thrower, 2].sort(function (a,b) {
(b < a) - (a < b); });
}, "QuickSort");
-
-// Not omitted even though ADD from runtime.js is a non-native builtin.
-testNotOmittedBuiltin(function(){ thrower + 2; }, "ADD");
diff --git a/deps/v8/test/mjsunit/string-indexof-1.js b/deps/v8/test/mjsunit/string-indexof-1.js
index db3623f7c0..b9dad46d3d 100644
--- a/deps/v8/test/mjsunit/string-indexof-1.js
+++ b/deps/v8/test/mjsunit/string-indexof-1.js
@@ -77,6 +77,20 @@ assertEquals(-1, twoByteString.indexOf("\u0391\u03a3\u0395"),
//single char pattern
assertEquals(4, twoByteString.indexOf("\u0395"));
+// Test a string with alignment traps (byte patterns that could falsely match at odd offsets).
+var alignmentString = "\u1122\u2211\u2222\uFF00\u00FF\u00FF";
+assertEquals(2, alignmentString.indexOf("\u2222"));
+assertEquals(4, alignmentString.indexOf("\u00FF\u00FF"));
+
+var longAlignmentString = "\uFF00" + "\u00FF".repeat(10);
+assertEquals(1,
+ longAlignmentString.indexOf("\u00FF".repeat(10)));
+
+// Test a pattern whose first character matches only near the end of the string.
+var boundsString = "112233";
+assertEquals(-1, boundsString.indexOf("334455"));
+assertEquals(-1, boundsString.indexOf("334455".repeat(10)));
+
// Test complex string indexOf algorithms. Only trigger for long strings.
// Long string that isn't a simple repeat of a shorter string.
diff --git a/deps/v8/test/mjsunit/strong/class-literals.js b/deps/v8/test/mjsunit/strong/class-literals.js
new file mode 100644
index 0000000000..a0e7280abc
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/class-literals.js
@@ -0,0 +1,159 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode --allow-natives-syntax
+
+'use strict';
+
+function assertWeakClassWeakInstances(x) {
+ assertFalse(%IsStrong(x));
+ assertFalse(%IsStrong(x.prototype));
+ assertFalse(%IsStrong(new x));
+}
+
+function assertWeakClassStrongInstances(x) {
+ assertFalse(%IsStrong(x));
+ assertFalse(%IsStrong(x.prototype));
+ assertTrue(%IsStrong(new x));
+}
+
+function assertStrongClassWeakInstances(x) {
+ assertTrue(%IsStrong(x));
+ assertTrue(%IsStrong(x.prototype));
+ assertFalse(%IsStrong(new x));
+}
+
+function assertStrongClassStrongInstances(x) {
+ assertTrue(%IsStrong(x));
+ assertTrue(%IsStrong(x.prototype));
+ assertTrue(%IsStrong(new x));
+}
+
+function getWeakClass() {
+ return (class {});
+}
+
+function getWeakClassExtends(x) {
+ return (class extends x {});
+}
+
+function getStrongClass() {
+ "use strong";
+ return (class {});
+}
+
+function getStrongClassExtends(x) {
+ "use strong";
+ return (class extends x {});
+}
+
+(function SimpleWeakClassLiterals() {
+ class C {};
+ class D extends C {};
+ class E extends Object {};
+
+ assertWeakClassWeakInstances(C);
+ assertWeakClassWeakInstances(D);
+ assertWeakClassWeakInstances(E);
+
+ assertWeakClassWeakInstances(class {});
+ assertWeakClassWeakInstances(class extends Object {});
+ assertWeakClassWeakInstances(class extends C {});
+ assertWeakClassWeakInstances(class extends class {} {});
+})();
+
+(function SimpleStrongClassLiterals() {
+ 'use strong';
+ class C {};
+ class D extends C {};
+
+ assertStrongClassStrongInstances(C);
+ assertStrongClassStrongInstances(D);
+
+ assertStrongClassStrongInstances(class {});
+ assertStrongClassStrongInstances(class extends C {});
+ assertStrongClassStrongInstances(class extends class {} {});
+})();
+
+(function MixedWeakClassLiterals() {
+ class C extends getStrongClass() {};
+ class D extends getStrongClassExtends((class {})) {};
+ class E extends getStrongClassExtends(C) {};
+
+ assertWeakClassStrongInstances(C);
+ assertWeakClassStrongInstances(class extends getStrongClass() {});
+
+ assertWeakClassWeakInstances(D);
+ assertWeakClassWeakInstances(
+ class extends getStrongClassExtends((class {})) {});
+
+ assertWeakClassStrongInstances(E);
+ assertWeakClassStrongInstances(
+ class extends getStrongClassExtends(class extends getStrongClass() {}) {});
+})();
+
+(function MixedStrongClassLiterals() {
+ 'use strong';
+ class C extends getWeakClass() {};
+ class D extends getWeakClassExtends((class {})) {};
+ class E extends getWeakClassExtends(C) {};
+ class F extends Object {};
+
+ assertStrongClassWeakInstances(C);
+ assertStrongClassWeakInstances(class extends getWeakClass() {});
+
+ assertStrongClassStrongInstances(D);
+ assertStrongClassStrongInstances(
+ class extends getWeakClassExtends((class {})) {});
+
+ assertStrongClassWeakInstances(E);
+ assertStrongClassWeakInstances(
+ class extends getWeakClassExtends(class extends getWeakClass() {}) {});
+
+ assertStrongClassWeakInstances(F);
+ assertStrongClassWeakInstances(class extends Object {});
+})();
+
+(function WeakMonkeyPatchedClassLiterals() {
+ class C {};
+ assertWeakClassWeakInstances(C);
+ C.__proto__ = getStrongClass();
+ // C's default constructor doesn't call super.
+ assertWeakClassWeakInstances(C);
+
+ class D extends Object {};
+ assertWeakClassWeakInstances(D);
+ D.__proto__ = getStrongClass();
+ // D is a derived class, so its default constructor calls super.
+ assertWeakClassStrongInstances(D);
+
+ class E extends (class {}) {};
+ E.__proto__ = C;
+ assertWeakClassWeakInstances(E);
+
+ class F extends (class {}) {};
+ F.__proto__ = D;
+ assertWeakClassStrongInstances(F);
+
+ class G extends getStrongClass() {};
+ G.__proto__ = getWeakClass();
+ assertWeakClassWeakInstances(G);
+})();
+
+(function StrongMonkeyPatchedClassLiterals() {
+ let C = getStrongClassExtends(getWeakClassExtends(getStrongClass()));
+ let D = getStrongClassExtends(getWeakClassExtends(getWeakClass()));
+
+ assertStrongClassStrongInstances(C);
+ C.__proto__.__proto__ = getWeakClass();
+ assertStrongClassWeakInstances(C);
+ C.__proto__.__proto__ = getStrongClass();
+ assertStrongClassStrongInstances(C);
+
+ assertStrongClassWeakInstances(D);
+ D.__proto__.__proto__ = getStrongClass();
+ assertStrongClassStrongInstances(D);
+ D.__proto__.__proto__ = getWeakClass();
+ assertStrongClassWeakInstances(D);
+})();
diff --git a/deps/v8/test/mjsunit/strong/destructuring.js b/deps/v8/test/mjsunit/strong/destructuring.js
index 67fe2ef4f1..3145dcfb4d 100644
--- a/deps/v8/test/mjsunit/strong/destructuring.js
+++ b/deps/v8/test/mjsunit/strong/destructuring.js
@@ -6,7 +6,10 @@
// Flags: --harmony-arrow-functions --strong-mode --allow-natives-syntax
(function() {
- function f({ x = function() { return []; } }) { "use strong"; return x(); }
+ var f = (function() {
+ "use strong";
+ return function f({ x = function() { return []; } }) { return x(); };
+ })();
var a = f({ x: undefined });
assertTrue(%IsStrong(a));
@@ -19,7 +22,10 @@
assertFalse(%IsStrong(a));
function outerf() { return []; }
- function f2({ x = outerf }) { "use strong"; return x(); }
+ var f2 = (function() {
+ "use strong";
+ return function f2({ x = outerf }) { return x(); };
+ })();
a = f2({ x: undefined });
assertFalse(%IsStrong(a));
})();
diff --git a/deps/v8/test/mjsunit/strong/function-arity.js b/deps/v8/test/mjsunit/strong/function-arity.js
index 4d8833564c..67c0d1f195 100644
--- a/deps/v8/test/mjsunit/strong/function-arity.js
+++ b/deps/v8/test/mjsunit/strong/function-arity.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --strong-mode --harmony-arrow-functions --harmony-reflect
-// Flags: --harmony-spreadcalls --harmony-rest-parameters --allow-natives-syntax
+// Flags: --harmony-spread-calls --harmony-rest-parameters --allow-natives-syntax
'use strict';
@@ -21,7 +21,7 @@ function generateArguments(n, prefix) {
}
-function generateParams(n) {
+function generateParams(n, directive_in_body) {
let a = [];
for (let i = 0; i < n; i++) {
a[i] = `p${i}`;
@@ -29,13 +29,17 @@ function generateParams(n) {
return a.join(', ');
}
-function generateParamsWithRest(n) {
+function generateParamsWithRest(n, directive_in_body) {
let a = [];
let i = 0;
for (; i < n; i++) {
a[i] = `p${i}`;
}
- a.push(`...p${i}`)
+ if (!directive_in_body) {
+    // If a language mode directive occurs in the body, rest parameters will
+    // trigger an early error regardless of language mode.
+ a.push(`...p${i}`);
+ }
return a.join(', ');
}
@@ -68,7 +72,7 @@ function generateSpread(n) {
`f.bind(undefined)(${generateArguments(argumentCount)})`,
`%_CallFunction(${generateArguments(argumentCount, 'undefined')},
f)`,
- `%Call(${generateArguments(argumentCount, 'undefined')}, f)`,
+ `%Call(f, ${generateArguments(argumentCount, 'undefined')})`,
`%Apply(f, undefined, [${generateArguments(argumentCount)}], 0,
${argumentCount})`,
];
@@ -76,6 +80,7 @@ function generateSpread(n) {
for (let call of calls) {
let code = `'use strict'; ${def}; ${call};`;
if (argumentCount < parameterCount) {
+ print(code);
assertThrows(code, TypeError);
} else {
assertDoesNotThrow(code);
@@ -106,13 +111,13 @@ function generateSpread(n) {
for (let parameterCount = 0; parameterCount < 3; parameterCount++) {
let defs = [
`let o = new class {
- m(${genParams(parameterCount)}) { 'use strong'; }
+ m(${genParams(parameterCount, true)}) { 'use strong'; }
}`,
`let o = new class {
- *m(${genParams(parameterCount)}) { 'use strong'; }
+ *m(${genParams(parameterCount, true)}) { 'use strong'; }
}`,
- `let o = { m(${genParams(parameterCount)}) { 'use strong'; } }`,
- `let o = { *m(${genParams(parameterCount)}) { 'use strong'; } }`,
+ `let o = { m(${genParams(parameterCount, true)}) { 'use strong'; } }`,
+ `let o = { *m(${genParams(parameterCount, true)}) { 'use strong'; } }`,
`'use strong';
let o = new class { m(${genParams(parameterCount)}) {} }`,
`'use strong';
@@ -130,7 +135,7 @@ function generateSpread(n) {
`o.m.apply(o, [${generateArguments(argumentCount)}])`,
`o.m.bind(o)(${generateArguments(argumentCount)})`,
`%_CallFunction(${generateArguments(argumentCount, 'o')}, o.m)`,
- `%Call(${generateArguments(argumentCount, 'o')}, o.m)`,
+ `%Call(o.m, ${generateArguments(argumentCount, 'o')})`,
`%Apply(o.m, o, [${generateArguments(argumentCount)}], 0,
${argumentCount})`,
];
@@ -171,10 +176,6 @@ function generateSpread(n) {
let defs = [
`'use strong';
class C { constructor(${genParams(parameterCount)}) {} }`,
- `'use strict';
- class C {
- constructor(${genParams(parameterCount)}) { 'use strong'; }
- }`,
];
for (let def of defs) {
let calls = [
@@ -212,15 +213,6 @@ function generateSpread(n) {
super(${genArgs(argumentCount)});
}
}`,
- `'use strict';
- class B {
- constructor(${genParams(parameterCount)}) { 'use strong'; }
- }
- class C extends B {
- constructor() {
- super(${genArgs(argumentCount)});
- }
- }`,
];
for (let def of defs) {
let code = `${def}; new C();`;
@@ -248,11 +240,6 @@ function generateSpread(n) {
constructor(${genParams(parameterCount)}) {}
}
class C extends B {}`,
- `'use strict';
- class B {
- constructor(${genParams(parameterCount)}) { 'use strong'; }
- }
- class C extends B {}`,
];
for (let def of defs) {
let code = `${def}; new C(${genArgs(argumentCount)})`;
diff --git a/deps/v8/test/mjsunit/strong/literals.js b/deps/v8/test/mjsunit/strong/literals.js
index 8c04d6e35a..a78f793a02 100644
--- a/deps/v8/test/mjsunit/strong/literals.js
+++ b/deps/v8/test/mjsunit/strong/literals.js
@@ -112,14 +112,14 @@
assertWeakArray({a: [], b: {}}.a);
})();
-(function StrongArrayLiterals(...args) {
+(function StrongArrayLiterals() {
'use strong';
function assertStrongArray(x) {
assertTrue(%IsStrong(x));
assertSame(Array.prototype, Object.getPrototypeOf(x));
}
let [...r] = [];
- assertStrongArray(args);
+ assertStrongArray((function(...a) { return a; })());
assertStrongArray(r);
assertStrongArray([]);
assertStrongArray([1, 2, 3]);
@@ -275,66 +275,6 @@ let GeneratorPrototype = (function*(){}).__proto__;
assertStrongGenerator((new class {*m(){'use strong'}}).m);
})();
-(function WeakClassLiterals() {
- function assertWeakClass(x) {
- assertFalse(%IsStrong(x));
- assertFalse(%IsStrong(x.prototype));
- assertFalse(%IsStrong(new x));
- }
- class C {};
- class D extends C {};
- class E extends Object {};
- // class F extends null {};
- const S = (() => {'use strong'; return class {}})();
- class G extends S {};
- assertWeakClass(C);
- assertWeakClass(D);
- assertWeakClass(E);
- // assertWeakClass(F);
- assertWeakClass(G);
- assertWeakClass(class {});
- assertWeakClass(class extends Object {});
- // assertWeakClass(class extends null {});
- assertWeakClass(class extends C {});
- assertWeakClass(class extends S {});
- assertWeakClass(class extends class {} {});
- assertWeakClass(class C {});
- assertWeakClass(class D extends Object {});
- // assertWeakClass(class D extends null {});
- assertWeakClass(class D extends C {});
- assertWeakClass(class D extends S {});
- assertWeakClass(class D extends class {} {});
-})();
-
-(function StrongClassLiterals() {
- 'use strong';
- function assertStrongClass(x) {
- assertTrue(%IsStrong(x));
- assertTrue(%IsStrong(x.prototype));
- // TODO(rossberg): strongify class instance
- // assertTrue(%IsStrong(new x));
- }
- class C {};
- class D extends C {};
- class E extends Object {};
- const W = (1, eval)(() => {'use strict'; return class {}})();
- class G extends W {};
- assertStrongClass(C);
- assertStrongClass(D);
- assertStrongClass(E);
- assertStrongClass(G);
- assertStrongClass(class {});
- assertStrongClass(class extends Object {});
- assertStrongClass(class extends C {});
- assertStrongClass(class extends W {});
- assertStrongClass(class extends class {} {});
- assertStrongClass(class C {});
- assertStrongClass(class D extends Object {});
- assertStrongClass(class D extends C {});
- assertStrongClass(class D extends W {});
- assertStrongClass(class D extends class {} {});
-})();
-
(function WeakRegExpLiterals() {
function assertWeakRegExp(x) {
assertFalse(%IsStrong(x));
diff --git a/deps/v8/test/mjsunit/switch.js b/deps/v8/test/mjsunit/switch.js
index 6a61fe5940..4722e9e5d8 100644
--- a/deps/v8/test/mjsunit/switch.js
+++ b/deps/v8/test/mjsunit/switch.js
@@ -460,3 +460,58 @@ function test_switches(opt) {
test_switches(false);
test_switches(true);
+
+
+// Test labeled and anonymous breaks in switch statements
+(function test_switch_break() {
+ A: for (var i = 1; i < 10; i++) {
+ switch (i) {
+ case 1:
+ break A;
+ }
+ }
+ assertEquals(1, i);
+
+ for (var i = 1; i < 10; i++) {
+ B: switch (i) {
+ case 1:
+ break B;
+ }
+ }
+ assertEquals(10, i);
+
+ for (var i = 1; i < 10; i++) {
+ switch (i) {
+ case 1:
+ break;
+ }
+ }
+ assertEquals(10, i);
+
+ switch (1) {
+ case 1:
+ C: for (var i = 1; i < 10; i++) {
+ break C;
+ }
+ i = 2;
+ }
+ assertEquals(2, i);
+
+ switch (1) {
+ case 1:
+ for (var i = 1; i < 10; i++) {
+ break;
+ }
+ i = 2;
+ }
+ assertEquals(2, i);
+
+ D: switch (1) {
+ case 1:
+ for (var i = 1; i < 10; i++) {
+ break D;
+ }
+ i = 2;
+ }
+ assertEquals(1, i);
+})();
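The rule these cases pin down: an unlabeled break inside a switch always
terminates the switch itself, never an enclosing loop; leaving the loop
requires a labeled break. A compact sketch (illustrative only, not part of
the patch):

  outer: for (var i = 0; i < 5; i++) {
    switch (i) {
      case 1: break;        // leaves only the switch; the loop continues
      case 2: break outer;  // leaves the loop
    }
  }
  console.log(i);  // 2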
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index cf4b6276e4..7af7acf0a9 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -45,14 +45,16 @@ class MjsunitTestSuite(testsuite.TestSuite):
def ListTests(self, context):
tests = []
- for dirname, dirs, files in os.walk(self.root):
+ for dirname, dirs, files in os.walk(self.root, followlinks=True):
for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
dirs.sort()
files.sort()
for filename in files:
if filename.endswith(".js") and filename != "mjsunit.js":
- testname = os.path.join(dirname[len(self.root) + 1:], filename[:-3])
+ fullpath = os.path.join(dirname, filename)
+ relpath = fullpath[len(self.root) + 1 : -3]
+ testname = relpath.replace(os.path.sep, "/")
test = testcase.TestCase(self, testname)
tests.append(test)
return tests
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.only-summary b/deps/v8/test/mjsunit/tools/tickprocessor-test.only-summary
new file mode 100644
index 0000000000..0bee2fc270
--- /dev/null
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.only-summary
@@ -0,0 +1,9 @@
+Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
+
+ [Summary]:
+ ticks total nonlib name
+ 0 0.0% 0.0% JavaScript
+ 5 38.5% 55.6% C++
+ 0 0.0% 0.0% GC
+ 4 30.8% Shared libraries
+ 2 15.4% Unaccounted
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor.js b/deps/v8/test/mjsunit/tools/tickprocessor.js
index b04b9a1765..4ea25f9445 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor.js
+++ b/deps/v8/test/mjsunit/tools/tickprocessor.js
@@ -370,7 +370,7 @@ PrintMonitor.prototype.finish = function() {
function driveTickProcessorTest(
- separateIc, ignoreUnknown, stateFilter, logInput, refOutput) {
+ separateIc, ignoreUnknown, stateFilter, logInput, refOutput, onlySummary) {
// TEST_FILE_NAME must be provided by test runner.
assertEquals('string', typeof TEST_FILE_NAME);
var pathLen = TEST_FILE_NAME.lastIndexOf('/');
@@ -387,7 +387,10 @@ function driveTickProcessorTest(
undefined,
"0",
"auto,auto",
- false);
+ false,
+ false,
+ false,
+ onlySummary);
var pm = new PrintMonitor(testsPath + refOutput);
tp.processLogFileInTest(testsPath + logInput);
tp.printStatistics();
@@ -399,19 +402,23 @@ function driveTickProcessorTest(
var testData = {
'Default': [
false, false, null,
- 'tickprocessor-test.log', 'tickprocessor-test.default'],
+ 'tickprocessor-test.log', 'tickprocessor-test.default', false],
'SeparateIc': [
true, false, null,
- 'tickprocessor-test.log', 'tickprocessor-test.separate-ic'],
+ 'tickprocessor-test.log', 'tickprocessor-test.separate-ic', false],
'IgnoreUnknown': [
false, true, null,
- 'tickprocessor-test.log', 'tickprocessor-test.ignore-unknown'],
+ 'tickprocessor-test.log', 'tickprocessor-test.ignore-unknown', false],
'GcState': [
false, false, TickProcessor.VmStates.GC,
- 'tickprocessor-test.log', 'tickprocessor-test.gc-state'],
+ 'tickprocessor-test.log', 'tickprocessor-test.gc-state', false],
'FunctionInfo': [
false, false, null,
- 'tickprocessor-test-func-info.log', 'tickprocessor-test.func-info']
+ 'tickprocessor-test-func-info.log', 'tickprocessor-test.func-info',
+ false],
+ 'OnlySummary': [
+ false, false, null,
+ 'tickprocessor-test.log', 'tickprocessor-test.only-summary', true]
};
for (var testName in testData) {
print('=== testProcessing-' + testName + ' ===');
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 94278e39b3..d70c922b0d 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -85,6 +85,8 @@
'ecma/String/15.5.4.8-1': [FAIL],
'ecma/String/15.5.4.9-1': [FAIL],
+ # Date.prototype is an Object, not a Date
+ 'ecma/Date/15.9.5': [FAIL],
##################### SKIPPED TESTS #####################
@@ -849,6 +851,15 @@
}], # ALWAYS
+['no_i18n == True and mode == debug', {
+  # Tests too slow for no_i18n debug.
+ 'ecma_3/Statements/regress-302439': [PASS, FAST_VARIANTS],
+ 'js1_5/Regress/regress-98901': [SKIP],
+ 'ecma_3/RegExp/perlstress-001': [PASS, FAST_VARIANTS],
+ 'js1_5/extensions/regress-311161': [FAIL_OK, FAST_VARIANTS],
+}], # 'no_i18n == True and mode == debug'
+
+
['arch == arm or arch == arm64', {
# BUG(3251229): Times out when running new crankshaft test script.
@@ -876,7 +887,7 @@
}], # 'arch == arm64'
-['arch == mipsel or arch == mips64el', {
+['arch == mipsel or arch == mips64el or arch == mips64', {
# BUG(3251229): Times out when running new crankshaft test script.
'ecma_3/RegExp/regress-311414': [SKIP],
@@ -893,7 +904,7 @@
# BUG(1040): Allow this test to timeout.
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
-}], # 'arch == mipsel or arch == mips64el'
+}], # 'arch == mipsel or arch == mips64el or arch == mips64'
['arch == mipsel and simulator_run == True', {
# Crashes due to C stack overflow.
diff --git a/deps/v8/test/mozilla/testcfg.py b/deps/v8/test/mozilla/testcfg.py
index 70a7ac663c..5316105ca5 100644
--- a/deps/v8/test/mozilla/testcfg.py
+++ b/deps/v8/test/mozilla/testcfg.py
@@ -34,11 +34,12 @@ import tarfile
from testrunner.local import testsuite
from testrunner.objects import testcase
+SVN_SERVER = (
+ "svn://svn.chromium.org/chrome/trunk/deps/third_party/mozilla-tests")
+MOZILLA_VERSION = "51236"
-MOZILLA_VERSION = "2010-06-29"
-
-EXCLUDED = ["CVS"]
+EXCLUDED = ["CVS", ".svn"]
FRAMEWORK = """
@@ -81,8 +82,9 @@ class MozillaTestSuite(testsuite.TestSuite):
files.sort()
for filename in files:
if filename.endswith(".js") and not filename in FRAMEWORK:
- testname = os.path.join(dirname[len(self.testroot) + 1:],
- filename[:-3])
+ fullpath = os.path.join(dirname, filename)
+ relpath = fullpath[len(self.testroot) + 1 : -3]
+ testname = relpath.replace(os.path.sep, "/")
case = testcase.TestCase(self, testname)
tests.append(case)
return tests
@@ -93,7 +95,7 @@ class MozillaTestSuite(testsuite.TestSuite):
result += ["--expose-gc"]
result += [os.path.join(self.root, "mozilla-shell-emulation.js")]
testfilename = testcase.path + ".js"
- testfilepath = testfilename.split(os.path.sep)
+ testfilepath = testfilename.split("/")
for i in xrange(len(testfilepath)):
script = os.path.join(self.testroot,
reduce(os.path.join, testfilepath[:i], ""),
@@ -146,9 +148,9 @@ class MozillaTestSuite(testsuite.TestSuite):
os.chdir(old_cwd)
return
- # No cached copy. Check out via CVS, and pack as .tar.gz for later use.
- command = ("cvs -d :pserver:anonymous@cvs-mirror.mozilla.org:/cvsroot"
- " co -D %s mozilla/js/tests" % MOZILLA_VERSION)
+ # No cached copy. Check out via SVN, and pack as .tar.gz for later use.
+ command = ("svn co -r %s %s mozilla/js/tests" %
+ (MOZILLA_VERSION, SVN_SERVER))
code = subprocess.call(command, shell=True)
if code != 0:
os.chdir(old_cwd)
diff --git a/deps/v8/test/simdjs/SimdJs.json b/deps/v8/test/simdjs/SimdJs.json
index ae2a32e308..e0683226d4 100644
--- a/deps/v8/test/simdjs/SimdJs.json
+++ b/deps/v8/test/simdjs/SimdJs.json
@@ -1,6 +1,6 @@
{
"flags": [
- "--harmony-object",
+ "--harmony-simd",
"test/simdjs/harness-adapt.js"
],
"name": "SIMDJS",
@@ -9,7 +9,6 @@
],
"resources": [
"test/simdjs/data/src/benchmarks/base.js",
- "test/simdjs/data/src/ecmascript_simd.js",
"test/simdjs/harness-adapt.js",
"test/simdjs/harness-finish.js",
"test/simdjs/data/src/benchmarks/kernel-template.js",
@@ -239,7 +238,7 @@
]
}
],
- "timeout_arm": 240,
+ "timeout_arm": 480,
"timeout_arm64": 120,
"units": "ms"
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/simdjs/generate.py b/deps/v8/test/simdjs/generate.py
index b100a94ae6..2ddd6d82ab 100755
--- a/deps/v8/test/simdjs/generate.py
+++ b/deps/v8/test/simdjs/generate.py
@@ -37,11 +37,10 @@ output = {
'units': 'ms',
'resources': [
'test/simdjs/data/src/benchmarks/base.js',
- 'test/simdjs/data/src/ecmascript_simd.js',
'test/simdjs/harness-adapt.js',
'test/simdjs/harness-finish.js'
] + ['test/simdjs/data/src/benchmarks/%s.js' % t for t in tests],
- 'flags': ['--harmony-object', 'test/simdjs/harness-adapt.js'],
+ 'flags': ['test/simdjs/harness-adapt.js'],
'path': ['../../'],
'tests': [
{
diff --git a/deps/v8/test/simdjs/harness-adapt.js b/deps/v8/test/simdjs/harness-adapt.js
index c90d6cc9d1..252eb41ba5 100644
--- a/deps/v8/test/simdjs/harness-adapt.js
+++ b/deps/v8/test/simdjs/harness-adapt.js
@@ -21,8 +21,9 @@ load = function(filename) {
}
};
-// TODO(bbudge): Drop when polyfill is not needed.
-load('ecmascript_simd.js');
+// To enable the SIMD polyfill, load ecmascript_simd.js here and add it to
+// the resources in SimdJs.json, as well as to the script that re-generates
+// SimdJs.json.
load('base.js');
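A hypothetical sketch of re-enabling the polyfill as described above (load()
is the helper defined earlier in this file; the paths are the ones removed
from SimdJs.json and generate.py elsewhere in this patch):

  // 1. Load the polyfill before base.js:
  // load('ecmascript_simd.js');
  // 2. Add 'test/simdjs/data/src/ecmascript_simd.js' back to the "resources"
  //    lists in SimdJs.json and in test/simdjs/generate.py.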
diff --git a/deps/v8/test/simdjs/testcfg.py b/deps/v8/test/simdjs/testcfg.py
index cbe880d149..1d02fd2e96 100644
--- a/deps/v8/test/simdjs/testcfg.py
+++ b/deps/v8/test/simdjs/testcfg.py
@@ -14,8 +14,8 @@ from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
-SIMDJS_ARCHIVE_REVISION = "99ef44bd4f22acd203c01e524131bc7f2a7eab68"
-SIMDJS_ARCHIVE_MD5 = "1428773887924fa5a784bf0843615740"
+SIMDJS_ARCHIVE_REVISION = "c8ef63c728283debc25891123eb00482fee4b8cd"
+SIMDJS_ARCHIVE_MD5 = "4c3120d1f5b8027b4a38b931119c89bd"
SIMDJS_URL = ("https://github.com/tc39/ecmascript_simd/archive/%s.tar.gz")
SIMDJS_SUITE_PATH = ["data", "src"]
@@ -44,7 +44,7 @@ class SimdJsTestSuite(testsuite.TestSuite):
def GetFlagsForTestCase(self, testcase, context):
return (testcase.flags + context.mode_flags +
[os.path.join(self.root, "harness-adapt.js"),
- "--harmony",
+ "--harmony", "--harmony-simd",
os.path.join(self.testroot, testcase.path + ".js"),
os.path.join(self.root, "harness-finish.js")])
diff --git a/deps/v8/test/test262-es6/README b/deps/v8/test/test262-es6/README
deleted file mode 100644
index fe3ab232ba..0000000000
--- a/deps/v8/test/test262-es6/README
+++ /dev/null
@@ -1,18 +0,0 @@
-This directory contains code for binding the test262 test suite
-into the v8 test harness. To use the tests check out the test262
-tests from
-
- https://github.com/tc39/test262
-
-at hash c6ac390 (2015/07/06 revision) as 'data' in this directory. Using later
-version may be possible but the tests are only known to pass (and indeed run)
-with that revision.
-
- git clone https://github.com/tc39/test262 data
- cd data
- git checkout c6ac390
-
-If you do update to a newer revision you may have to change the test
-harness adapter code since it uses internal functionality from the
-harness that comes bundled with the tests. You will most likely also
-have to update the test expectation file.
diff --git a/deps/v8/test/test262-es6/harness-adapt.js b/deps/v8/test/test262-es6/harness-adapt.js
deleted file mode 100644
index 60c0858f02..0000000000
--- a/deps/v8/test/test262-es6/harness-adapt.js
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-function fnGlobalObject() { return (function() { return this; })(); }
-
-var ES5Harness = (function() {
- var currentTest = {};
- var $this = this;
-
- function Test262Error(id, path, description, codeString,
- preconditionString, result, error) {
- this.id = id;
- this.path = path;
- this.description = description;
- this.result = result;
- this.error = error;
- this.code = codeString;
- this.pre = preconditionString;
- }
-
- Test262Error.prototype.toString = function() {
- return this.result + " " + this.error;
- }
-
- function registerTest(test) {
- if (!(test.precondition && !test.precondition())) {
- var error;
- try {
- var res = test.test.call($this);
- } catch(e) {
- res = 'fail';
- error = e;
- }
- var retVal = /^s/i.test(test.id)
- ? (res === true || typeof res == 'undefined' ? 'pass' : 'fail')
- : (res === true ? 'pass' : 'fail');
-
- if (retVal != 'pass') {
- var precondition = (test.precondition !== undefined)
- ? test.precondition.toString()
- : '';
-
- throw new Test262Error(
- test.id,
- test.path,
- test.description,
- test.test.toString(),
- precondition,
- retVal,
- error);
- }
- }
- }
-
- return {
- registerTest: registerTest
- }
-})();
-
-function $DONE(arg){
- if (arg) {
- print('FAILED! Error: ' + arg);
- quit(1);
- }
-
- quit(0);
-};
diff --git a/deps/v8/test/test262-es6/test262-es6.status b/deps/v8/test/test262-es6/test262-es6.status
deleted file mode 100644
index b01b46ece2..0000000000
--- a/deps/v8/test/test262-es6/test262-es6.status
+++ /dev/null
@@ -1,814 +0,0 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-[
-[ALWAYS, {
- ############################### BUGS ###################################
-
- # BUG(v8:3455)
- 'intl402/11.2.3_b': [FAIL],
- 'intl402/12.2.3_b': [FAIL],
-
- # BUG(v8:4267)
- 'built-ins/Object/defineProperties/15.2.3.7-6-a-112': [FAIL],
- 'built-ins/Object/defineProperties/15.2.3.7-6-a-113': [FAIL],
- 'built-ins/Object/defineProperties/15.2.3.7-6-a-164': [FAIL],
- 'built-ins/Object/defineProperties/15.2.3.7-6-a-165': [FAIL],
- 'built-ins/Object/defineProperties/15.2.3.7-6-a-166': [FAIL],
- 'built-ins/Object/defineProperties/15.2.3.7-6-a-168': [FAIL],
- 'built-ins/Object/defineProperties/15.2.3.7-6-a-169': [FAIL],
- 'built-ins/Object/defineProperties/15.2.3.7-6-a-170': [FAIL],
- 'built-ins/Object/defineProperties/15.2.3.7-6-a-172': [FAIL],
- 'built-ins/Object/defineProperties/15.2.3.7-6-a-173': [FAIL],
- 'built-ins/Object/defineProperties/15.2.3.7-6-a-175': [FAIL],
- 'built-ins/Object/defineProperties/15.2.3.7-6-a-176': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-116': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-117': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-168': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-169': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-170': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-172': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-173': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-174': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-176': [FAIL],
- 'built-ins/Object/defineProperty/15.2.3.6-4-177': [FAIL],
-
- # Unicode canonicalization is not available with i18n turned off.
- 'built-ins/String/prototype/localeCompare/15.5.4.9_CE': [['no_i18n', SKIP]],
-
- ###################### NEEDS INVESTIGATION #######################
-
- # Possibly same cause as S8.5_A2.1, below: floating-point tests.
- 'built-ins/Math/cos/S15.8.2.7_A7': [PASS, FAIL_OK],
- 'built-ins/Math/sin/S15.8.2.16_A7': [PASS, FAIL_OK],
- 'built-ins/Math/tan/S15.8.2.18_A7': [PASS, FAIL_OK],
-
- # This is an incompatibility between ES5 and V8 on enumerating
- # shadowed elements in a for..in loop.
- # https://code.google.com/p/v8/issues/detail?id=705
- 'language/statements/for-in/12.6.4-2': [PASS, FAIL_OK],
-
- ###################### MISSING ES6 FEATURES #######################
-
- # Class, let, const in sloppy mode.
- # https://code.google.com/p/v8/issues/detail?id=3305
- 'language/block-scope/leave/finally-block-let-declaration-only-shadows-outer-parameter-value-1': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/finally-block-let-declaration-only-shadows-outer-parameter-value-2': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/for-loop-block-let-declaration-only-shadows-outer-parameter-value-1': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/for-loop-block-let-declaration-only-shadows-outer-parameter-value-2': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/nested-block-let-declaration-only-shadows-outer-parameter-value-1': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/nested-block-let-declaration-only-shadows-outer-parameter-value-2': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/outermost-binding-updated-in-catch-block-nested-block-let-declaration-unseen-outside-of-block': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/try-block-let-declaration-only-shadows-outer-parameter-value-1': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/try-block-let-declaration-only-shadows-outer-parameter-value-2': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/verify-context-in-finally-block': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/verify-context-in-for-loop-block': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/verify-context-in-labelled-block': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/verify-context-in-try-block': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/x-after-break-to-label': [PASS, FAIL_SLOPPY],
- 'language/block-scope/leave/x-before-continue': [PASS, FAIL_SLOPPY],
- 'language/block-scope/return-from/block-let': [PASS, FAIL_SLOPPY],
- 'language/block-scope/shadowing/catch-parameter-shadowing-let-declaration': [PASS, FAIL_SLOPPY],
- 'language/block-scope/shadowing/const-declaration-shadowing-catch-parameter': [PASS, FAIL_SLOPPY],
- 'language/block-scope/shadowing/const-declarations-shadowing-parameter-name-let-const-and-var-variables': [PASS, FAIL_SLOPPY],
- 'language/block-scope/shadowing/dynamic-lookup-from-closure': [PASS, FAIL_SLOPPY],
- 'language/block-scope/shadowing/dynamic-lookup-in-and-through-block-contexts': [PASS, FAIL_SLOPPY],
- 'language/block-scope/shadowing/let-declaration-shadowing-catch-parameter': [PASS, FAIL_SLOPPY],
- 'language/block-scope/shadowing/let-declarations-shadowing-parameter-name-let-const-and-var': [PASS, FAIL_SLOPPY],
- 'language/block-scope/shadowing/lookup-from-closure': [PASS, FAIL_SLOPPY],
- 'language/block-scope/shadowing/lookup-in-and-through-block-contexts': [PASS, FAIL_SLOPPY],
- 'language/block-scope/shadowing/parameter-name-shadowing-parameter-name-let-const-and-var': [PASS, FAIL_SLOPPY],
- 'language/block-scope/syntax/for-in/acquire-properties-from-array': [PASS, FAIL_SLOPPY],
- 'language/block-scope/syntax/for-in/acquire-properties-from-object': [PASS, FAIL_SLOPPY],
- 'language/block-scope/syntax/for-in/mixed-values-in-iteration': [PASS, FAIL_SLOPPY],
- 'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-function-declaration-with-function-declaration': [PASS, FAIL_SLOPPY],
- 'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-function-declaration-with-var': [PASS, FAIL_SLOPPY],
- 'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-var-with-function-declaration': [PASS, FAIL_SLOPPY],
- 'language/statements/const/block-local-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
- 'language/statements/const/block-local-use-before-initialization-in-declaration-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/block-local-use-before-initialization-in-prior-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/function-local-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
- 'language/statements/const/function-local-use-before-initialization-in-declaration-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/function-local-use-before-initialization-in-prior-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/global-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
- 'language/statements/const/global-use-before-initialization-in-declaration-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/global-use-before-initialization-in-prior-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/block-scope-syntax-const-declarations-mixed-with-without-initialiser': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/block-scope-syntax-const-declarations-mixed-without-with-initialiser': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/block-scope-syntax-const-declarations-without-initialiser': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/const': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/const-invalid-assignment-statement-body-for-in': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/const-invalid-assignment-statement-body-for-of': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/const-outer-inner-let-bindings': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/with-initializer-do-statement-while-expression': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/with-initializer-for-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/with-initializer-if-expression-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/with-initializer-if-expression-statement-else-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/with-initializer-label-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/with-initializer-while-expression-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/without-initializer-case-expression-statement-list': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/without-initializer-default-statement-list': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/without-initializer-do-statement-while-expression': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/without-initializer-for-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/without-initializer-if-expression-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/without-initializer-if-expression-statement-else-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/without-initializer-label-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/const/syntax/without-initializer-while-expression-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/continue/labeled-continue': [PASS, FAIL_SLOPPY],
- 'language/statements/continue/nested-let-bound-for-loops-inner-continue': [PASS, FAIL_SLOPPY],
- 'language/statements/continue/nested-let-bound-for-loops-labeled-continue': [PASS, FAIL_SLOPPY],
- 'language/statements/continue/nested-let-bound-for-loops-outer-continue': [PASS, FAIL_SLOPPY],
- 'language/statements/continue/no-label-continue': [PASS, FAIL_SLOPPY],
- 'language/statements/continue/shadowing-loop-variable-in-same-scope-as-continue': [PASS, FAIL_SLOPPY],
- 'language/statements/continue/simple-and-labeled': [PASS, FAIL_SLOPPY],
- 'language/statements/for-in/const-bound-names-fordecl-tdz-for-in': [PASS, FAIL_SLOPPY],
- 'language/statements/for-in/const-fresh-binding-per-iteration-for-in': [PASS, FAIL_SLOPPY],
- 'language/statements/for-in/let-bound-names-fordecl-tdz-for-in': [PASS, FAIL_SLOPPY],
- 'language/statements/for-in/let-fresh-binding-per-iteration-for-in': [PASS, FAIL_SLOPPY],
- 'language/statements/for-of/const-bound-names-fordecl-tdz-for-of': [PASS, FAIL_SLOPPY],
- 'language/statements/for-of/const-fresh-binding-per-iteration-for-of': [PASS, FAIL_SLOPPY],
- 'language/statements/for-of/let-bound-names-fordecl-tdz-for-of': [PASS, FAIL_SLOPPY],
- 'language/statements/for-of/let-fresh-binding-per-iteration-for-of': [PASS, FAIL_SLOPPY],
- 'language/statements/for/const-fresh-binding-per-iteration-for': [PASS, FAIL_SLOPPY],
- 'language/statements/for/let-fresh-binding-per-iteration-for': [PASS, FAIL_SLOPPY],
- 'language/statements/let/block-local-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
- 'language/statements/let/block-local-closure-set-before-initialization': [PASS, FAIL_SLOPPY],
- 'language/statements/let/block-local-use-before-initialization-in-declaration-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/let/block-local-use-before-initialization-in-prior-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/let/function-local-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
- 'language/statements/let/function-local-closure-set-before-initialization': [PASS, FAIL_SLOPPY],
- 'language/statements/let/function-local-use-before-initialization-in-declaration-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/let/function-local-use-before-initialization-in-prior-statement': [PASS, FAIL_SLOPPY],
- 'language/statements/let/global-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
- 'language/statements/let/global-closure-set-before-initialization': [PASS, FAIL_SLOPPY],
- 'language/statements/let/syntax/let': [PASS, FAIL_SLOPPY],
- 'language/statements/let/syntax/let-closure-inside-condition': [PASS, FAIL_SLOPPY],
- 'language/statements/let/syntax/let-closure-inside-initialization': [PASS, FAIL_SLOPPY],
- 'language/statements/let/syntax/let-closure-inside-next-expression': [PASS, FAIL_SLOPPY],
- 'language/statements/let/syntax/let-iteration-variable-is-freshly-allocated-for-each-iteration-multi-let-binding': [PASS, FAIL_SLOPPY],
- 'language/statements/let/syntax/let-iteration-variable-is-freshly-allocated-for-each-iteration-single-let-binding': [PASS, FAIL_SLOPPY],
- 'language/statements/let/syntax/let-outer-inner-let-bindings': [PASS, FAIL_SLOPPY],
- 'language/statements/let/syntax/with-initialisers-in-statement-positions-case-expression-statement-list': [PASS, FAIL_SLOPPY],
- 'language/statements/let/syntax/with-initialisers-in-statement-positions-default-statement-list': [PASS, FAIL_SLOPPY],
- 'language/statements/let/syntax/without-initialisers-in-statement-positions-case-expression-statement-list': [PASS, FAIL_SLOPPY],
- 'language/statements/let/syntax/without-initialisers-in-statement-positions-default-statement-list': [PASS, FAIL_SLOPPY],
-
- # https://code.google.com/p/v8/issues/detail?id=3305
- # This times out in sloppy mode because sloppy const assignment does not throw.
- 'language/statements/const/syntax/const-invalid-assignment-next-expression-for': [PASS, FAIL, TIMEOUT],
-
- # Number/Boolean.prototype is a plain object in ES6
- # https://code.google.com/p/v8/issues/detail?id=4001
- 'built-ins/Boolean/prototype/S15.6.3.1_A1': [FAIL],
- 'built-ins/Boolean/prototype/S15.6.4_A1': [FAIL],
- 'built-ins/Boolean/prototype/toString/S15.6.4.2_A1_T1': [FAIL],
- 'built-ins/Boolean/prototype/toString/S15.6.4.2_A1_T2': [FAIL],
- 'built-ins/Boolean/prototype/valueOf/S15.6.4.3_A1_T1': [FAIL],
- 'built-ins/Boolean/prototype/valueOf/S15.6.4.3_A1_T2': [FAIL],
- 'built-ins/Number/15.7.4-1': [FAIL],
- 'built-ins/Number/prototype/S15.7.3.1_A2_*': [FAIL],
- 'built-ins/Number/prototype/S15.7.3.1_A3': [FAIL],
- 'built-ins/Number/prototype/S15.7.4_A1': [FAIL],
- 'built-ins/Number/prototype/toFixed/S15.7.4.5_A1.1_T01': [FAIL],
- 'built-ins/Number/prototype/toString/S15.7.4.2_A1_*': [FAIL],
- 'built-ins/Number/prototype/toString/S15.7.4.2_A2_*': [FAIL],
- 'built-ins/Number/prototype/valueOf/S15.7.4.4_A1_*': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=3087
- 'built-ins/Array/prototype/every/15.4.4.16-3-12': [FAIL],
- 'built-ins/Array/prototype/every/15.4.4.16-3-14': [FAIL],
- 'built-ins/Array/prototype/every/15.4.4.16-3-25': [FAIL],
- 'built-ins/Array/prototype/every/15.4.4.16-3-29': [FAIL],
- 'built-ins/Array/prototype/every/15.4.4.16-3-7': [FAIL],
- 'built-ins/Array/prototype/every/15.4.4.16-3-8': [FAIL],
- 'built-ins/Array/prototype/filter/15.4.4.20-3-12': [FAIL],
- 'built-ins/Array/prototype/filter/15.4.4.20-3-25': [FAIL],
- 'built-ins/Array/prototype/filter/15.4.4.20-3-7': [FAIL],
- 'built-ins/Array/prototype/forEach/15.4.4.18-3-12': [FAIL],
- 'built-ins/Array/prototype/forEach/15.4.4.18-3-25': [FAIL],
- 'built-ins/Array/prototype/forEach/15.4.4.18-3-7': [FAIL],
- 'built-ins/Array/prototype/indexOf/15.4.4.14-3-12': [FAIL],
- 'built-ins/Array/prototype/indexOf/15.4.4.14-3-14': [FAIL],
- 'built-ins/Array/prototype/indexOf/15.4.4.14-3-25': [FAIL],
- 'built-ins/Array/prototype/indexOf/15.4.4.14-3-28': [FAIL],
- 'built-ins/Array/prototype/indexOf/15.4.4.14-3-29': [FAIL],
- 'built-ins/Array/prototype/indexOf/15.4.4.14-3-7': [FAIL],
- 'built-ins/Array/prototype/indexOf/15.4.4.14-3-8': [FAIL],
- 'built-ins/Array/prototype/join/S15.4.4.5_A4_T3': [FAIL],
- 'built-ins/Array/prototype/lastIndexOf/15.4.4.15-3-12': [FAIL],
- 'built-ins/Array/prototype/lastIndexOf/15.4.4.15-3-25': [FAIL],
- 'built-ins/Array/prototype/lastIndexOf/15.4.4.15-3-28': [FAIL],
- 'built-ins/Array/prototype/lastIndexOf/15.4.4.15-3-7': [FAIL],
- 'built-ins/Array/prototype/map/15.4.4.19-3-12': [FAIL],
- 'built-ins/Array/prototype/map/15.4.4.19-3-14': [FAIL],
- 'built-ins/Array/prototype/map/15.4.4.19-3-25': [FAIL],
- 'built-ins/Array/prototype/map/15.4.4.19-3-28': [FAIL],
- 'built-ins/Array/prototype/map/15.4.4.19-3-29': [FAIL],
- 'built-ins/Array/prototype/map/15.4.4.19-3-7': [FAIL],
- 'built-ins/Array/prototype/map/15.4.4.19-3-8': [FAIL],
- 'built-ins/Array/prototype/pop/S15.4.4.6_A2_T2': [FAIL],
- 'built-ins/Array/prototype/pop/S15.4.4.6_A3_T1': [FAIL],
- 'built-ins/Array/prototype/pop/S15.4.4.6_A3_T2': [FAIL],
- 'built-ins/Array/prototype/pop/S15.4.4.6_A3_T3': [FAIL],
- 'built-ins/Array/prototype/push/S15.4.4.7_A2_T2': [FAIL],
- 'built-ins/Array/prototype/push/S15.4.4.7_A4_T1': [FAIL],
- 'built-ins/Array/prototype/push/S15.4.4.7_A4_T3': [FAIL],
- 'built-ins/Array/prototype/reduce/15.4.4.21-3-12': [FAIL],
- 'built-ins/Array/prototype/reduce/15.4.4.21-3-25': [FAIL],
- 'built-ins/Array/prototype/reduce/15.4.4.21-3-7': [FAIL],
- 'built-ins/Array/prototype/reduceRight/15.4.4.22-3-12': [FAIL],
- 'built-ins/Array/prototype/reduceRight/15.4.4.22-3-25': [FAIL],
- 'built-ins/Array/prototype/reduceRight/15.4.4.22-3-7': [FAIL],
- 'built-ins/Array/prototype/reverse/S15.4.4.8_A3_T3': [FAIL],
- 'built-ins/Array/prototype/shift/S15.4.4.9_A3_T3': [FAIL],
- 'built-ins/Array/prototype/slice/S15.4.4.10_A3_T1': [FAIL],
- 'built-ins/Array/prototype/slice/S15.4.4.10_A3_T2': [FAIL],
- 'built-ins/Array/prototype/slice/S15.4.4.10_A3_T3': [FAIL],
- 'built-ins/Array/prototype/some/15.4.4.17-3-12': [FAIL],
- 'built-ins/Array/prototype/some/15.4.4.17-3-14': [FAIL],
- 'built-ins/Array/prototype/some/15.4.4.17-3-25': [FAIL],
- 'built-ins/Array/prototype/some/15.4.4.17-3-28': [FAIL],
- 'built-ins/Array/prototype/some/15.4.4.17-3-29': [FAIL],
- 'built-ins/Array/prototype/some/15.4.4.17-3-7': [FAIL],
- 'built-ins/Array/prototype/some/15.4.4.17-3-8': [FAIL],
- 'built-ins/Array/prototype/sort/S15.4.4.11_A4_T3': [FAIL],
- 'built-ins/Array/prototype/splice/S15.4.4.12_A3_T1': [FAIL],
- 'built-ins/Array/prototype/splice/S15.4.4.12_A3_T3': [FAIL],
- 'built-ins/Array/prototype/unshift/S15.4.4.13_A3_T2': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=1543
- 'built-ins/Proxy/*': [FAIL],
- 'built-ins/Array/prototype/find/Array.prototype.find_callable-Proxy-1': [FAIL],
- 'built-ins/Array/prototype/find/Array.prototype.find_callable-Proxy-2': [FAIL],
- 'built-ins/Object/assign/source-own-prop-desc-missing': [FAIL],
- 'built-ins/Object/assign/source-own-prop-error': [FAIL],
- 'built-ins/Object/assign/source-own-prop-keys-error': [FAIL],
- 'built-ins/Object/setPrototypeOf/set-error': [FAIL],
- 'language/expressions/object/prop-def-id-eval-error-2': [FAIL],
- 'language/statements/for-of/iterator-as-proxy': [FAIL],
- 'language/statements/for-of/iterator-next-result-type': [FAIL],
- 'built-ins/Array/of/return-abrupt-from-data-property-using-proxy': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4093
- 'built-ins/Array/symbol-species': [FAIL],
- 'built-ins/Array/symbol-species-name': [FAIL],
- 'built-ins/ArrayBuffer/symbol-species': [FAIL],
- 'built-ins/ArrayBuffer/symbol-species-name': [FAIL],
- 'built-ins/Map/symbol-species': [FAIL],
- 'built-ins/Map/symbol-species-name': [FAIL],
- 'built-ins/Promise/Symbol.species/prop-desc': [FAIL],
- 'built-ins/Promise/Symbol.species/return-value': [FAIL],
- 'built-ins/Promise/all/species-get-error': [FAIL],
- 'built-ins/Promise/prototype/then/ctor-custom': [FAIL],
- 'built-ins/Promise/race/species-get-error': [FAIL],
- 'built-ins/Promise/symbol-species': [FAIL],
- 'built-ins/Promise/symbol-species-name': [FAIL],
- 'built-ins/RegExp/symbol-species': [FAIL],
- 'built-ins/RegExp/symbol-species-name': [FAIL],
- 'built-ins/Set/symbol-species': [FAIL],
- 'built-ins/Set/symbol-species-name': [FAIL],
- 'built-ins/Symbol/species/basic': [FAIL],
- 'built-ins/Symbol/species/builtin-getter-name': [FAIL],
- 'built-ins/Symbol/species/subclassing': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4242
- 'built-ins/Date/15.9.1.15-1': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4004
- 'built-ins/Date/prototype/setFullYear/15.9.5.40_1': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4002
- 'built-ins/Error/prototype/S15.11.4_A2': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4163
- 'built-ins/GeneratorPrototype/next/context-constructor-invocation': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=3566
- 'built-ins/Array/from/iter-map-fn-err': [FAIL],
- 'built-ins/Array/from/iter-set-elem-prop-err': [FAIL],
- 'built-ins/Map/iterator-close-after-set-failure': [FAIL],
- 'built-ins/Map/iterator-item-first-entry-returns-abrupt': [FAIL],
- 'built-ins/Map/iterator-item-second-entry-returns-abrupt': [FAIL],
- 'built-ins/Map/iterator-items-are-not-object-close-iterator': [FAIL],
- 'built-ins/Promise/all/iter-close': [FAIL],
- 'built-ins/Set/set-iterator-close-after-add-failure': [FAIL],
- 'built-ins/WeakMap/iterator-close-after-set-failure': [FAIL],
- 'built-ins/WeakMap/iterator-item-first-entry-returns-abrupt': [FAIL],
- 'built-ins/WeakMap/iterator-item-second-entry-returns-abrupt': [FAIL],
- 'built-ins/WeakMap/iterator-items-are-not-object-close-iterator': [FAIL],
- 'built-ins/WeakSet/iterator-close-after-add-failure': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=3715
- 'built-ins/Object/getOwnPropertyDescriptor/15.2.3.3-4-212': [FAIL],
- 'built-ins/Object/getOwnPropertyDescriptor/15.2.3.3-4-213': [FAIL],
- 'built-ins/Object/getOwnPropertyDescriptor/15.2.3.3-4-214': [FAIL],
- 'built-ins/Object/getOwnPropertyDescriptor/15.2.3.3-4-215': [FAIL],
- 'built-ins/RegExp/prototype/global/15.10.7.2-1': [FAIL],
- 'built-ins/RegExp/prototype/global/15.10.7.2-2': [FAIL],
- 'built-ins/RegExp/prototype/global/S15.10.7.2_A9': [FAIL],
- 'built-ins/RegExp/prototype/ignoreCase/15.10.7.3-1': [FAIL],
- 'built-ins/RegExp/prototype/ignoreCase/15.10.7.3-2': [FAIL],
- 'built-ins/RegExp/prototype/ignoreCase/S15.10.7.3_A9': [FAIL],
- 'built-ins/RegExp/prototype/lastIndex/15.10.7.5-1': [FAIL],
- 'built-ins/RegExp/prototype/lastIndex/15.10.7.5-2': [FAIL],
- 'built-ins/RegExp/prototype/multiline/15.10.7.4-1': [FAIL],
- 'built-ins/RegExp/prototype/multiline/15.10.7.4-2': [FAIL],
- 'built-ins/RegExp/prototype/multiline/S15.10.7.4_A9': [FAIL],
- 'built-ins/RegExp/prototype/source/15.10.7.1-1': [FAIL],
- 'built-ins/RegExp/prototype/source/15.10.7.1-2': [FAIL],
- 'built-ins/RegExp/prototype/source/S15.10.7.1_A9': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4243
- 'built-ins/Promise/race/S25.4.4.3_A3.1_T2': [FAIL],
- 'built-ins/Promise/reject/S25.4.4.4_A3.1_T1': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4341
- 'built-ins/Promise/resolve/arg-uniq-ctor': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4119
- 'built-ins/RegExp/15.10.4.1-1': [FAIL],
- 'built-ins/RegExp/S15.10.3.1_A2_T1': [FAIL],
- 'built-ins/RegExp/S15.10.3.1_A2_T2': [FAIL],
- 'built-ins/RegExp/S15.10.4.1_A2_T1': [FAIL],
- 'built-ins/RegExp/S15.10.4.1_A2_T2': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4003
- 'built-ins/RegExp/prototype/15.10.6': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4244
- 'built-ins/RegExp/prototype/exec/S15.10.6.2_A5_T3': [FAIL],
- 'built-ins/RegExp/prototype/test/S15.10.6.3_A1_T22': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4006
- 'built-ins/String/prototype/S15.5.4_A1': [FAIL],
- 'built-ins/String/prototype/S15.5.4_A2': [FAIL],
- 'built-ins/String/prototype/S15.5.4_A3': [FAIL],
- 'language/expressions/property-accessors/S11.2.1_A4_T5': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4245
- 'built-ins/String/prototype/split/S15.5.4.14_A2_T37': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4348
- 'built-ins/String/prototype/Symbol.iterator/this-val-non-obj-coercible': [FAIL],
-
- # The order of adding the name property is wrong
- # https://code.google.com/p/v8/issues/detail?id=4199
- 'language/computed-property-names/class/static/method-number': [FAIL, FAIL_SLOPPY],
- 'language/computed-property-names/class/static/method-symbol': [FAIL, FAIL_SLOPPY],
- 'language/computed-property-names/class/static/method-string': [FAIL, FAIL_SLOPPY],
-
- # This should work as soon as rest parameters are re-implemented via desugaring.
- 'language/expressions/arrow-function/syntax/early-errors/arrowparameters-cover-no-duplicates-rest': [PASS, FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=2160
- 'language/expressions/arrow-function/syntax/arrowparameters-cover-initialize-1': [FAIL],
- 'language/expressions/arrow-function/syntax/arrowparameters-cover-initialize-2': [FAIL],
- 'language/expressions/object/method-definition/generator-super-prop-param': [FAIL],
- 'language/expressions/object/method-definition/name-param-init-yield': [FAIL],
- 'language/expressions/object/method-definition/name-super-prop-param': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=3673
- 'language/statements/class/definition/basics': [FAIL],
-
- # Destructuring
- # https://code.google.com/p/v8/issues/detail?id=811
- 'language/statements/for-of/body-dstr-assign': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=3566
- 'language/statements/for-of/body-dstr-assign-error': [FAIL],
- 'language/statements/for-of/body-put-error': [FAIL],
- 'language/statements/for-of/generator-close-via-break': [FAIL],
- 'language/statements/for-of/generator-close-via-return': [FAIL],
- 'language/statements/for-of/generator-close-via-throw': [FAIL],
- 'language/statements/for-of/iterator-close-get-method-error': [FAIL],
- 'language/statements/for-of/iterator-close-non-object': [FAIL],
- 'language/statements/for-of/iterator-close-via-break': [FAIL],
- 'language/statements/for-of/iterator-close-via-return': [FAIL],
- 'language/statements/for-of/iterator-close-via-throw': [FAIL],
-
- # We do not expose Array.prototype.values; see the sketch after these entries
- # https://code.google.com/p/v8/issues/detail?id=4247
- 'built-ins/Array/prototype/Symbol.iterator': [FAIL],
- 'built-ins/Array/prototype/values/returns-iterator': [FAIL],
- 'built-ins/Array/prototype/values/returns-iterator-from-object': [FAIL],
- 'built-ins/Array/prototype/values/prop-desc': [FAIL],
- 'built-ins/Array/prototype/values/name': [FAIL],
- 'built-ins/Array/prototype/values/length': [FAIL],
- 'built-ins/Array/prototype/values/iteration': [FAIL],
- 'built-ins/Array/prototype/values/iteration-mutable': [FAIL],
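- # A hedged illustration of the failures above (not a test): since 'values'
- # is simply absent from Array.prototype in this build,
- #   typeof Array.prototype.values;   // 'undefined' here
- # every test touching it fails.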
-
- # https://code.google.com/p/v8/issues/detail?id=3983
- 'language/expressions/generators/yield-as-function-expression-binding-identifier': [FAIL],
- 'language/expressions/generators/yield-as-generator-expression-binding-identifier': [FAIL],
- 'language/expressions/object/method-definition/generator-argSuperProperty': [FAIL],
- 'language/expressions/object/method-definition/yield-as-function-expression-binding-identifier': [FAIL],
- 'language/statements/generators/yield-as-function-expression-binding-identifier': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=3566
- 'built-ins/GeneratorPrototype/return/from-state-completed': [FAIL],
- 'built-ins/GeneratorPrototype/return/from-state-suspended-start': [FAIL],
- 'built-ins/GeneratorPrototype/return/property-descriptor': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-catch-before-try': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-catch-following-catch': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-catch-within-catch': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-catch-within-try': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-before-try': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-following-finally': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-catch': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-finally': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-inner-try': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-outer-try-after-nested': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-outer-try-before-nested': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-within-finally': [FAIL],
- 'built-ins/GeneratorPrototype/return/try-finally-within-try': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=811
- 'language/expressions/assignment/destructuring/*': [SKIP],
-
- # https://code.google.com/p/v8/issues/detail?id=4248
- 'language/expressions/compound-assignment/S11.13.2_A5.*': [FAIL],
- 'language/expressions/compound-assignment/S11.13.2_A6.*': [FAIL],
- 'language/expressions/compound-assignment/S11.13.2_A7.10_T4': [FAIL],
- 'language/expressions/compound-assignment/S11.13.2_A7.11_T4': [FAIL],
- 'language/expressions/compound-assignment/S11.13.2_A7.1_T4': [FAIL],
- 'language/expressions/compound-assignment/S11.13.2_A7.2_T4': [FAIL],
- 'language/expressions/compound-assignment/S11.13.2_A7.3_T4': [FAIL],
- 'language/expressions/compound-assignment/S11.13.2_A7.4_T4': [FAIL],
- 'language/expressions/compound-assignment/S11.13.2_A7.5_T4': [FAIL],
- 'language/expressions/compound-assignment/S11.13.2_A7.6_T4': [FAIL],
- 'language/expressions/compound-assignment/S11.13.2_A7.7_T4': [FAIL],
- 'language/expressions/compound-assignment/S11.13.2_A7.8_T4': [FAIL],
- 'language/expressions/compound-assignment/S11.13.2_A7.9_T4': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4249
- 'language/expressions/assignment/S11.13.1_A7_T1': [FAIL],
- 'language/expressions/assignment/S11.13.1_A7_T2': [FAIL],
- 'language/expressions/assignment/S11.13.1_A7_T3': [FAIL],
- 'language/expressions/postfix-increment/S11.3.1_A6_T3': [FAIL],
- 'language/expressions/postfix-decrement/S11.3.2_A6_T3': [FAIL],
- 'language/expressions/prefix-decrement/S11.4.5_A6_T3': [FAIL],
- 'language/expressions/prefix-increment/S11.4.4_A6_T3': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4250
- 'language/expressions/assignment/S11.13.1_A5*': [FAIL],
- 'language/expressions/assignment/S11.13.1_A6*': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=3699
- 'built-ins/Function/instance-name': [FAIL],
- 'built-ins/GeneratorFunction/instance-name': [FAIL],
- 'language/expressions/assignment/fn-name-arrow': [FAIL],
- 'language/expressions/assignment/fn-name-class': [FAIL],
- 'language/expressions/assignment/fn-name-cover': [FAIL],
- 'language/expressions/assignment/fn-name-fn': [FAIL],
- 'language/expressions/assignment/fn-name-gen': [FAIL],
- 'language/expressions/assignment/fn-name-lhs-cover': [FAIL],
- 'language/expressions/assignment/fn-name-lhs-member': [FAIL],
- 'language/expressions/class/name': [FAIL],
- 'language/expressions/function/name': [FAIL],
- 'language/expressions/generators/implicit-name': [FAIL],
- 'language/expressions/generators/name': [FAIL],
- 'language/expressions/generators/name-property-descriptor': [FAIL],
- 'language/expressions/object/fn-name-accessor-get': [FAIL],
- 'language/expressions/object/fn-name-accessor-set': [FAIL],
- 'language/expressions/object/fn-name-arrow': [FAIL],
- 'language/expressions/object/fn-name-class': [FAIL],
- 'language/expressions/object/fn-name-cover': [FAIL],
- 'language/expressions/object/fn-name-fn': [FAIL],
- 'language/expressions/object/fn-name-gen': [FAIL],
- 'language/expressions/object/fn-name-lhs-cover': [FAIL],
- 'language/expressions/object/fn-name-lhs-member': [FAIL],
- 'language/expressions/object/method-definition/fn-name-accessor-get': [FAIL],
- 'language/expressions/object/method-definition/fn-name-accessor-set': [FAIL],
- 'language/expressions/object/method-definition/fn-name-arrow': [FAIL],
- 'language/expressions/object/method-definition/fn-name-class': [FAIL],
- 'language/expressions/object/method-definition/fn-name-cover': [FAIL],
- 'language/expressions/object/method-definition/fn-name-fn': [FAIL],
- 'language/expressions/object/method-definition/fn-name-gen': [FAIL],
- 'language/statements/class/definition/fn-name-accessor-get': [FAIL],
- 'language/statements/class/definition/fn-name-accessor-set': [FAIL],
- 'language/statements/class/definition/fn-name-gen-method': [FAIL],
- 'language/statements/class/definition/fn-name-method': [FAIL],
- 'language/statements/const/fn-name-arrow': [FAIL],
- 'language/statements/const/fn-name-class': [FAIL],
- 'language/statements/const/fn-name-cover': [FAIL],
- 'language/statements/const/fn-name-fn': [FAIL],
- 'language/statements/const/fn-name-gen': [FAIL],
- 'language/statements/let/fn-name-arrow': [FAIL],
- 'language/statements/let/fn-name-class': [FAIL],
- 'language/statements/let/fn-name-cover': [FAIL],
- 'language/statements/let/fn-name-fn': [FAIL],
- 'language/statements/let/fn-name-gen': [FAIL],
- 'language/statements/variable/fn-name-arrow': [FAIL],
- 'language/statements/variable/fn-name-class': [FAIL],
- 'language/statements/variable/fn-name-cover': [FAIL],
- 'language/statements/variable/fn-name-fn': [FAIL],
- 'language/statements/variable/fn-name-gen': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4251
- 'language/expressions/postfix-increment/S11.3.1_A5_T1': [FAIL],
- 'language/expressions/postfix-increment/S11.3.1_A5_T2': [FAIL],
- 'language/expressions/postfix-increment/S11.3.1_A5_T3': [FAIL],
- 'language/expressions/postfix-increment/S11.3.1_A5_T4': [FAIL],
- 'language/expressions/postfix-increment/S11.3.1_A5_T5': [FAIL],
- 'language/expressions/postfix-decrement/S11.3.2_A5_*': [FAIL],
- 'language/expressions/prefix-decrement/S11.4.5_A5_*': [FAIL],
- 'language/expressions/prefix-increment/S11.4.4_A5_*': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4253
- 'language/asi/S7.9_A5.7_T1': [PASS, FAIL_OK],
-
- # https://code.google.com/p/v8/issues/detail?id=3761
- 'language/expressions/object/method-definition/generator-name-prop-symbol': [FAIL],
- 'language/expressions/object/method-definition/name-name-prop-symbol': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4317
- 'built-ins/Array/prototype/concat/is-concat-spreadable-val-falsey': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=2952
- 'built-ins/RegExp/prototype/exec/u-lastindex-adv': [FAIL],
- 'built-ins/RegExp/prototype/exec/u-captured-value': [FAIL],
- 'built-ins/RegExp/prototype/exec/u-lastindex-value': [FAIL],
- 'built-ins/RegExp/prototype/test/u-captured-value': [FAIL],
- 'built-ins/RegExp/prototype/test/u-lastindex-adv': [FAIL],
- 'built-ins/RegExp/prototype/test/u-lastindex-value': [FAIL],
- 'built-ins/RegExp/prototype/unicode/length': [FAIL],
- 'built-ins/RegExp/prototype/unicode/name': [FAIL],
- 'built-ins/RegExp/prototype/unicode/prop-desc': [FAIL],
- 'built-ins/RegExp/prototype/unicode/this-invald-obj': [FAIL],
- 'built-ins/RegExp/prototype/unicode/this-non-obj': [FAIL],
- 'built-ins/RegExp/prototype/unicode/this-regexp': [FAIL],
- 'built-ins/RegExp/unicode_identity_escape': [FAIL],
- 'language/literals/regexp/u-unicode-esc': [FAIL],
- 'language/literals/regexp/u-surrogate-pairs': [FAIL],
- 'language/literals/regexp/u-case-mapping': [FAIL],
- 'language/literals/regexp/u-astral': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4342
- 'built-ins/RegExp/prototype/exec/get-sticky-coerce': [FAIL],
- 'built-ins/RegExp/prototype/exec/get-sticky-err': [FAIL],
- 'built-ins/RegExp/prototype/exec/y-fail-lastindex': [FAIL],
- 'built-ins/RegExp/prototype/exec/y-fail-lastindex-no-write': [FAIL],
- 'built-ins/RegExp/prototype/exec/y-fail-return': [FAIL],
- 'built-ins/RegExp/prototype/exec/y-fail-lastindex': [FAIL],
- 'built-ins/RegExp/prototype/exec/y-init-lastindex': [FAIL],
- 'built-ins/RegExp/prototype/exec/y-set-lastindex': [FAIL],
- 'built-ins/RegExp/prototype/sticky/prop-desc': [FAIL],
- 'built-ins/RegExp/prototype/sticky/this-invalid-obj': [FAIL],
- 'built-ins/RegExp/prototype/sticky/this-non-obj': [FAIL],
- 'built-ins/RegExp/prototype/sticky/this-regexp': [FAIL],
- 'built-ins/RegExp/prototype/test/get-sticky-coerce': [FAIL],
- 'built-ins/RegExp/prototype/test/get-sticky-err': [FAIL],
- 'built-ins/RegExp/prototype/test/y-fail-lastindex-no-write': [FAIL],
- 'built-ins/RegExp/prototype/test/y-fail-return': [FAIL],
- 'built-ins/RegExp/prototype/test/y-fail-lastindex': [FAIL],
- 'built-ins/RegExp/prototype/test/y-init-lastindex': [FAIL],
- 'built-ins/RegExp/prototype/test/y-set-lastindex': [FAIL],
- 'built-ins/RegExp/valid-flags-y': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4305
- 'built-ins/RegExp/prototype/Symbol.match/*': [FAIL],
- 'built-ins/String/prototype/endsWith/return-abrupt-from-searchstring-regexp-test': [FAIL],
- 'built-ins/String/prototype/includes/return-abrupt-from-searchstring-regexp-test': [FAIL],
- 'built-ins/String/prototype/startsWith/return-abrupt-from-searchstring-regexp-test': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4343
- 'built-ins/RegExp/prototype/Symbol.replace/*': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4344
- 'built-ins/RegExp/prototype/Symbol.search/*': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4345
- 'built-ins/RegExp/prototype/Symbol.split/*': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4346
- 'built-ins/RegExp/prototype/flags/*': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4347
- 'built-ins/RegExp/prototype/global/name': [FAIL],
- 'built-ins/RegExp/prototype/ignoreCase/name': [FAIL],
- 'built-ins/RegExp/prototype/multiline/name': [FAIL],
- 'built-ins/RegExp/prototype/source/name': [FAIL],
- 'built-ins/RegExp/prototype/sticky/name': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4360
- 'intl402/Collator/10.1.1_1': [FAIL],
- 'intl402/DateTimeFormat/12.1.1_1': [FAIL],
- 'intl402/NumberFormat/11.1.1_1': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4361
- 'intl402/Collator/10.1.1_a': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=1972
- 'language/identifiers/val-break-via-escape-hex': [FAIL],
- 'language/identifiers/val-break-via-escape-hex4': [FAIL],
- 'language/identifiers/val-case-via-escape-hex': [FAIL],
- 'language/identifiers/val-case-via-escape-hex4': [FAIL],
- 'language/identifiers/val-catch-via-escape-hex': [FAIL],
- 'language/identifiers/val-catch-via-escape-hex4': [FAIL],
- 'language/identifiers/val-class-via-escape-hex': [FAIL],
- 'language/identifiers/val-class-via-escape-hex4': [FAIL],
- 'language/identifiers/val-const-via-escape-hex': [FAIL],
- 'language/identifiers/val-const-via-escape-hex4': [FAIL],
- 'language/identifiers/val-continue-via-escape-hex': [FAIL],
- 'language/identifiers/val-continue-via-escape-hex4': [FAIL],
- 'language/identifiers/val-debugger-via-escape-hex': [FAIL],
- 'language/identifiers/val-debugger-via-escape-hex4': [FAIL],
- 'language/identifiers/val-default-via-escape-hex': [FAIL],
- 'language/identifiers/val-default-via-escape-hex4': [FAIL],
- 'language/identifiers/val-delete-via-escape-hex': [FAIL],
- 'language/identifiers/val-delete-via-escape-hex4': [FAIL],
- 'language/identifiers/val-do-via-escape-hex': [FAIL],
- 'language/identifiers/val-do-via-escape-hex4': [FAIL],
- 'language/identifiers/val-else-via-escape-hex': [FAIL],
- 'language/identifiers/val-else-via-escape-hex4': [FAIL],
- 'language/identifiers/val-enum-via-escape-hex': [FAIL],
- 'language/identifiers/val-enum-via-escape-hex4': [FAIL],
- 'language/identifiers/val-export-via-escape-hex': [FAIL],
- 'language/identifiers/val-export-via-escape-hex4': [FAIL],
- 'language/identifiers/val-extends-via-escape-hex': [FAIL],
- 'language/identifiers/val-extends-via-escape-hex4': [FAIL],
- 'language/identifiers/val-false-via-escape-hex': [FAIL],
- 'language/identifiers/val-false-via-escape-hex4': [FAIL],
- 'language/identifiers/val-finally-via-escape-hex': [FAIL],
- 'language/identifiers/val-finally-via-escape-hex4': [FAIL],
- 'language/identifiers/val-for-via-escape-hex': [FAIL],
- 'language/identifiers/val-for-via-escape-hex4': [FAIL],
- 'language/identifiers/val-function-via-escape-hex': [FAIL],
- 'language/identifiers/val-function-via-escape-hex4': [FAIL],
- 'language/identifiers/val-if-via-escape-hex': [FAIL],
- 'language/identifiers/val-if-via-escape-hex4': [FAIL],
- 'language/identifiers/val-import-via-escape-hex': [FAIL],
- 'language/identifiers/val-import-via-escape-hex4': [FAIL],
- 'language/identifiers/val-in-via-escape-hex': [FAIL],
- 'language/identifiers/val-in-via-escape-hex4': [FAIL],
- 'language/identifiers/val-instanceof-via-escape-hex': [FAIL],
- 'language/identifiers/val-instanceof-via-escape-hex4': [FAIL],
- 'language/identifiers/val-new-via-escape-hex': [FAIL],
- 'language/identifiers/val-new-via-escape-hex4': [FAIL],
- 'language/identifiers/val-null-via-escape-hex': [FAIL],
- 'language/identifiers/val-null-via-escape-hex4': [FAIL],
- 'language/identifiers/val-return-via-escape-hex': [FAIL],
- 'language/identifiers/val-return-via-escape-hex4': [FAIL],
- 'language/identifiers/val-super-via-escape-hex': [FAIL],
- 'language/identifiers/val-super-via-escape-hex4': [FAIL],
- 'language/identifiers/val-switch-via-escape-hex': [FAIL],
- 'language/identifiers/val-switch-via-escape-hex4': [FAIL],
- 'language/identifiers/val-throw-via-escape-hex': [FAIL],
- 'language/identifiers/val-throw-via-escape-hex4': [FAIL],
- 'language/identifiers/val-true-via-escape-hex': [FAIL],
- 'language/identifiers/val-true-via-escape-hex4': [FAIL],
- 'language/identifiers/val-try-via-escape-hex': [FAIL],
- 'language/identifiers/val-try-via-escape-hex4': [FAIL],
- 'language/identifiers/val-typeof-via-escape-hex': [FAIL],
- 'language/identifiers/val-typeof-via-escape-hex4': [FAIL],
- 'language/identifiers/val-var-via-escape-hex': [FAIL],
- 'language/identifiers/val-var-via-escape-hex4': [FAIL],
- 'language/identifiers/val-void-via-escape-hex': [FAIL],
- 'language/identifiers/val-void-via-escape-hex4': [FAIL],
- 'language/identifiers/val-while-via-escape-hex': [FAIL],
- 'language/identifiers/val-while-via-escape-hex4': [FAIL],
- 'language/identifiers/val-with-via-escape-hex': [FAIL],
- 'language/identifiers/val-with-via-escape-hex4': [FAIL],
-
- # https://code.google.com/p/v8/issues/detail?id=4362
- 'built-ins/String/prototype/repeat/empty-string-returns-empty': [PASS, FAIL],
-
- ######################## NEEDS INVESTIGATION ###########################
-
- # These test failures are specific to the intl402 suite and need
- # investigation: either file issues and mark them as bugs, or mark them as
- # deliberate incompatibilities if the test cases turn out to be broken or
- # ambiguous. Some of them relate to v8:4361, i.e. visible side effects
- # from Intl.
- 'intl402/6.2.3': [FAIL],
- 'intl402/9.2.1_2': [FAIL],
- 'intl402/9.2.6_2': [FAIL],
- 'intl402/Collator/10.1.2.1_4': [FAIL],
- 'intl402/Collator/10.1.2_a': [PASS, FAIL],
- 'intl402/Collator/10.2.3_b': [PASS, FAIL],
- 'intl402/Collator/prototype/10.3_a': [FAIL],
- 'intl402/Date/prototype/13.3.0_7': [FAIL],
- 'intl402/DateTimeFormat/12.1.1': [FAIL],
- 'intl402/DateTimeFormat/12.1.1_a': [FAIL],
- 'intl402/DateTimeFormat/12.1.1_1': [FAIL],
- 'intl402/DateTimeFormat/12.1.2': [PASS, FAIL],
- 'intl402/DateTimeFormat/12.1.2.1_4': [FAIL],
- 'intl402/DateTimeFormat/12.2.3_b': [FAIL],
- 'intl402/DateTimeFormat/prototype/12.3.2_FDT_7_a_iv': [FAIL],
- 'intl402/DateTimeFormat/prototype/12.3.3': [FAIL],
- 'intl402/DateTimeFormat/prototype/12.3_a': [FAIL],
- 'intl402/DateTimeFormat/prototype/format/12.3.2_FDT_7_a_iv': [FAIL],
- 'intl402/Number/prototype/toLocaleString/13.2.1_5': [PASS, FAIL],
- 'intl402/NumberFormat/11.1.1_20_c': [FAIL],
- 'intl402/NumberFormat/11.1.1_a': [FAIL],
- 'intl402/NumberFormat/11.1.1': [FAIL],
- 'intl402/NumberFormat/11.1.2': [PASS, FAIL],
- 'intl402/NumberFormat/11.1.2.1_4': [FAIL],
- 'intl402/NumberFormat/11.2.3_b': [FAIL],
- 'intl402/NumberFormat/prototype/11.3_a': [FAIL],
- 'intl402/String/prototype/localeCompare/13.1.1_7': [PASS, FAIL],
-
- ##################### DELIBERATE INCOMPATIBILITIES #####################
-
- 'built-ins/Math/exp/S15.8.2.8_A6': [PASS, FAIL_OK], # Math.exp (less precise with --fast-math)
-
- # Linux on ia32 (and therefore the simulators) defaults to the extended
- # 80-bit x87 floating-point format, so these tests, which check strict
- # 64-bit FP precision, fail there; all other platforms/architectures pass
- # them. We follow the other major JS engines in keeping this default (see
- # the sketch after these entries).
- 'language/types/number/S8.5_A2.1': [PASS, FAIL_OK],
- 'language/types/number/S8.5_A2.2': [PASS, FAIL_OK],
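- # A minimal sketch of the sensitivity (illustrative only, assuming x87 code
- # keeps intermediates in extended precision):
- #   var tiny = Number.MIN_VALUE / 2;  // 0 if each step rounds to binary64;
- #                                     // can compare as nonzero with 80-bit
- #                                     // x87 intermediates.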
-
- ############################ INVALID TESTS #############################
-
- # The reference value calculated by Test262 is incorrect if you run these
- # tests in PST/PDT between the first Sunday in March and the first Sunday
- # in April: the US DST switch was moved in 2007, whereas Test262 bases the
- # reference value on the 2000 rules (see the sketch after these entries).
- # Test262 bug: https://bugs.ecmascript.org/show_bug.cgi?id=293
- 'built-ins/Date/S15.9.3.1_A5_T1': [PASS, FAIL_OK],
- 'built-ins/Date/S15.9.3.1_A5_T2': [PASS, FAIL_OK],
- 'built-ins/Date/S15.9.3.1_A5_T3': [PASS, FAIL_OK],
- 'built-ins/Date/S15.9.3.1_A5_T4': [PASS, FAIL_OK],
- 'built-ins/Date/S15.9.3.1_A5_T5': [PASS, FAIL_OK],
- 'built-ins/Date/S15.9.3.1_A5_T6': [PASS, FAIL_OK],
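- # For instance (illustrative, assuming a US Pacific time zone):
- #   new Date(2015, 2, 20).getTimezoneOffset();
- #   // 420 (PDT) under the post-2007 rules, but the 2000-era rules Test262
- #   // assumes would still give 480 (PST) on this date.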
-
- # The test makes unjustified assumptions about the number of calls to
- # SortCompare; see the sketch after this entry.
- # Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=596
- 'built-ins/Array/prototype/sort/bug_596_1': [PASS, FAIL_OK],
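- # A hedged sketch of why call counting is unreliable (illustrative):
- #   var calls = 0;
- #   [3, 1, 2].sort(function(a, b) { calls++; return a - b; });
- #   // 'calls' may be any value consistent with a correct sort; the spec
- #   // does not pin down the number of SortCompare invocations.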
-
- # Tests do not return boolean.
- 'built-ins/Object/keys/15.2.3.14-1-1': [PASS, FAIL_OK],
- 'built-ins/Object/keys/15.2.3.14-1-2': [PASS, FAIL_OK],
- 'built-ins/Object/keys/15.2.3.14-1-3': [PASS, FAIL_OK],
-
- # Test bug https://github.com/tc39/test262/issues/405
- 'intl402/Collator/prototype/compare/10.3.2_1_c': [PASS, FAIL_OK],
- 'intl402/Collator/prototype/compare/10.3.2_CS_b_NN': [PASS, FAIL_OK],
- 'intl402/Collator/prototype/compare/10.3.2_CS_c_NN': [PASS, FAIL_OK],
- 'intl402/Collator/prototype/compare/10.3.2_CS_d_NN': [PASS, FAIL_OK],
- 'intl402/Date/prototype/13.3.0_7': [PASS, FAIL_OK],
-
- ############################ SKIPPED TESTS #############################
-
- # These tests take a looong time to run.
- 'built-ins/decodeURI/S15.1.3.1_A1.10_T1': [SKIP],
- 'built-ins/decodeURI/S15.1.3.1_A1.11_T1': [SKIP],
- 'built-ins/decodeURI/S15.1.3.1_A1.11_T2': [SKIP],
- 'built-ins/decodeURI/S15.1.3.1_A1.12_T1': [SKIP],
- 'built-ins/decodeURI/S15.1.3.1_A1.12_T2': [SKIP],
- 'built-ins/decodeURI/S15.1.3.1_A2.5_T1': [SKIP],
- 'built-ins/decodeURIComponent/S15.1.3.2_A1.11_T1': [SKIP],
- 'built-ins/decodeURIComponent/S15.1.3.2_A1.12_T1': [SKIP],
- 'built-ins/decodeURIComponent/S15.1.3.2_A2.5_T1': [SKIP],
- 'built-ins/RegExp/S15.10.2.12_A3_T1': [SKIP],
- 'intl402/9.2.6_4_b': [SKIP],
- 'language/literals/regexp/S7.8.5_A1.1_T2': [SKIP],
- 'language/literals/regexp/S7.8.5_A1.4_T2': [SKIP],
- 'language/literals/regexp/S7.8.5_A2.1_T2': [SKIP],
- 'language/literals/regexp/S7.8.5_A2.4_T2': [SKIP],
- 'language/statements/const/syntax/const-invalid-assignment-next-expression-for': [SKIP],
-}], # ALWAYS
-
-['system == macos', {
- 'intl402/11.3.2_TRP': [FAIL],
- 'intl402/9.2.5_11_g_ii_2': [FAIL],
-}], # system == macos
-
-['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64el', {
-
- # TODO(mstarzinger): Causes stack overflow on simulators due to eager
- # compilation of parenthesized function literals. Needs investigation.
- 'language/statements/function/S13.2.1_A1_T1': [SKIP],
-
- # BUG(3251225): Tests that time out with --nocrankshaft.
- 'built-ins/decodeURI/S15.1.3.1_A2.4_T1': [SKIP],
- 'built-ins/decodeURI/S15.1.3.1_A2.5_T1': [SKIP],
- 'built-ins/decodeURIComponent/S15.1.3.2_A2.4_T1': [SKIP],
- 'built-ins/decodeURIComponent/S15.1.3.2_A2.5_T1': [SKIP],
- 'built-ins/encodeURI/S15.1.3.3_A2.3_T1': [SKIP],
- 'built-ins/encodeURIComponent/S15.1.3.4_A2.3_T1': [SKIP],
-}], # 'arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64el'
-]
diff --git a/deps/v8/test/test262-es6/testcfg.py b/deps/v8/test/test262-es6/testcfg.py
deleted file mode 100644
index 88f4ad1297..0000000000
--- a/deps/v8/test/test262-es6/testcfg.py
+++ /dev/null
@@ -1,235 +0,0 @@
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import hashlib
-import os
-import shutil
-import sys
-import tarfile
-import imp
-
-from testrunner.local import statusfile
-from testrunner.local import testsuite
-from testrunner.local import utils
-from testrunner.objects import testcase
-
-# The revision hash must be the 7-character short form: GitHub's tarball
-# unpacks into a "tc39-test262-<short hash>" directory, which DownloadData
-# renames below.
-TEST_262_ARCHIVE_REVISION = "258d212" # This is the 2015-07-31 revision.
-TEST_262_ARCHIVE_MD5 = "a9b26e19ce582492642af973c8cee826"
-TEST_262_URL = "https://github.com/tc39/test262/tarball/%s"
-TEST_262_HARNESS_FILES = ["sta.js", "assert.js"]
-
-TEST_262_SUITE_PATH = ["data", "test"]
-TEST_262_HARNESS_PATH = ["data", "harness"]
-TEST_262_TOOLS_PATH = ["data", "tools", "packaging"]
-
-ALL_VARIANT_FLAGS_STRICT = dict(
- (v, [flags + ["--use-strict"] for flags in flag_sets])
- for v, flag_sets in testsuite.ALL_VARIANT_FLAGS.iteritems()
-)
-
-FAST_VARIANT_FLAGS_STRICT = dict(
- (v, [flags + ["--use-strict"] for flags in flag_sets])
- for v, flag_sets in testsuite.FAST_VARIANT_FLAGS.iteritems()
-)
-
-ALL_VARIANT_FLAGS_BOTH = dict(
- (v, [flags for flags in testsuite.ALL_VARIANT_FLAGS[v] +
- ALL_VARIANT_FLAGS_STRICT[v]])
- for v in testsuite.ALL_VARIANT_FLAGS
-)
-
-FAST_VARIANT_FLAGS_BOTH = dict(
- (v, [flags for flags in testsuite.FAST_VARIANT_FLAGS[v] +
- FAST_VARIANT_FLAGS_STRICT[v]])
- for v in testsuite.FAST_VARIANT_FLAGS
-)
-
-ALL_VARIANTS = {
- 'nostrict': testsuite.ALL_VARIANT_FLAGS,
- 'strict': ALL_VARIANT_FLAGS_STRICT,
- 'both': ALL_VARIANT_FLAGS_BOTH,
-}
-
-FAST_VARIANTS = {
- 'nostrict': testsuite.FAST_VARIANT_FLAGS,
- 'strict': FAST_VARIANT_FLAGS_STRICT,
- 'both': FAST_VARIANT_FLAGS_BOTH,
-}
-
-class Test262VariantGenerator(testsuite.VariantGenerator):
- def GetFlagSets(self, testcase, variant):
- if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
- variant_flags = FAST_VARIANTS
- else:
- variant_flags = ALL_VARIANTS
-
- test_record = self.suite.GetTestRecord(testcase)
- if "noStrict" in test_record:
- return variant_flags["nostrict"][variant]
- if "onlyStrict" in test_record:
- return variant_flags["strict"][variant]
- return variant_flags["both"][variant]
-
-
-class Test262TestSuite(testsuite.TestSuite):
-
- def __init__(self, name, root):
- super(Test262TestSuite, self).__init__(name, root)
- self.testroot = os.path.join(self.root, *TEST_262_SUITE_PATH)
- self.harnesspath = os.path.join(self.root, *TEST_262_HARNESS_PATH)
- self.harness = [os.path.join(self.harnesspath, f)
- for f in TEST_262_HARNESS_FILES]
- self.harness += [os.path.join(self.root, "harness-adapt.js")]
- self.ParseTestRecord = None
-
- def ListTests(self, context):
- tests = []
- for dirname, dirs, files in os.walk(self.testroot):
- for dotted in [x for x in dirs if x.startswith(".")]:
- dirs.remove(dotted)
- if context.noi18n and "intl402" in dirs:
- dirs.remove("intl402")
- dirs.sort()
- files.sort()
- for filename in files:
- if filename.endswith(".js"):
- testname = os.path.join(dirname[len(self.testroot) + 1:],
- filename[:-3])
- case = testcase.TestCase(self, testname)
- tests.append(case)
- return tests
-
- def GetFlagsForTestCase(self, testcase, context):
- return (testcase.flags + context.mode_flags + self.harness +
- self.GetIncludesForTest(testcase) + ["--harmony"] +
- [os.path.join(self.testroot, testcase.path + ".js")])
-
- def _VariantGeneratorFactory(self):
- return Test262VariantGenerator
-
- def LoadParseTestRecord(self):
- if not self.ParseTestRecord:
- root = os.path.join(self.root, *TEST_262_TOOLS_PATH)
- f = None
- try:
- (f, pathname, description) = imp.find_module("parseTestRecord", [root])
- module = imp.load_module("parseTestRecord", f, pathname, description)
- self.ParseTestRecord = module.parseTestRecord
- except:
- raise ImportError("Cannot load parseTestRecord; you may need to "
- "--download-data for test262")
- finally:
- if f:
- f.close()
- return self.ParseTestRecord
-
- def GetTestRecord(self, testcase):
- if not hasattr(testcase, "test_record"):
- ParseTestRecord = self.LoadParseTestRecord()
- testcase.test_record = ParseTestRecord(self.GetSourceForTest(testcase),
- testcase.path)
- return testcase.test_record
-
- def GetIncludesForTest(self, testcase):
- test_record = self.GetTestRecord(testcase)
- if "includes" in test_record:
- includes = [os.path.join(self.harnesspath, f)
- for f in test_record["includes"]]
- else:
- includes = []
- return includes
-
- def GetSourceForTest(self, testcase):
- filename = os.path.join(self.testroot, testcase.path + ".js")
- with open(filename) as f:
- return f.read()
-
- def IsNegativeTest(self, testcase):
- test_record = self.GetTestRecord(testcase)
- return "negative" in test_record
-
- def IsFailureOutput(self, output, testpath):
- if output.exit_code != 0:
- return True
- return "FAILED!" in output.stdout
-
- def HasUnexpectedOutput(self, testcase):
- outcome = self.GetOutcome(testcase)
- if (statusfile.FAIL_SLOPPY in testcase.outcomes and
- "--use-strict" not in testcase.flags):
- return outcome != statusfile.FAIL
-    return outcome not in (testcase.outcomes or [statusfile.PASS])
-
- def DownloadData(self):
- revision = TEST_262_ARCHIVE_REVISION
- archive_url = TEST_262_URL % revision
- archive_name = os.path.join(self.root, "tc39-test262-%s.tar.gz" % revision)
- directory_name = os.path.join(self.root, "data")
- directory_old_name = os.path.join(self.root, "data.old")
-
-    # Clobber if the test data is in an outdated state, i.e. if any other
-    # archive files are present.
- archive_files = [f for f in os.listdir(self.root)
- if f.startswith("tc39-test262-")]
- if (len(archive_files) > 1 or
- os.path.basename(archive_name) not in archive_files):
- print "Clobber outdated test archives ..."
- for f in archive_files:
- os.remove(os.path.join(self.root, f))
-
- if not os.path.exists(archive_name):
- print "Downloading test data from %s ..." % archive_url
- utils.URLRetrieve(archive_url, archive_name)
- if os.path.exists(directory_name):
- if os.path.exists(directory_old_name):
- shutil.rmtree(directory_old_name)
- os.rename(directory_name, directory_old_name)
- if not os.path.exists(directory_name):
- print "Extracting test262-%s.tar.gz ..." % revision
- md5 = hashlib.md5()
- with open(archive_name, "rb") as f:
- for chunk in iter(lambda: f.read(8192), ""):
- md5.update(chunk)
- print "MD5 hash is %s" % md5.hexdigest()
- if md5.hexdigest() != TEST_262_ARCHIVE_MD5:
- os.remove(archive_name)
- print "MD5 expected %s" % TEST_262_ARCHIVE_MD5
- raise Exception("MD5 hash mismatch of test data file")
- archive = tarfile.open(archive_name, "r:gz")
- if sys.platform in ("win32", "cygwin"):
- # Magic incantation to allow longer path names on Windows.
- archive.extractall(u"\\\\?\\%s" % self.root)
- else:
- archive.extractall(self.root)
- os.rename(os.path.join(self.root, "tc39-test262-%s" % revision),
- directory_name)
-
-
-def GetSuite(name, root):
- return Test262TestSuite(name, root)
diff --git a/deps/v8/test/test262/README b/deps/v8/test/test262/README
index e975fbb436..fe3ab232ba 100644
--- a/deps/v8/test/test262/README
+++ b/deps/v8/test/test262/README
@@ -4,13 +4,13 @@ tests from
https://github.com/tc39/test262
-at revision 365 (hash fbba29f) as 'data' in this directory. Using later
+at hash c6ac390 (2015/07/06 revision) as 'data' in this directory. Using a later
version may be possible, but the tests are only known to pass (and indeed run)
with that revision.
git clone https://github.com/tc39/test262 data
cd data
- git checkout fbba29f
+ git checkout c6ac390
If you do update to a newer revision you may have to change the test
harness adapter code since it uses internal functionality from the
diff --git a/deps/v8/test/test262/harness-adapt.js b/deps/v8/test/test262/harness-adapt.js
index 52b5de70bd..60c0858f02 100644
--- a/deps/v8/test/test262/harness-adapt.js
+++ b/deps/v8/test/test262/harness-adapt.js
@@ -80,3 +80,12 @@ var ES5Harness = (function() {
registerTest: registerTest
}
})();
+
+function $DONE(arg){
+ if (arg) {
+ print('FAILED! Error: ' + arg);
+ quit(1);
+ }
+
+ quit(0);
+}
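+
+// Usage sketch (illustrative, not part of the harness): an async test
+// signals completion through $DONE, passing nothing on success and an
+// error value on failure, e.g.
+//   somePromise.then(function() { $DONE(); }, $DONE);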
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index feed1a3206..94873ee5b3 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -1,4 +1,3 @@
-
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -30,295 +29,663 @@
[ALWAYS, {
############################### BUGS ###################################
- '15.5.4.9_CE': [['no_i18n', SKIP]],
-
# BUG(v8:3455)
- '11.2.3_b': [FAIL],
- '12.2.3_b': [FAIL],
+ 'intl402/11.2.3_b': [FAIL],
+ 'intl402/12.2.3_b': [FAIL],
# BUG(v8:4267)
- '15.2.3.6-4-116': [FAIL],
- '15.2.3.6-4-117': [FAIL],
- '15.2.3.6-4-168': [FAIL],
- '15.2.3.6-4-169': [FAIL],
- '15.2.3.6-4-170': [FAIL],
- '15.2.3.6-4-172': [FAIL],
- '15.2.3.6-4-173': [FAIL],
- '15.2.3.6-4-174': [FAIL],
- '15.2.3.6-4-176': [FAIL],
- '15.2.3.6-4-177': [FAIL],
- '15.2.3.7-6-a-112': [FAIL],
- '15.2.3.7-6-a-113': [FAIL],
- '15.2.3.7-6-a-164': [FAIL],
- '15.2.3.7-6-a-165': [FAIL],
- '15.2.3.7-6-a-166': [FAIL],
- '15.2.3.7-6-a-168': [FAIL],
- '15.2.3.7-6-a-169': [FAIL],
- '15.2.3.7-6-a-170': [FAIL],
- '15.2.3.7-6-a-172': [FAIL],
- '15.2.3.7-6-a-173': [FAIL],
- '15.2.3.7-6-a-175': [FAIL],
- '15.2.3.7-6-a-176': [FAIL],
-
- ############################### ES6 ###################################
- # ES6 allows block-local functions.
- 'Sbp_A1_T1': [PASS, FAIL_OK],
- 'Sbp_A2_T1': [PASS, FAIL_OK],
- 'Sbp_A2_T2': [PASS, FAIL_OK],
- 'Sbp_A3_T1': [PASS, FAIL_OK],
- 'Sbp_A3_T2': [PASS, FAIL_OK],
- 'Sbp_A4_T1': [PASS, FAIL_OK],
- 'Sbp_A4_T2': [PASS, FAIL_OK],
- 'Sbp_A5_T1': [PASS], # Test is broken (strict reference to unbound variable)
- 'Sbp_A5_T2': [PASS, FAIL_OK],
-
- # Passes in ES6 since {__arr} syntax is parsed as an object literal.
- 'S12.1_A4_T2': [PASS, FAIL_OK],
- 'S12.6.4_A15': [PASS, FAIL_OK],
-
- # ES6 allows duplicate properties
- '11.1.5-4-4-a-1-s': [FAIL],
- '11.1.5_4-4-b-1': [FAIL],
- '11.1.5_4-4-b-2': [FAIL],
- '11.1.5_4-4-c-1': [FAIL],
- '11.1.5_4-4-c-2': [FAIL],
- '11.1.5_4-4-d-1': [FAIL],
- '11.1.5_4-4-d-2': [FAIL],
- '11.1.5_4-4-d-3': [FAIL],
- '11.1.5_4-4-d-4': [FAIL],
-
- # NativeError has Error as its [[Prototype]]
- '15.2.3.2-2-12': [FAIL],
- '15.2.3.2-2-13': [FAIL],
- '15.2.3.2-2-14': [FAIL],
- '15.2.3.2-2-15': [FAIL],
- '15.2.3.2-2-16': [FAIL],
- '15.2.3.2-2-17': [FAIL],
-
- # Function length properties are configurable in ES6
- '10.1_L15': [FAIL],
- '10.2.2_L15': [FAIL],
- '10.3.2_1_a_L15': [FAIL],
- '10.3.2_L15': [FAIL],
- '10.3.3_L15': [FAIL],
- '11.1_L15': [FAIL],
- '11.2.2_L15': [FAIL],
- '11.3.2_1_a_L15': [FAIL],
- '11.3.2_L15': [FAIL],
- '11.3.3_L15': [FAIL],
- '11.4.1-5-a-28-s': [FAIL],
- '12.1_L15': [FAIL],
- '12.2.2_L15': [FAIL],
- '12.3.2_1_a_L15': [FAIL],
- '12.3.2_L15': [FAIL],
- '12.3.3_L15': [FAIL],
- '13.1.1_L15': [FAIL],
- '13.2-15-1': [FAIL],
- '13.2.1_L15': [FAIL],
- '13.3.1_L15': [FAIL],
- '13.3.2_L15': [FAIL],
- '13.3.3_L15': [FAIL],
- '15.2.3.3-4-186': [FAIL],
- '15.2.3.3-4-187': [FAIL],
- '15.2.3.3-4-191': [FAIL],
- '15.2.3.3-4-194': [FAIL],
- '15.2.3.3-4-201': [FAIL],
- '15.3.3.2-1': [FAIL],
- 'S15.1.2.1_A4.2': [FAIL],
- 'S15.1.2.2_A9.2': [FAIL],
- 'S15.1.2.3_A7.2': [FAIL],
- 'S15.1.2.4_A2.2': [FAIL],
- 'S15.1.2.5_A2.2': [FAIL],
- 'S15.1.3.1_A5.2': [FAIL],
- 'S15.1.3.2_A5.2': [FAIL],
- 'S15.1.3.3_A5.2': [FAIL],
- 'S15.1.3.4_A5.2': [FAIL],
- 'S15.10.6.2_A9': [FAIL],
- 'S15.10.6.3_A9': [FAIL],
- 'S15.10.6.4_A9': [FAIL],
- 'S15.2.4.2_A9': [FAIL],
- 'S15.2.4.3_A9': [FAIL],
- 'S15.2.4.4_A9': [FAIL],
- 'S15.2.4.5_A9': [FAIL],
- 'S15.2.4.6_A9': [FAIL],
- 'S15.2.4.7_A9': [FAIL],
- 'S15.3.4.2_A9': [FAIL],
- 'S15.3.4.3_A9': [FAIL],
- 'S15.3.4.4_A9': [FAIL],
- 'S15.3.5.1_A2_T1': [FAIL],
- 'S15.3.5.1_A2_T2': [FAIL],
- 'S15.3.5.1_A2_T3': [FAIL],
- 'S15.4.3_A2.2': [FAIL],
- 'S15.4.4.10_A5.2': [FAIL],
- 'S15.4.4.11_A7.2': [FAIL],
- 'S15.4.4.12_A5.2': [FAIL],
- 'S15.4.4.13_A5.2': [FAIL],
- 'S15.4.4.2_A4.2': [FAIL],
- 'S15.4.4.3_A4.2': [FAIL],
- 'S15.4.4.4_A4.2': [FAIL],
- 'S15.4.4.5_A6.2': [FAIL],
- 'S15.4.4.6_A5.2': [FAIL],
- 'S15.4.4.7_A6.2': [FAIL],
- 'S15.4.4.8_A5.2': [FAIL],
- 'S15.4.4.9_A5.2': [FAIL],
- 'S15.5.4.10_A9': [FAIL],
- 'S15.5.4.11_A9': [FAIL],
- 'S15.5.4.12_A9': [FAIL],
- 'S15.5.4.13_A9': [FAIL],
- 'S15.5.4.14_A9': [FAIL],
- 'S15.5.4.15_A9': [FAIL],
- 'S15.5.4.16_A9': [FAIL],
- 'S15.5.4.17_A9': [FAIL],
- 'S15.5.4.18_A9': [FAIL],
- 'S15.5.4.19_A9': [FAIL],
- 'S15.5.4.4_A9': [FAIL],
- 'S15.5.4.5_A9': [FAIL],
- 'S15.5.4.6_A9': [FAIL],
- 'S15.5.4.7_A9': [FAIL],
- 'S15.5.4.8_A9': [FAIL],
- 'S15.5.4.9_A9': [FAIL],
- 'S15.9.4.2_A3_T2': [FAIL],
- 'S15.9.4.3_A3_T2': [FAIL],
- 'S15.9.5.10_A3_T2': [FAIL],
- 'S15.9.5.11_A3_T2': [FAIL],
- 'S15.9.5.12_A3_T2': [FAIL],
- 'S15.9.5.13_A3_T2': [FAIL],
- 'S15.9.5.14_A3_T2': [FAIL],
- 'S15.9.5.15_A3_T2': [FAIL],
- 'S15.9.5.16_A3_T2': [FAIL],
- 'S15.9.5.17_A3_T2': [FAIL],
- 'S15.9.5.18_A3_T2': [FAIL],
- 'S15.9.5.19_A3_T2': [FAIL],
- 'S15.9.5.1_A3_T2': [FAIL],
- 'S15.9.5.20_A3_T2': [FAIL],
- 'S15.9.5.21_A3_T2': [FAIL],
- 'S15.9.5.22_A3_T2': [FAIL],
- 'S15.9.5.23_A3_T2': [FAIL],
- 'S15.9.5.24_A3_T2': [FAIL],
- 'S15.9.5.25_A3_T2': [FAIL],
- 'S15.9.5.26_A3_T2': [FAIL],
- 'S15.9.5.27_A3_T2': [FAIL],
- 'S15.9.5.28_A3_T2': [FAIL],
- 'S15.9.5.29_A3_T2': [FAIL],
- 'S15.9.5.2_A3_T2': [FAIL],
- 'S15.9.5.30_A3_T2': [FAIL],
- 'S15.9.5.31_A3_T2': [FAIL],
- 'S15.9.5.32_A3_T2': [FAIL],
- 'S15.9.5.33_A3_T2': [FAIL],
- 'S15.9.5.34_A3_T2': [FAIL],
- 'S15.9.5.35_A3_T2': [FAIL],
- 'S15.9.5.36_A3_T2': [FAIL],
- 'S15.9.5.37_A3_T2': [FAIL],
- 'S15.9.5.38_A3_T2': [FAIL],
- 'S15.9.5.39_A3_T2': [FAIL],
- 'S15.9.5.3_A3_T2': [FAIL],
- 'S15.9.5.40_A3_T2': [FAIL],
- 'S15.9.5.41_A3_T2': [FAIL],
- 'S15.9.5.42_A3_T2': [FAIL],
- 'S15.9.5.4_A3_T2': [FAIL],
- 'S15.9.5.5_A3_T2': [FAIL],
- 'S15.9.5.6_A3_T2': [FAIL],
- 'S15.9.5.7_A3_T2': [FAIL],
- 'S15.9.5.8_A3_T2': [FAIL],
- 'S15.9.5.9_A3_T2': [FAIL],
- '15.3.4.5-15-2': [FAIL],
-
- # Object.getPrototypeOf wraps primitive values in ES6.
- '15.2.3.2-1': [FAIL],
- '15.2.3.2-1-3': [FAIL],
- '15.2.3.2-1-4': [FAIL],
-
- # Object.getOwnPropertyDescriptor wraps primitives in ES6.
- '15.2.3.3-1': [FAIL],
- '15.2.3.3-1-3': [FAIL],
- '15.2.3.3-1-4': [FAIL],
-
- # Function restricted "caller" and "arguments" properties are defined only on
- # the intrinsic %FunctionPrototype% (and sloppy functions) in ES6
- '13.2-29-s': [FAIL],
- '13.2-30-s': [FAIL],
- '13.2-31-s': [FAIL],
- '13.2-32-s': [FAIL],
- '13.2-33-s': [FAIL],
- '13.2-34-s': [FAIL],
- '13.2-35-s': [FAIL],
- '13.2-36-s': [FAIL],
- 'S13.2.3_A1': [FAIL],
- '15.3.4.5-20-1': [FAIL],
- '15.3.4.5-20-4': [FAIL],
- '15.3.4.5-20-5': [FAIL],
- '15.3.4.5-21-1': [FAIL],
- '15.3.4.5-21-4': [FAIL],
- '15.3.4.5-21-5': [FAIL],
-
- # Object.freeze(O), Object.seal(O), and Object.preventExtensions(O),
- # Object.isFrozen(O), Object.isSealed(O), and Object.isExtensible(O) no longer
- # throw when passed a non-object value in ES6.
- '15.2.3.8-1': [FAIL],
- '15.2.3.8-1-1': [FAIL],
- '15.2.3.8-1-2': [FAIL],
- '15.2.3.8-1-3': [FAIL],
- '15.2.3.8-1-4': [FAIL],
- '15.2.3.9-1': [FAIL],
- '15.2.3.9-1-1': [FAIL],
- '15.2.3.9-1-2': [FAIL],
- '15.2.3.9-1-3': [FAIL],
- '15.2.3.9-1-4': [FAIL],
- '15.2.3.10-1': [FAIL],
- '15.2.3.10-1-1': [FAIL],
- '15.2.3.10-1-2': [FAIL],
- '15.2.3.10-1-3': [FAIL],
- '15.2.3.10-1-4': [FAIL],
- '15.2.3.11-1': [FAIL],
- '15.2.3.12-1': [FAIL],
- '15.2.3.12-1-1': [FAIL],
- '15.2.3.12-1-2': [FAIL],
- '15.2.3.12-1-3': [FAIL],
- '15.2.3.12-1-4': [FAIL],
- '15.2.3.13-1': [FAIL],
- '15.2.3.13-1-1': [FAIL],
- '15.2.3.13-1-2': [FAIL],
- '15.2.3.13-1-3': [FAIL],
- '15.2.3.13-1-4': [FAIL],
+ 'built-ins/Object/defineProperties/15.2.3.7-6-a-112': [FAIL],
+ 'built-ins/Object/defineProperties/15.2.3.7-6-a-113': [FAIL],
+ 'built-ins/Object/defineProperties/15.2.3.7-6-a-164': [FAIL],
+ 'built-ins/Object/defineProperties/15.2.3.7-6-a-165': [FAIL],
+ 'built-ins/Object/defineProperties/15.2.3.7-6-a-166': [FAIL],
+ 'built-ins/Object/defineProperties/15.2.3.7-6-a-168': [FAIL],
+ 'built-ins/Object/defineProperties/15.2.3.7-6-a-169': [FAIL],
+ 'built-ins/Object/defineProperties/15.2.3.7-6-a-170': [FAIL],
+ 'built-ins/Object/defineProperties/15.2.3.7-6-a-172': [FAIL],
+ 'built-ins/Object/defineProperties/15.2.3.7-6-a-173': [FAIL],
+ 'built-ins/Object/defineProperties/15.2.3.7-6-a-175': [FAIL],
+ 'built-ins/Object/defineProperties/15.2.3.7-6-a-176': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-116': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-117': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-168': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-169': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-170': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-172': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-173': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-174': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-176': [FAIL],
+ 'built-ins/Object/defineProperty/15.2.3.6-4-177': [FAIL],
+
+ # Unicode canonicalization is not available with i18n turned off.
+ 'built-ins/String/prototype/localeCompare/15.5.4.9_CE': [['no_i18n', SKIP]],
+
+ ###################### NEEDS INVESTIGATION #######################
+
+ # Possibly the same cause as S8.5_A2.1, below: floating-point precision tests.
+ 'built-ins/Math/cos/S15.8.2.7_A7': [PASS, FAIL_OK],
+ 'built-ins/Math/sin/S15.8.2.16_A7': [PASS, FAIL_OK],
+ 'built-ins/Math/tan/S15.8.2.18_A7': [PASS, FAIL_OK],
+
+ # This is an incompatibility between ES5 and V8 on enumerating
+ # shadowed elements in a for..in loop.
+ # https://code.google.com/p/v8/issues/detail?id=705
+ 'language/statements/for-in/12.6.4-2': [PASS, FAIL_OK],
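+ # A shadowing sketch (illustrative, not a test):
+ #   var o = Object.create({a: 1});
+ #   o.a = 2;                    // shadows the inherited 'a'
+ #   for (var k in o) print(k);  // 'a' must appear once; handling of shadowed
+ #                               // keys is where V8 and the test disagree.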
+
+ ###################### MISSING ES6 FEATURES #######################
+
+ # Const is still interpreted as legacy const in sloppy mode
+ # https://code.google.com/p/v8/issues/detail?id=3305
+ 'language/block-scope/shadowing/const-declaration-shadowing-catch-parameter': [PASS, FAIL_SLOPPY],
+ 'language/block-scope/shadowing/const-declarations-shadowing-parameter-name-let-const-and-var-variables': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/block-local-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/block-local-use-before-initialization-in-declaration-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/block-local-use-before-initialization-in-prior-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/function-local-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/function-local-use-before-initialization-in-declaration-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/function-local-use-before-initialization-in-prior-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/global-closure-get-before-initialization': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/global-use-before-initialization-in-declaration-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/global-use-before-initialization-in-prior-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/block-scope-syntax-const-declarations-mixed-with-without-initialiser': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/block-scope-syntax-const-declarations-mixed-without-with-initialiser': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/block-scope-syntax-const-declarations-without-initialiser': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/const': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/const-invalid-assignment-statement-body-for-in': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/const-invalid-assignment-statement-body-for-of': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/const-outer-inner-let-bindings': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/with-initializer-do-statement-while-expression': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/with-initializer-for-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/with-initializer-if-expression-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/with-initializer-if-expression-statement-else-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/with-initializer-label-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/with-initializer-while-expression-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/without-initializer-case-expression-statement-list': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/without-initializer-default-statement-list': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/without-initializer-do-statement-while-expression': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/without-initializer-for-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/without-initializer-if-expression-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/without-initializer-if-expression-statement-else-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/without-initializer-label-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/const/syntax/without-initializer-while-expression-statement': [PASS, FAIL_SLOPPY],
+ 'language/statements/for-in/const-bound-names-fordecl-tdz-for-in': [PASS, FAIL_SLOPPY],
+ 'language/statements/for-in/const-fresh-binding-per-iteration-for-in': [PASS, FAIL_SLOPPY],
+ 'language/statements/for-of/const-bound-names-fordecl-tdz-for-of': [PASS, FAIL_SLOPPY],
+ 'language/statements/for-of/const-fresh-binding-per-iteration-for-of': [PASS, FAIL_SLOPPY],
+
+ # Functions in blocks are var-declared and hoisted in sloppy mode
+ # https://code.google.com/p/v8/issues/detail?id=3305
+ 'language/block-scope/shadowing/dynamic-lookup-from-closure': [PASS, FAIL_SLOPPY],
+ 'language/block-scope/shadowing/lookup-from-closure': [PASS, FAIL_SLOPPY],
+ 'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-function-declaration-with-function-declaration': [PASS, FAIL_SLOPPY],
+ 'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-function-declaration-with-var': [PASS, FAIL_SLOPPY],
+ 'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-var-with-function-declaration': [PASS, FAIL_SLOPPY],
+ 'language/statements/let/block-local-closure-set-before-initialization': [PASS, FAIL_SLOPPY],
+
+ # https://code.google.com/p/v8/issues/detail?id=4405
+ 'language/block-scope/leave/outermost-binding-updated-in-catch-block-nested-block-let-declaration-unseen-outside-of-block': [PASS, FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=3305
+ # This times out in sloppy mode because sloppy const assignment does not throw.
+ 'language/statements/const/syntax/const-invalid-assignment-next-expression-for': [PASS, FAIL, TIMEOUT],
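+ # Sketch of the timeout (illustrative): legacy sloppy const silently ignores
+ # the assignment instead of throwing, so the loop never advances:
+ #   for (const i = 0; i < 1; i++) { }   // i stays 0 -> runs forever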
+
+ # https://code.google.com/p/v8/issues/detail?id=4403
+ 'language/statements/let/syntax/identifier-let-disallowed-as-boundname': [PASS, FAIL_SLOPPY],
+
+ # Number/Boolean.prototype is a plain object in ES6
+ # https://code.google.com/p/v8/issues/detail?id=4001
+ 'built-ins/Boolean/prototype/S15.6.3.1_A1': [FAIL],
+ 'built-ins/Boolean/prototype/S15.6.4_A1': [FAIL],
+ 'built-ins/Boolean/prototype/toString/S15.6.4.2_A1_T1': [FAIL],
+ 'built-ins/Boolean/prototype/toString/S15.6.4.2_A1_T2': [FAIL],
+ 'built-ins/Boolean/prototype/valueOf/S15.6.4.3_A1_T1': [FAIL],
+ 'built-ins/Boolean/prototype/valueOf/S15.6.4.3_A1_T2': [FAIL],
+ 'built-ins/Number/15.7.4-1': [FAIL],
+ 'built-ins/Number/prototype/S15.7.3.1_A2_*': [FAIL],
+ 'built-ins/Number/prototype/S15.7.3.1_A3': [FAIL],
+ 'built-ins/Number/prototype/S15.7.4_A1': [FAIL],
+ 'built-ins/Number/prototype/toFixed/S15.7.4.5_A1.1_T01': [FAIL],
+ 'built-ins/Number/prototype/toString/S15.7.4.2_A1_*': [FAIL],
+ 'built-ins/Number/prototype/toString/S15.7.4.2_A2_*': [FAIL],
+ 'built-ins/Number/prototype/valueOf/S15.7.4.4_A1_*': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=3087
+ 'built-ins/Array/prototype/every/15.4.4.16-3-12': [FAIL],
+ 'built-ins/Array/prototype/every/15.4.4.16-3-14': [FAIL],
+ 'built-ins/Array/prototype/every/15.4.4.16-3-25': [FAIL],
+ 'built-ins/Array/prototype/every/15.4.4.16-3-29': [FAIL],
+ 'built-ins/Array/prototype/every/15.4.4.16-3-7': [FAIL],
+ 'built-ins/Array/prototype/every/15.4.4.16-3-8': [FAIL],
+ 'built-ins/Array/prototype/filter/15.4.4.20-3-12': [FAIL],
+ 'built-ins/Array/prototype/filter/15.4.4.20-3-25': [FAIL],
+ 'built-ins/Array/prototype/filter/15.4.4.20-3-7': [FAIL],
+ 'built-ins/Array/prototype/forEach/15.4.4.18-3-12': [FAIL],
+ 'built-ins/Array/prototype/forEach/15.4.4.18-3-25': [FAIL],
+ 'built-ins/Array/prototype/forEach/15.4.4.18-3-7': [FAIL],
+ 'built-ins/Array/prototype/indexOf/15.4.4.14-3-12': [FAIL],
+ 'built-ins/Array/prototype/indexOf/15.4.4.14-3-14': [FAIL],
+ 'built-ins/Array/prototype/indexOf/15.4.4.14-3-25': [FAIL],
+ 'built-ins/Array/prototype/indexOf/15.4.4.14-3-28': [FAIL],
+ 'built-ins/Array/prototype/indexOf/15.4.4.14-3-29': [FAIL],
+ 'built-ins/Array/prototype/indexOf/15.4.4.14-3-7': [FAIL],
+ 'built-ins/Array/prototype/indexOf/15.4.4.14-3-8': [FAIL],
+ 'built-ins/Array/prototype/join/S15.4.4.5_A4_T3': [FAIL],
+ 'built-ins/Array/prototype/lastIndexOf/15.4.4.15-3-12': [FAIL],
+ 'built-ins/Array/prototype/lastIndexOf/15.4.4.15-3-25': [FAIL],
+ 'built-ins/Array/prototype/lastIndexOf/15.4.4.15-3-28': [FAIL],
+ 'built-ins/Array/prototype/lastIndexOf/15.4.4.15-3-7': [FAIL],
+ 'built-ins/Array/prototype/map/15.4.4.19-3-12': [FAIL],
+ 'built-ins/Array/prototype/map/15.4.4.19-3-14': [FAIL],
+ 'built-ins/Array/prototype/map/15.4.4.19-3-25': [FAIL],
+ 'built-ins/Array/prototype/map/15.4.4.19-3-28': [FAIL],
+ 'built-ins/Array/prototype/map/15.4.4.19-3-29': [FAIL],
+ 'built-ins/Array/prototype/map/15.4.4.19-3-7': [FAIL],
+ 'built-ins/Array/prototype/map/15.4.4.19-3-8': [FAIL],
+ 'built-ins/Array/prototype/pop/S15.4.4.6_A2_T2': [FAIL],
+ 'built-ins/Array/prototype/pop/S15.4.4.6_A3_T1': [FAIL],
+ 'built-ins/Array/prototype/pop/S15.4.4.6_A3_T2': [FAIL],
+ 'built-ins/Array/prototype/pop/S15.4.4.6_A3_T3': [FAIL],
+ 'built-ins/Array/prototype/push/S15.4.4.7_A2_T2': [FAIL],
+ 'built-ins/Array/prototype/push/S15.4.4.7_A4_T1': [FAIL],
+ 'built-ins/Array/prototype/push/S15.4.4.7_A4_T3': [FAIL],
+ 'built-ins/Array/prototype/reduce/15.4.4.21-3-12': [FAIL],
+ 'built-ins/Array/prototype/reduce/15.4.4.21-3-25': [FAIL],
+ 'built-ins/Array/prototype/reduce/15.4.4.21-3-7': [FAIL],
+ 'built-ins/Array/prototype/reduceRight/15.4.4.22-3-12': [FAIL],
+ 'built-ins/Array/prototype/reduceRight/15.4.4.22-3-25': [FAIL],
+ 'built-ins/Array/prototype/reduceRight/15.4.4.22-3-7': [FAIL],
+ 'built-ins/Array/prototype/reverse/S15.4.4.8_A3_T3': [FAIL],
+ 'built-ins/Array/prototype/shift/S15.4.4.9_A3_T3': [FAIL],
+ 'built-ins/Array/prototype/slice/S15.4.4.10_A3_T1': [FAIL],
+ 'built-ins/Array/prototype/slice/S15.4.4.10_A3_T2': [FAIL],
+ 'built-ins/Array/prototype/slice/S15.4.4.10_A3_T3': [FAIL],
+ 'built-ins/Array/prototype/some/15.4.4.17-3-12': [FAIL],
+ 'built-ins/Array/prototype/some/15.4.4.17-3-14': [FAIL],
+ 'built-ins/Array/prototype/some/15.4.4.17-3-25': [FAIL],
+ 'built-ins/Array/prototype/some/15.4.4.17-3-28': [FAIL],
+ 'built-ins/Array/prototype/some/15.4.4.17-3-29': [FAIL],
+ 'built-ins/Array/prototype/some/15.4.4.17-3-7': [FAIL],
+ 'built-ins/Array/prototype/some/15.4.4.17-3-8': [FAIL],
+ 'built-ins/Array/prototype/sort/S15.4.4.11_A4_T3': [FAIL],
+ 'built-ins/Array/prototype/splice/S15.4.4.12_A3_T1': [FAIL],
+ 'built-ins/Array/prototype/splice/S15.4.4.12_A3_T3': [FAIL],
+ 'built-ins/Array/prototype/unshift/S15.4.4.13_A3_T2': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=1543
+ 'built-ins/Proxy/*': [FAIL],
+ 'built-ins/Array/prototype/find/Array.prototype.find_callable-Proxy-1': [FAIL],
+ 'built-ins/Array/prototype/find/Array.prototype.find_callable-Proxy-2': [FAIL],
+ 'built-ins/Object/assign/source-own-prop-desc-missing': [FAIL],
+ 'built-ins/Object/assign/source-own-prop-error': [FAIL],
+ 'built-ins/Object/assign/source-own-prop-keys-error': [FAIL],
+ 'built-ins/Object/setPrototypeOf/set-error': [FAIL],
+ 'language/expressions/object/prop-def-id-eval-error-2': [FAIL],
+ 'language/statements/for-of/iterator-as-proxy': [FAIL],
+ 'language/statements/for-of/iterator-next-result-type': [FAIL],
+ 'built-ins/Array/of/return-abrupt-from-data-property-using-proxy': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4093
+ 'built-ins/Array/symbol-species': [FAIL],
+ 'built-ins/Array/symbol-species-name': [FAIL],
+ 'built-ins/ArrayBuffer/symbol-species': [FAIL],
+ 'built-ins/ArrayBuffer/symbol-species-name': [FAIL],
+ 'built-ins/Map/symbol-species': [FAIL],
+ 'built-ins/Map/symbol-species-name': [FAIL],
+ 'built-ins/Promise/Symbol.species/prop-desc': [FAIL],
+ 'built-ins/Promise/Symbol.species/return-value': [FAIL],
+ 'built-ins/Promise/all/species-get-error': [FAIL],
+ 'built-ins/Promise/prototype/then/ctor-custom': [FAIL],
+ 'built-ins/Promise/race/species-get-error': [FAIL],
+ 'built-ins/Promise/symbol-species': [FAIL],
+ 'built-ins/Promise/symbol-species-name': [FAIL],
+ 'built-ins/RegExp/symbol-species': [FAIL],
+ 'built-ins/RegExp/symbol-species-name': [FAIL],
+ 'built-ins/Set/symbol-species': [FAIL],
+ 'built-ins/Set/symbol-species-name': [FAIL],
+ 'built-ins/Symbol/species/basic': [FAIL],
+ 'built-ins/Symbol/species/builtin-getter-name': [FAIL],
+ 'built-ins/Symbol/species/subclassing': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4242
+ 'built-ins/Date/15.9.1.15-1': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4002
+ 'built-ins/Error/prototype/S15.11.4_A2': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4163
+ 'built-ins/GeneratorPrototype/next/context-constructor-invocation': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=3566
+ 'built-ins/Array/from/iter-map-fn-err': [FAIL],
+ 'built-ins/Array/from/iter-set-elem-prop-err': [FAIL],
+ 'built-ins/Map/iterator-close-after-set-failure': [FAIL],
+ 'built-ins/Map/iterator-item-first-entry-returns-abrupt': [FAIL],
+ 'built-ins/Map/iterator-item-second-entry-returns-abrupt': [FAIL],
+ 'built-ins/Map/iterator-items-are-not-object-close-iterator': [FAIL],
+ 'built-ins/Promise/all/iter-close': [FAIL],
+ 'built-ins/Set/set-iterator-close-after-add-failure': [FAIL],
+ 'built-ins/WeakMap/iterator-close-after-set-failure': [FAIL],
+ 'built-ins/WeakMap/iterator-item-first-entry-returns-abrupt': [FAIL],
+ 'built-ins/WeakMap/iterator-item-second-entry-returns-abrupt': [FAIL],
+ 'built-ins/WeakMap/iterator-items-are-not-object-close-iterator': [FAIL],
+ 'built-ins/WeakSet/iterator-close-after-add-failure': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=3715
+ 'built-ins/Object/getOwnPropertyDescriptor/15.2.3.3-4-212': [FAIL],
+ 'built-ins/Object/getOwnPropertyDescriptor/15.2.3.3-4-213': [FAIL],
+ 'built-ins/Object/getOwnPropertyDescriptor/15.2.3.3-4-214': [FAIL],
+ 'built-ins/Object/getOwnPropertyDescriptor/15.2.3.3-4-215': [FAIL],
+ 'built-ins/RegExp/prototype/global/15.10.7.2-1': [FAIL],
+ 'built-ins/RegExp/prototype/global/15.10.7.2-2': [FAIL],
+ 'built-ins/RegExp/prototype/global/S15.10.7.2_A9': [FAIL],
+ 'built-ins/RegExp/prototype/ignoreCase/15.10.7.3-1': [FAIL],
+ 'built-ins/RegExp/prototype/ignoreCase/15.10.7.3-2': [FAIL],
+ 'built-ins/RegExp/prototype/ignoreCase/S15.10.7.3_A9': [FAIL],
+ 'built-ins/RegExp/prototype/lastIndex/15.10.7.5-1': [FAIL],
+ 'built-ins/RegExp/prototype/lastIndex/15.10.7.5-2': [FAIL],
+ 'built-ins/RegExp/prototype/multiline/15.10.7.4-1': [FAIL],
+ 'built-ins/RegExp/prototype/multiline/15.10.7.4-2': [FAIL],
+ 'built-ins/RegExp/prototype/multiline/S15.10.7.4_A9': [FAIL],
+ 'built-ins/RegExp/prototype/source/15.10.7.1-1': [FAIL],
+ 'built-ins/RegExp/prototype/source/15.10.7.1-2': [FAIL],
+ 'built-ins/RegExp/prototype/source/S15.10.7.1_A9': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4243
+ 'built-ins/Promise/race/S25.4.4.3_A3.1_T2': [FAIL],
+ 'built-ins/Promise/reject/S25.4.4.4_A3.1_T1': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4341
+ 'built-ins/Promise/resolve/arg-uniq-ctor': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4119
+ 'built-ins/RegExp/15.10.4.1-1': [FAIL],
+ 'built-ins/RegExp/S15.10.3.1_A2_T1': [FAIL],
+ 'built-ins/RegExp/S15.10.3.1_A2_T2': [FAIL],
+ 'built-ins/RegExp/S15.10.4.1_A2_T1': [FAIL],
+ 'built-ins/RegExp/S15.10.4.1_A2_T2': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4003
+ 'built-ins/RegExp/prototype/15.10.6': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4244
+ 'built-ins/RegExp/prototype/exec/S15.10.6.2_A5_T3': [FAIL],
+ 'built-ins/RegExp/prototype/test/S15.10.6.3_A1_T22': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4006
+ 'built-ins/String/prototype/S15.5.4_A1': [FAIL],
+ 'built-ins/String/prototype/S15.5.4_A2': [FAIL],
+ 'built-ins/String/prototype/S15.5.4_A3': [FAIL],
+ 'language/expressions/property-accessors/S11.2.1_A4_T5': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4245
+ 'built-ins/String/prototype/split/S15.5.4.14_A2_T37': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4348
+ 'built-ins/String/prototype/Symbol.iterator/this-val-non-obj-coercible': [FAIL],
+
+ # The order of adding the name property is wrong
+ # https://code.google.com/p/v8/issues/detail?id=4199
+ 'language/computed-property-names/class/static/method-number': [FAIL, FAIL_SLOPPY],
+ 'language/computed-property-names/class/static/method-symbol': [FAIL, FAIL_SLOPPY],
+ 'language/computed-property-names/class/static/method-string': [FAIL, FAIL_SLOPPY],
+
+ # This should work as soon as rest parameters are re-implemented via desugaring.
+ 'language/expressions/arrow-function/syntax/early-errors/arrowparameters-cover-no-duplicates-rest': [PASS, FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=2160
+ 'language/expressions/arrow-function/syntax/arrowparameters-cover-initialize-1': [FAIL],
+ 'language/expressions/arrow-function/syntax/arrowparameters-cover-initialize-2': [FAIL],
+ 'language/expressions/object/method-definition/generator-super-prop-param': [FAIL],
+ 'language/expressions/object/method-definition/name-param-init-yield': [FAIL],
+ 'language/expressions/object/method-definition/name-super-prop-param': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=3673
+ 'language/statements/class/definition/basics': [FAIL],
+
+ # Destructuring
+ # https://code.google.com/p/v8/issues/detail?id=811
+ 'language/statements/for-of/body-dstr-assign': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=3566
+ 'language/statements/for-of/body-dstr-assign-error': [FAIL],
+ 'language/statements/for-of/body-put-error': [FAIL],
+ 'language/statements/for-of/generator-close-via-break': [FAIL],
+ 'language/statements/for-of/generator-close-via-return': [FAIL],
+ 'language/statements/for-of/generator-close-via-throw': [FAIL],
+ 'language/statements/for-of/iterator-close-get-method-error': [FAIL],
+ 'language/statements/for-of/iterator-close-non-object': [FAIL],
+ 'language/statements/for-of/iterator-close-via-break': [FAIL],
+ 'language/statements/for-of/iterator-close-via-return': [FAIL],
+ 'language/statements/for-of/iterator-close-via-throw': [FAIL],
+
+ # We do not expose Array.prototype.values
+ # https://code.google.com/p/v8/issues/detail?id=4247
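+ # ES6 requires Array.prototype[Symbol.iterator] to be the same function
+ # object as Array.prototype.values, hence the Symbol.iterator failure too.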
+ 'built-ins/Array/prototype/Symbol.iterator': [FAIL],
+ 'built-ins/Array/prototype/values/returns-iterator': [FAIL],
+ 'built-ins/Array/prototype/values/returns-iterator-from-object': [FAIL],
+ 'built-ins/Array/prototype/values/prop-desc': [FAIL],
+ 'built-ins/Array/prototype/values/name': [FAIL],
+ 'built-ins/Array/prototype/values/length': [FAIL],
+ 'built-ins/Array/prototype/values/iteration': [FAIL],
+ 'built-ins/Array/prototype/values/iteration-mutable': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=3983
+ 'language/expressions/generators/yield-as-function-expression-binding-identifier': [FAIL],
+ 'language/expressions/generators/yield-as-generator-expression-binding-identifier': [FAIL],
+ 'language/expressions/object/method-definition/generator-argSuperProperty': [FAIL],
+ 'language/expressions/object/method-definition/yield-as-function-expression-binding-identifier': [FAIL],
+ 'language/statements/generators/yield-as-function-expression-binding-identifier': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=3566
+ 'built-ins/GeneratorPrototype/return/from-state-completed': [FAIL],
+ 'built-ins/GeneratorPrototype/return/from-state-suspended-start': [FAIL],
+ 'built-ins/GeneratorPrototype/return/property-descriptor': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-catch-before-try': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-catch-following-catch': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-catch-within-catch': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-catch-within-try': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-finally-before-try': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-finally-following-finally': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-catch': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-finally': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-inner-try': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-outer-try-after-nested': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-finally-nested-try-catch-within-outer-try-before-nested': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-finally-within-finally': [FAIL],
+ 'built-ins/GeneratorPrototype/return/try-finally-within-try': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=811
+ 'language/expressions/assignment/destructuring/*': [SKIP],
+
+ # https://code.google.com/p/v8/issues/detail?id=4248
+ 'language/expressions/compound-assignment/S11.13.2_A5.*': [FAIL],
+ 'language/expressions/compound-assignment/S11.13.2_A6.*': [FAIL],
+ 'language/expressions/compound-assignment/S11.13.2_A7.10_T4': [FAIL],
+ 'language/expressions/compound-assignment/S11.13.2_A7.11_T4': [FAIL],
+ 'language/expressions/compound-assignment/S11.13.2_A7.1_T4': [FAIL],
+ 'language/expressions/compound-assignment/S11.13.2_A7.2_T4': [FAIL],
+ 'language/expressions/compound-assignment/S11.13.2_A7.3_T4': [FAIL],
+ 'language/expressions/compound-assignment/S11.13.2_A7.4_T4': [FAIL],
+ 'language/expressions/compound-assignment/S11.13.2_A7.5_T4': [FAIL],
+ 'language/expressions/compound-assignment/S11.13.2_A7.6_T4': [FAIL],
+ 'language/expressions/compound-assignment/S11.13.2_A7.7_T4': [FAIL],
+ 'language/expressions/compound-assignment/S11.13.2_A7.8_T4': [FAIL],
+ 'language/expressions/compound-assignment/S11.13.2_A7.9_T4': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4249
+ 'language/expressions/assignment/S11.13.1_A7_T1': [FAIL],
+ 'language/expressions/assignment/S11.13.1_A7_T2': [FAIL],
+ 'language/expressions/assignment/S11.13.1_A7_T3': [FAIL],
+ 'language/expressions/postfix-increment/S11.3.1_A6_T3': [FAIL],
+ 'language/expressions/postfix-decrement/S11.3.2_A6_T3': [FAIL],
+ 'language/expressions/prefix-decrement/S11.4.5_A6_T3': [FAIL],
+ 'language/expressions/prefix-increment/S11.4.4_A6_T3': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4250
+ 'language/expressions/assignment/S11.13.1_A5*': [FAIL],
+ 'language/expressions/assignment/S11.13.1_A6*': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=3699
+ 'built-ins/Function/instance-name': [FAIL],
+ 'built-ins/GeneratorFunction/instance-name': [FAIL],
+ 'language/expressions/assignment/fn-name-arrow': [FAIL],
+ 'language/expressions/assignment/fn-name-class': [FAIL],
+ 'language/expressions/assignment/fn-name-cover': [FAIL],
+ 'language/expressions/assignment/fn-name-fn': [FAIL],
+ 'language/expressions/assignment/fn-name-gen': [FAIL],
+ 'language/expressions/assignment/fn-name-lhs-cover': [FAIL],
+ 'language/expressions/assignment/fn-name-lhs-member': [FAIL],
+ 'language/expressions/class/name': [FAIL],
+ 'language/expressions/function/name': [FAIL],
+ 'language/expressions/generators/implicit-name': [FAIL],
+ 'language/expressions/generators/name': [FAIL],
+ 'language/expressions/generators/name-property-descriptor': [FAIL],
+ 'language/expressions/object/fn-name-accessor-get': [FAIL],
+ 'language/expressions/object/fn-name-accessor-set': [FAIL],
+ 'language/expressions/object/fn-name-arrow': [FAIL],
+ 'language/expressions/object/fn-name-class': [FAIL],
+ 'language/expressions/object/fn-name-cover': [FAIL],
+ 'language/expressions/object/fn-name-fn': [FAIL],
+ 'language/expressions/object/fn-name-gen': [FAIL],
+ 'language/expressions/object/fn-name-lhs-cover': [FAIL],
+ 'language/expressions/object/fn-name-lhs-member': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-accessor-get': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-accessor-set': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-arrow': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-class': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-cover': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-fn': [FAIL],
+ 'language/expressions/object/method-definition/fn-name-gen': [FAIL],
+ 'language/statements/class/definition/fn-name-accessor-get': [FAIL],
+ 'language/statements/class/definition/fn-name-accessor-set': [FAIL],
+ 'language/statements/class/definition/fn-name-gen-method': [FAIL],
+ 'language/statements/class/definition/fn-name-method': [FAIL],
+ 'language/statements/const/fn-name-arrow': [FAIL],
+ 'language/statements/const/fn-name-class': [FAIL],
+ 'language/statements/const/fn-name-cover': [FAIL],
+ 'language/statements/const/fn-name-fn': [FAIL],
+ 'language/statements/const/fn-name-gen': [FAIL],
+ 'language/statements/let/fn-name-arrow': [FAIL],
+ 'language/statements/let/fn-name-class': [FAIL],
+ 'language/statements/let/fn-name-cover': [FAIL],
+ 'language/statements/let/fn-name-fn': [FAIL],
+ 'language/statements/let/fn-name-gen': [FAIL],
+ 'language/statements/variable/fn-name-arrow': [FAIL],
+ 'language/statements/variable/fn-name-class': [FAIL],
+ 'language/statements/variable/fn-name-cover': [FAIL],
+ 'language/statements/variable/fn-name-fn': [FAIL],
+ 'language/statements/variable/fn-name-gen': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4251
+ 'language/expressions/postfix-increment/S11.3.1_A5_T1': [FAIL],
+ 'language/expressions/postfix-increment/S11.3.1_A5_T2': [FAIL],
+ 'language/expressions/postfix-increment/S11.3.1_A5_T3': [FAIL],
+ 'language/expressions/postfix-increment/S11.3.1_A5_T4': [FAIL],
+ 'language/expressions/postfix-increment/S11.3.1_A5_T5': [FAIL],
+ 'language/expressions/postfix-decrement/S11.3.2_A5_*': [FAIL],
+ 'language/expressions/prefix-decrement/S11.4.5_A5_*': [FAIL],
+ 'language/expressions/prefix-increment/S11.4.4_A5_*': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4253
+ 'language/asi/S7.9_A5.7_T1': [PASS, FAIL_OK],
+
+ # https://code.google.com/p/v8/issues/detail?id=3761
+ 'language/expressions/object/method-definition/generator-name-prop-symbol': [FAIL],
+ 'language/expressions/object/method-definition/name-name-prop-symbol': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=2952
+ 'built-ins/RegExp/prototype/exec/u-lastindex-adv': [FAIL],
+ 'built-ins/RegExp/prototype/exec/u-captured-value': [FAIL],
+ 'built-ins/RegExp/prototype/exec/u-lastindex-value': [FAIL],
+ 'built-ins/RegExp/prototype/test/u-captured-value': [FAIL],
+ 'built-ins/RegExp/prototype/test/u-lastindex-adv': [FAIL],
+ 'built-ins/RegExp/prototype/test/u-lastindex-value': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/length': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/name': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/prop-desc': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/this-invald-obj': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/this-non-obj': [FAIL],
+ 'built-ins/RegExp/prototype/unicode/this-regexp': [FAIL],
+ 'built-ins/RegExp/unicode_identity_escape': [FAIL],
+ 'language/literals/regexp/u-unicode-esc': [FAIL],
+ 'language/literals/regexp/u-surrogate-pairs': [FAIL],
+ 'language/literals/regexp/u-case-mapping': [FAIL],
+ 'language/literals/regexp/u-astral': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4342
+ 'built-ins/RegExp/prototype/exec/get-sticky-coerce': [FAIL],
+ 'built-ins/RegExp/prototype/exec/get-sticky-err': [FAIL],
+ 'built-ins/RegExp/prototype/exec/y-fail-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/exec/y-fail-lastindex-no-write': [FAIL],
+ 'built-ins/RegExp/prototype/exec/y-fail-return': [FAIL],
+ 'built-ins/RegExp/prototype/exec/y-init-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/exec/y-set-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/sticky/prop-desc': [FAIL],
+ 'built-ins/RegExp/prototype/sticky/this-invalid-obj': [FAIL],
+ 'built-ins/RegExp/prototype/sticky/this-non-obj': [FAIL],
+ 'built-ins/RegExp/prototype/sticky/this-regexp': [FAIL],
+ 'built-ins/RegExp/prototype/test/get-sticky-coerce': [FAIL],
+ 'built-ins/RegExp/prototype/test/get-sticky-err': [FAIL],
+ 'built-ins/RegExp/prototype/test/y-fail-lastindex-no-write': [FAIL],
+ 'built-ins/RegExp/prototype/test/y-fail-return': [FAIL],
+ 'built-ins/RegExp/prototype/test/y-fail-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/test/y-init-lastindex': [FAIL],
+ 'built-ins/RegExp/prototype/test/y-set-lastindex': [FAIL],
+ 'built-ins/RegExp/valid-flags-y': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4305
+ 'built-ins/RegExp/prototype/Symbol.match/*': [FAIL],
+ 'built-ins/String/prototype/endsWith/return-abrupt-from-searchstring-regexp-test': [FAIL],
+ 'built-ins/String/prototype/includes/return-abrupt-from-searchstring-regexp-test': [FAIL],
+ 'built-ins/String/prototype/startsWith/return-abrupt-from-searchstring-regexp-test': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4343
+ 'built-ins/RegExp/prototype/Symbol.replace/*': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4344
+ 'built-ins/RegExp/prototype/Symbol.search/*': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4345
+ 'built-ins/RegExp/prototype/Symbol.split/*': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4346
+ 'built-ins/RegExp/prototype/flags/*': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4347
+ 'built-ins/RegExp/prototype/global/name': [FAIL],
+ 'built-ins/RegExp/prototype/ignoreCase/name': [FAIL],
+ 'built-ins/RegExp/prototype/multiline/name': [FAIL],
+ 'built-ins/RegExp/prototype/source/name': [FAIL],
+ 'built-ins/RegExp/prototype/sticky/name': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4360
+ 'intl402/Collator/10.1.1_1': [FAIL],
+ 'intl402/DateTimeFormat/12.1.1_1': [FAIL],
+ 'intl402/NumberFormat/11.1.1_1': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4361
+ 'intl402/Collator/10.1.1_a': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=1972
+ 'language/identifiers/val-break-via-escape-hex': [FAIL],
+ 'language/identifiers/val-break-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-case-via-escape-hex': [FAIL],
+ 'language/identifiers/val-case-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-catch-via-escape-hex': [FAIL],
+ 'language/identifiers/val-catch-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-class-via-escape-hex': [FAIL],
+ 'language/identifiers/val-class-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-const-via-escape-hex': [FAIL],
+ 'language/identifiers/val-const-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-continue-via-escape-hex': [FAIL],
+ 'language/identifiers/val-continue-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-debugger-via-escape-hex': [FAIL],
+ 'language/identifiers/val-debugger-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-default-via-escape-hex': [FAIL],
+ 'language/identifiers/val-default-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-delete-via-escape-hex': [FAIL],
+ 'language/identifiers/val-delete-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-do-via-escape-hex': [FAIL],
+ 'language/identifiers/val-do-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-else-via-escape-hex': [FAIL],
+ 'language/identifiers/val-else-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-enum-via-escape-hex': [FAIL],
+ 'language/identifiers/val-enum-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-export-via-escape-hex': [FAIL],
+ 'language/identifiers/val-export-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-extends-via-escape-hex': [FAIL],
+ 'language/identifiers/val-extends-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-false-via-escape-hex': [FAIL],
+ 'language/identifiers/val-false-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-finally-via-escape-hex': [FAIL],
+ 'language/identifiers/val-finally-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-for-via-escape-hex': [FAIL],
+ 'language/identifiers/val-for-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-function-via-escape-hex': [FAIL],
+ 'language/identifiers/val-function-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-if-via-escape-hex': [FAIL],
+ 'language/identifiers/val-if-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-import-via-escape-hex': [FAIL],
+ 'language/identifiers/val-import-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-in-via-escape-hex': [FAIL],
+ 'language/identifiers/val-in-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-instanceof-via-escape-hex': [FAIL],
+ 'language/identifiers/val-instanceof-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-new-via-escape-hex': [FAIL],
+ 'language/identifiers/val-new-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-null-via-escape-hex': [FAIL],
+ 'language/identifiers/val-null-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-return-via-escape-hex': [FAIL],
+ 'language/identifiers/val-return-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-super-via-escape-hex': [FAIL],
+ 'language/identifiers/val-super-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-switch-via-escape-hex': [FAIL],
+ 'language/identifiers/val-switch-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-throw-via-escape-hex': [FAIL],
+ 'language/identifiers/val-throw-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-true-via-escape-hex': [FAIL],
+ 'language/identifiers/val-true-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-try-via-escape-hex': [FAIL],
+ 'language/identifiers/val-try-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-typeof-via-escape-hex': [FAIL],
+ 'language/identifiers/val-typeof-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-var-via-escape-hex': [FAIL],
+ 'language/identifiers/val-var-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-void-via-escape-hex': [FAIL],
+ 'language/identifiers/val-void-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-while-via-escape-hex': [FAIL],
+ 'language/identifiers/val-while-via-escape-hex4': [FAIL],
+ 'language/identifiers/val-with-via-escape-hex': [FAIL],
+ 'language/identifiers/val-with-via-escape-hex4': [FAIL],
+
+ # https://code.google.com/p/v8/issues/detail?id=4362
+ 'built-ins/String/prototype/repeat/empty-string-returns-empty': [PASS, FAIL],
######################## NEEDS INVESTIGATION ###########################
# These test failures are specific to the intl402 suite and need investigation
# to be either marked as bugs, with issues filed for them, or as deliberate
# incompatibilities if the test cases turn out to be broken or ambiguous.
- '6.2.3': [FAIL],
- '9.2.1_2': [FAIL],
- '9.2.6_2': [FAIL],
- '10.1.1_a': [FAIL],
- '10.1.1_19_c': [PASS, FAIL, NO_VARIANTS],
- '10.1.2.1_4': [FAIL],
- '10.2.3_b': [PASS, FAIL],
- '10.3_a': [FAIL],
- '11.1.1_17': [PASS, FAIL],
- '11.1.1_19': [PASS, FAIL],
- '11.1.1_20_c': [FAIL],
- '11.1.1_a': [FAIL],
- '11.1.2.1_4': [FAIL],
- '11.3.2_FN_2': [PASS, FAIL],
- '11.3.2_TRF': [PASS, FAIL],
- '11.3_a': [FAIL],
- '12.1.1_a': [FAIL],
- '12.1.2.1_4': [FAIL],
- '12.3.2_FDT_7_a_iv': [FAIL],
- '12.3.3': [FAIL],
- '12.3_a': [FAIL],
- '15.5.4.9_3': [PASS, FAIL],
+ # Some of these are related to v8:4361, as they observe visible side effects from Intl.
+ 'intl402/6.2.3': [FAIL],
+ 'intl402/9.2.1_2': [FAIL],
+ 'intl402/9.2.6_2': [FAIL],
+ 'intl402/Collator/10.1.2.1_4': [FAIL],
+ 'intl402/Collator/10.1.2_a': [PASS, FAIL],
+ 'intl402/Collator/10.2.3_b': [PASS, FAIL],
+ 'intl402/Collator/prototype/10.3_a': [FAIL],
+ 'intl402/Date/prototype/13.3.0_7': [FAIL],
+ 'intl402/DateTimeFormat/12.1.1': [FAIL],
+ 'intl402/DateTimeFormat/12.1.1_a': [FAIL],
+ 'intl402/DateTimeFormat/12.1.2': [PASS, FAIL],
+ 'intl402/DateTimeFormat/12.1.2.1_4': [FAIL],
+ 'intl402/DateTimeFormat/12.2.3_b': [FAIL],
+ 'intl402/DateTimeFormat/prototype/12.3.2_FDT_7_a_iv': [FAIL],
+ 'intl402/DateTimeFormat/prototype/12.3.3': [FAIL],
+ 'intl402/DateTimeFormat/prototype/12.3_a': [FAIL],
+ 'intl402/DateTimeFormat/prototype/format/12.3.2_FDT_7_a_iv': [FAIL],
+ 'intl402/Number/prototype/toLocaleString/13.2.1_5': [PASS, FAIL],
+ 'intl402/NumberFormat/11.1.1_20_c': [FAIL],
+ 'intl402/NumberFormat/11.1.1_a': [FAIL],
+ 'intl402/NumberFormat/11.1.1': [FAIL],
+ 'intl402/NumberFormat/11.1.2': [PASS, FAIL],
+ 'intl402/NumberFormat/11.1.2.1_4': [FAIL],
+ 'intl402/NumberFormat/11.2.3_b': [FAIL],
+ 'intl402/NumberFormat/prototype/11.3_a': [FAIL],
+ 'intl402/String/prototype/localeCompare/13.1.1_7': [PASS, FAIL],
##################### DELIBERATE INCOMPATIBILITIES #####################
- 'S15.8.2.8_A6': [PASS, FAIL_OK], # Math.exp (less precise with --fast-math)
+ 'built-ins/Math/exp/S15.8.2.8_A6': [PASS, FAIL_OK], # Math.exp (less precise with --fast-math)
# Linux for ia32 (and therefore the simulators) defaults to the extended
# 80-bit floating point format, so these tests checking 64-bit FP precision
# fail. The other platforms/architectures pass these tests.
# We follow the other major JS engines by keeping this default.
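+ # For example, a double computation kept in 80-bit x87 registers and only
+ # rounded to 64 bits on store can differ in the final bit from strict
+ # 64-bit evaluation (double rounding).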
- 'S8.5_A2.1': [PASS, FAIL_OK],
- 'S8.5_A2.2': [PASS, FAIL_OK],
+ 'language/types/number/S8.5_A2.1': [PASS, FAIL_OK],
+ 'language/types/number/S8.5_A2.2': [PASS, FAIL_OK],
############################ INVALID TESTS #############################
@@ -326,48 +693,83 @@
# tests in PST/PDT between first Sunday in March and first Sunday in April.
# The DST switch was moved in 2007 whereas Test262 bases the reference value
# on 2000. Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=293
- 'S15.9.3.1_A5_T1': [PASS, FAIL_OK],
- 'S15.9.3.1_A5_T2': [PASS, FAIL_OK],
- 'S15.9.3.1_A5_T3': [PASS, FAIL_OK],
- 'S15.9.3.1_A5_T4': [PASS, FAIL_OK],
- 'S15.9.3.1_A5_T5': [PASS, FAIL_OK],
- 'S15.9.3.1_A5_T6': [PASS, FAIL_OK],
-
- # ObjectKeys() no longer throws TypeError when passed a primitive value which
- # is not null or undefined (per ES6).
- '15.2.3.14-1-1': [FAIL_OK],
- '15.2.3.14-1-2': [FAIL_OK],
- '15.2.3.14-1-3': [FAIL_OK],
-
- # Object.getOwnPropertyNames(O) no longer throws when passed a primitive value.
- '15.2.3.4-1-4': [FAIL_OK],
- '15.2.3.4-1-5': [FAIL_OK],
- '15.2.3.4-1': [FAIL_OK],
+ 'built-ins/Date/S15.9.3.1_A5_T1': [PASS, FAIL_OK],
+ 'built-ins/Date/S15.9.3.1_A5_T2': [PASS, FAIL_OK],
+ 'built-ins/Date/S15.9.3.1_A5_T3': [PASS, FAIL_OK],
+ 'built-ins/Date/S15.9.3.1_A5_T4': [PASS, FAIL_OK],
+ 'built-ins/Date/S15.9.3.1_A5_T5': [PASS, FAIL_OK],
+ 'built-ins/Date/S15.9.3.1_A5_T6': [PASS, FAIL_OK],
+
+ # Test makes unjustified assumptions about the number of calls to SortCompare.
+ # Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=596
+ 'built-ins/Array/prototype/sort/bug_596_1': [PASS, FAIL_OK],
+
+ # These tests do not return a boolean.
+ 'built-ins/Object/keys/15.2.3.14-1-1': [PASS, FAIL_OK],
+ 'built-ins/Object/keys/15.2.3.14-1-2': [PASS, FAIL_OK],
+ 'built-ins/Object/keys/15.2.3.14-1-3': [PASS, FAIL_OK],
+
+ # Test bug https://github.com/tc39/test262/issues/405
+ 'intl402/Collator/prototype/compare/10.3.2_1_c': [PASS, FAIL_OK],
+ 'intl402/Collator/prototype/compare/10.3.2_CS_b_NN': [PASS, FAIL_OK],
+ 'intl402/Collator/prototype/compare/10.3.2_CS_c_NN': [PASS, FAIL_OK],
+ 'intl402/Collator/prototype/compare/10.3.2_CS_d_NN': [PASS, FAIL_OK],
+ 'intl402/Date/prototype/13.3.0_7': [PASS, FAIL_OK],
############################ SKIPPED TESTS #############################
- # These tests take a looong time to run in debug mode.
- 'S15.1.3.1_A2.5_T1': [PASS, ['mode == debug', SKIP]],
- 'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
+ # These tests take a very long time to run.
+ 'built-ins/decodeURI/S15.1.3.1_A1.10_T1': [SKIP],
+ 'built-ins/decodeURI/S15.1.3.1_A1.11_T1': [SKIP],
+ 'built-ins/decodeURI/S15.1.3.1_A1.11_T2': [SKIP],
+ 'built-ins/decodeURI/S15.1.3.1_A1.12_T1': [SKIP],
+ 'built-ins/decodeURI/S15.1.3.1_A1.12_T2': [SKIP],
+ 'built-ins/decodeURI/S15.1.3.1_A2.5_T1': [SKIP],
+ 'built-ins/decodeURIComponent/S15.1.3.2_A1.11_T1': [SKIP],
+ 'built-ins/decodeURIComponent/S15.1.3.2_A1.12_T1': [SKIP],
+ 'built-ins/decodeURIComponent/S15.1.3.2_A2.5_T1': [SKIP],
+ 'built-ins/RegExp/S15.10.2.12_A3_T1': [SKIP],
+ 'intl402/9.2.6_4_b': [SKIP],
+ 'language/literals/regexp/S7.8.5_A1.1_T2': [SKIP],
+ 'language/literals/regexp/S7.8.5_A1.4_T2': [SKIP],
+ 'language/literals/regexp/S7.8.5_A2.1_T2': [SKIP],
+ 'language/literals/regexp/S7.8.5_A2.4_T2': [SKIP],
+ 'language/statements/const/syntax/const-invalid-assignment-next-expression-for': [SKIP],
}], # ALWAYS
['system == macos', {
- '11.3.2_TRP': [FAIL],
- '9.2.5_11_g_ii_2': [FAIL],
+ 'intl402/11.3.2_TRP': [FAIL],
+ 'intl402/9.2.5_11_g_ii_2': [FAIL],
+
+ # BUG(v8:4437).
+ 'intl402/Collator/10.1.1_19_c': [SKIP],
+ 'intl402/Collator/9.2.5_11_g_ii_2': [SKIP],
+ 'intl402/NumberFormat/11.1.1_17': [SKIP],
+ 'intl402/NumberFormat/11.1.1_19': [SKIP],
+ 'intl402/NumberFormat/prototype/format/11.3.2_TRF': [SKIP],
+ 'intl402/NumberFormat/prototype/format/11.3.2_TRP': [SKIP],
}], # system == macos
-['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64el', {
+['no_i18n == True and mode == debug', {
+ # BUG(v8:4437).
+ 'built-ins/String/prototype/normalize/return-normalized-string': [SKIP],
+ 'built-ins/String/prototype/normalize/return-normalized-string-from-coerced-form': [SKIP],
+ 'built-ins/String/prototype/normalize/return-normalized-string-using-default-parameter': [SKIP],
+}], # no_i18n == True and mode == debug
+
+
+['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64 or arch == mips64el', {
# TODO(mstarzinger): Causes stack overflow on simulators due to eager
# compilation of parenthesized function literals. Needs investigation.
- 'S13.2.1_A1_T1': [SKIP],
+ 'language/statements/function/S13.2.1_A1_T1': [SKIP],
# BUG(3251225): Tests that time out with --nocrankshaft.
- 'S15.1.3.1_A2.4_T1': [SKIP],
- 'S15.1.3.1_A2.5_T1': [SKIP],
- 'S15.1.3.2_A2.4_T1': [SKIP],
- 'S15.1.3.2_A2.5_T1': [SKIP],
- 'S15.1.3.3_A2.3_T1': [SKIP],
- 'S15.1.3.4_A2.3_T1': [SKIP],
+ 'built-ins/decodeURI/S15.1.3.1_A2.4_T1': [SKIP],
+ 'built-ins/decodeURI/S15.1.3.1_A2.5_T1': [SKIP],
+ 'built-ins/decodeURIComponent/S15.1.3.2_A2.4_T1': [SKIP],
+ 'built-ins/decodeURIComponent/S15.1.3.2_A2.5_T1': [SKIP],
+ 'built-ins/encodeURI/S15.1.3.3_A2.3_T1': [SKIP],
+ 'built-ins/encodeURIComponent/S15.1.3.4_A2.3_T1': [SKIP],
-}], # 'arch == arm or arch == mipsel or arch == mips or arch == arm64'
+}], # arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64 or arch == mips64el
]
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index a0c2cd6568..d02ad0777d 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -27,33 +27,87 @@
import hashlib
+import imp
import os
import shutil
import sys
import tarfile
+
+from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
-
-TEST_262_ARCHIVE_REVISION = "fbba29f" # This is the r365 revision.
-TEST_262_ARCHIVE_MD5 = "e1ff0db438cc12de8fb6da80621b4ef6"
+# The archive revision is pinned by its abbreviated 7-character hash.
+TEST_262_ARCHIVE_REVISION = "6137f75" # This is the 2015-08-25 revision.
+TEST_262_ARCHIVE_MD5 = "c1eaf890d46e73d6c7e05ab21f76e668"
TEST_262_URL = "https://github.com/tc39/test262/tarball/%s"
-TEST_262_HARNESS = ["sta.js", "testBuiltInObject.js", "testIntl.js"]
+TEST_262_HARNESS_FILES = ["sta.js", "assert.js"]
+
+TEST_262_SUITE_PATH = ["data", "test"]
+TEST_262_HARNESS_PATH = ["data", "harness"]
+TEST_262_TOOLS_PATH = ["data", "tools", "packaging"]
+
+ALL_VARIANT_FLAGS_STRICT = dict(
+ (v, [flags + ["--use-strict"] for flags in flag_sets])
+ for v, flag_sets in testsuite.ALL_VARIANT_FLAGS.iteritems()
+)
+
+FAST_VARIANT_FLAGS_STRICT = dict(
+ (v, [flags + ["--use-strict"] for flags in flag_sets])
+ for v, flag_sets in testsuite.FAST_VARIANT_FLAGS.iteritems()
+)
+
+ALL_VARIANT_FLAGS_BOTH = dict(
+ (v, testsuite.ALL_VARIANT_FLAGS[v] + ALL_VARIANT_FLAGS_STRICT[v])
+ for v in testsuite.ALL_VARIANT_FLAGS
+)
+
+FAST_VARIANT_FLAGS_BOTH = dict(
+ (v, testsuite.FAST_VARIANT_FLAGS[v] + FAST_VARIANT_FLAGS_STRICT[v])
+ for v in testsuite.FAST_VARIANT_FLAGS
+)
+
+ALL_VARIANTS = {
+ 'nostrict': testsuite.ALL_VARIANT_FLAGS,
+ 'strict': ALL_VARIANT_FLAGS_STRICT,
+ 'both': ALL_VARIANT_FLAGS_BOTH,
+}
+
+FAST_VARIANTS = {
+ 'nostrict': testsuite.FAST_VARIANT_FLAGS,
+ 'strict': FAST_VARIANT_FLAGS_STRICT,
+ 'both': FAST_VARIANT_FLAGS_BOTH,
+}
+
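+# Selects flag sets per test record: "noStrict" restricts a test to sloppy
+# mode, "onlyStrict" to strict mode; all other tests run in both modes.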
+class Test262VariantGenerator(testsuite.VariantGenerator):
+ def GetFlagSets(self, testcase, variant):
+ if testcase.outcomes and statusfile.OnlyFastVariants(testcase.outcomes):
+ variant_flags = FAST_VARIANTS
+ else:
+ variant_flags = ALL_VARIANTS
+
+ test_record = self.suite.GetTestRecord(testcase)
+ if "noStrict" in test_record:
+ return variant_flags["nostrict"][variant]
+ if "onlyStrict" in test_record:
+ return variant_flags["strict"][variant]
+ return variant_flags["both"][variant]
class Test262TestSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(Test262TestSuite, self).__init__(name, root)
- self.testroot = os.path.join(root, "data", "test", "suite")
- self.harness = [os.path.join(self.root, "data", "test", "harness", f)
- for f in TEST_262_HARNESS]
+ self.testroot = os.path.join(self.root, *TEST_262_SUITE_PATH)
+ self.harnesspath = os.path.join(self.root, *TEST_262_HARNESS_PATH)
+ self.harness = [os.path.join(self.harnesspath, f)
+ for f in TEST_262_HARNESS_FILES]
self.harness += [os.path.join(self.root, "harness-adapt.js")]
-
- def CommonTestName(self, testcase):
- return testcase.path.split(os.path.sep)[-1]
+ self.ParseTestRecord = None
def ListTests(self, context):
tests = []
@@ -66,29 +120,74 @@ class Test262TestSuite(testsuite.TestSuite):
files.sort()
for filename in files:
if filename.endswith(".js"):
- testname = os.path.join(dirname[len(self.testroot) + 1:],
- filename[:-3])
+ fullpath = os.path.join(dirname, filename)
+ relpath = fullpath[len(self.testroot) + 1 : -3]
+ testname = relpath.replace(os.path.sep, "/")
case = testcase.TestCase(self, testname)
tests.append(case)
return tests
def GetFlagsForTestCase(self, testcase, context):
return (testcase.flags + context.mode_flags + self.harness +
+ self.GetIncludesForTest(testcase) + ["--harmony"] +
[os.path.join(self.testroot, testcase.path + ".js")])
+ def _VariantGeneratorFactory(self):
+ return Test262VariantGenerator
+
+ def LoadParseTestRecord(self):
+ if not self.ParseTestRecord:
+ root = os.path.join(self.root, *TEST_262_TOOLS_PATH)
+ f = None
+ try:
+ (f, pathname, description) = imp.find_module("parseTestRecord", [root])
+ module = imp.load_module("parseTestRecord", f, pathname, description)
+ self.ParseTestRecord = module.parseTestRecord
+ except Exception:
+ raise ImportError("Cannot load parseTestRecord; you may need to "
+ "--download-data for test262")
+ finally:
+ if f:
+ f.close()
+ return self.ParseTestRecord
+
+ def GetTestRecord(self, testcase):
+ if not hasattr(testcase, "test_record"):
+ ParseTestRecord = self.LoadParseTestRecord()
+ testcase.test_record = ParseTestRecord(self.GetSourceForTest(testcase),
+ testcase.path)
+ return testcase.test_record
+
+ def GetIncludesForTest(self, testcase):
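+ # A test's front matter may list extra harness files under "includes";
+ # resolve each against the harness directory so it precedes the test body.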
+ test_record = self.GetTestRecord(testcase)
+ if "includes" in test_record:
+ includes = [os.path.join(self.harnesspath, f)
+ for f in test_record["includes"]]
+ else:
+ includes = []
+ return includes
+
def GetSourceForTest(self, testcase):
filename = os.path.join(self.testroot, testcase.path + ".js")
with open(filename) as f:
return f.read()
def IsNegativeTest(self, testcase):
- return "@negative" in self.GetSourceForTest(testcase)
+ test_record = self.GetTestRecord(testcase)
+ return "negative" in test_record
def IsFailureOutput(self, output, testpath):
if output.exit_code != 0:
return True
return "FAILED!" in output.stdout
+ def HasUnexpectedOutput(self, testcase):
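+ # A FAIL_SLOPPY expectation applies only to runs without --use-strict;
+ # strict-mode runs of the same test fall back to the normal outcome check.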
+ outcome = self.GetOutcome(testcase)
+ if (statusfile.FAIL_SLOPPY in testcase.outcomes and
+ "--use-strict" not in testcase.flags):
+ return outcome != statusfile.FAIL
+ return outcome not in (testcase.outcomes or [statusfile.PASS])
+
def DownloadData(self):
revision = TEST_262_ARCHIVE_REVISION
archive_url = TEST_262_URL % revision
@@ -123,7 +222,7 @@ class Test262TestSuite(testsuite.TestSuite):
if md5.hexdigest() != TEST_262_ARCHIVE_MD5:
os.remove(archive_name)
print "MD5 expected %s" % TEST_262_ARCHIVE_MD5
- raise Exception("Hash mismatch of test data file")
+ raise Exception("MD5 hash mismatch of test data file")
archive = tarfile.open(archive_name, "r:gz")
if sys.platform in ("win32", "cygwin"):
# Magic incantation to allow longer path names on Windows.
diff --git a/deps/v8/test/unittests/atomic-utils-unittest.cc b/deps/v8/test/unittests/atomic-utils-unittest.cc
new file mode 100644
index 0000000000..ad33853d58
--- /dev/null
+++ b/deps/v8/test/unittests/atomic-utils-unittest.cc
@@ -0,0 +1,217 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h>
+
+#include "src/atomic-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(AtomicNumber, Constructor) {
+ // Test some common types.
+ AtomicNumber<int> zero_int;
+ AtomicNumber<size_t> zero_size_t;
+ AtomicNumber<intptr_t> zero_intptr_t;
+ EXPECT_EQ(0, zero_int.Value());
+ EXPECT_EQ(0U, zero_size_t.Value());
+ EXPECT_EQ(0, zero_intptr_t.Value());
+}
+
+
+TEST(AtomicNumber, Value) {
+ AtomicNumber<int> a(1);
+ EXPECT_EQ(1, a.Value());
+ AtomicNumber<int> b(-1);
+ EXPECT_EQ(-1, b.Value());
+ AtomicNumber<size_t> c(1);
+ EXPECT_EQ(1U, c.Value());
+ AtomicNumber<size_t> d(static_cast<size_t>(-1));
+ EXPECT_EQ(std::numeric_limits<size_t>::max(), d.Value());
+}
+
+
+TEST(AtomicNumber, SetValue) {
+ AtomicNumber<int> a(1);
+ a.SetValue(-1);
+ EXPECT_EQ(-1, a.Value());
+}
+
+
+TEST(AtomicNumber, Increment) {
+ AtomicNumber<int> a(std::numeric_limits<int>::max());
+ a.Increment(1);
+ EXPECT_EQ(std::numeric_limits<int>::min(), a.Value());
+ // Check that potential signed-ness of the underlying storage has no impact
+ // on unsigned types.
+ AtomicNumber<size_t> b(std::numeric_limits<intptr_t>::max());
+ b.Increment(1);
+ EXPECT_EQ(static_cast<size_t>(std::numeric_limits<intptr_t>::max()) + 1,
+ b.Value());
+ // Should work as decrement as well.
+ AtomicNumber<size_t> c(1);
+ c.Increment(-1);
+ EXPECT_EQ(0U, c.Value());
+ c.Increment(-1);
+ EXPECT_EQ(std::numeric_limits<size_t>::max(), c.Value());
+}
+
+
+namespace {
+
+enum TestFlag {
+ kA,
+ kB,
+ kC,
+};
+
+} // namespace
+
+
+TEST(AtomicValue, Initial) {
+ AtomicValue<TestFlag> a(kA);
+ EXPECT_EQ(TestFlag::kA, a.Value());
+}
+
+
+TEST(AtomicValue, TrySetValue) {
+ AtomicValue<TestFlag> a(kA);
+ EXPECT_FALSE(a.TrySetValue(kB, kC));
+ EXPECT_TRUE(a.TrySetValue(kA, kC));
+ EXPECT_EQ(TestFlag::kC, a.Value());
+}
+
+
+TEST(AtomicValue, SetValue) {
+ AtomicValue<TestFlag> a(kB);
+ a.SetValue(kC);
+ EXPECT_EQ(TestFlag::kC, a.Value());
+}
+
+
+TEST(AtomicValue, WithVoidStar) {
+ AtomicValue<void*> a(nullptr);
+ AtomicValue<void*> dummy(nullptr);
+ EXPECT_EQ(nullptr, a.Value());
+ a.SetValue(&a);
+ EXPECT_EQ(&a, a.Value());
+ EXPECT_FALSE(a.TrySetValue(nullptr, &dummy));
+ EXPECT_TRUE(a.TrySetValue(&a, &dummy));
+ EXPECT_EQ(&dummy, a.Value());
+}
+
+
+namespace {
+
+enum TestSetValue { kAA, kBB, kCC, kLastValue = kCC };
+
+} // namespace
+
+
+TEST(AtomicEnumSet, Constructor) {
+ AtomicEnumSet<TestSetValue> a;
+ EXPECT_TRUE(a.IsEmpty());
+ EXPECT_FALSE(a.Contains(kAA));
+}
+
+
+TEST(AtomicEnumSet, AddSingle) {
+ AtomicEnumSet<TestSetValue> a;
+ a.Add(kAA);
+ EXPECT_FALSE(a.IsEmpty());
+ EXPECT_TRUE(a.Contains(kAA));
+ EXPECT_FALSE(a.Contains(kBB));
+ EXPECT_FALSE(a.Contains(kCC));
+}
+
+
+TEST(AtomicEnumSet, AddOtherSet) {
+ AtomicEnumSet<TestSetValue> a;
+ AtomicEnumSet<TestSetValue> b;
+ a.Add(kAA);
+ EXPECT_FALSE(a.IsEmpty());
+ EXPECT_TRUE(b.IsEmpty());
+ b.Add(a);
+ EXPECT_FALSE(b.IsEmpty());
+ EXPECT_TRUE(a.Contains(kAA));
+ EXPECT_TRUE(b.Contains(kAA));
+}
+
+
+TEST(AtomicEnumSet, RemoveSingle) {
+ AtomicEnumSet<TestSetValue> a;
+ a.Add(kAA);
+ a.Add(kBB);
+ EXPECT_TRUE(a.Contains(kAA));
+ EXPECT_TRUE(a.Contains(kBB));
+ a.Remove(kAA);
+ EXPECT_FALSE(a.Contains(kAA));
+ EXPECT_TRUE(a.Contains(kBB));
+}
+
+
+TEST(AtomicEnumSet, RemoveOtherSet) {
+ AtomicEnumSet<TestSetValue> a;
+ AtomicEnumSet<TestSetValue> b;
+ a.Add(kAA);
+ a.Add(kBB);
+ b.Add(kBB);
+ a.Remove(b);
+ EXPECT_TRUE(a.Contains(kAA));
+ EXPECT_FALSE(a.Contains(kBB));
+ EXPECT_FALSE(a.Contains(kCC));
+}
+
+
+TEST(AtomicEnumSet, RemoveEmptySet) {
+ AtomicEnumSet<TestSetValue> a;
+ AtomicEnumSet<TestSetValue> b;
+ a.Add(kAA);
+ a.Add(kBB);
+ EXPECT_TRUE(a.Contains(kAA));
+ EXPECT_TRUE(a.Contains(kBB));
+ EXPECT_FALSE(a.Contains(kCC));
+ EXPECT_TRUE(b.IsEmpty());
+ a.Remove(b);
+ EXPECT_TRUE(a.Contains(kAA));
+ EXPECT_TRUE(a.Contains(kBB));
+ EXPECT_FALSE(a.Contains(kCC));
+}
+
+
+TEST(AtomicEnumSet, Intersect) {
+ AtomicEnumSet<TestSetValue> a;
+ AtomicEnumSet<TestSetValue> b;
+ a.Add(kAA);
+ b.Add(kCC);
+ a.Intersect(b);
+ EXPECT_TRUE(a.IsEmpty());
+}
+
+
+TEST(AtomicEnumSet, ContainsAnyOf) {
+ AtomicEnumSet<TestSetValue> a;
+ AtomicEnumSet<TestSetValue> b;
+ a.Add(kAA);
+ b.Add(kCC);
+ EXPECT_FALSE(a.ContainsAnyOf(b));
+ b.Add(kAA);
+ EXPECT_TRUE(a.ContainsAnyOf(b));
+}
+
+
+TEST(AtomicEnumSet, Equality) {
+ AtomicEnumSet<TestSetValue> a;
+ AtomicEnumSet<TestSetValue> b;
+ a.Add(kAA);
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+ b.Add(kAA);
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index 1bc23e6e0a..941fa26e00 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -1392,6 +1392,7 @@ struct Comparison {
const char* constructor_name;
FlagsCondition flags_condition;
FlagsCondition negated_flags_condition;
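+ // Condition to expect when the comparison operands are swapped, e.g. after
+ // moving a constant zero from the right-hand side to the left-hand side.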
+ FlagsCondition commuted_flags_condition;
};
@@ -1401,15 +1402,17 @@ std::ostream& operator<<(std::ostream& os, const Comparison& cmp) {
const Comparison kComparisons[] = {
- {&RawMachineAssembler::Word32Equal, "Word32Equal", kEqual, kNotEqual},
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kEqual, kNotEqual,
+ kEqual},
{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kSignedLessThan,
- kSignedGreaterThanOrEqual},
+ kSignedGreaterThanOrEqual, kSignedGreaterThan},
{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
- kSignedLessThanOrEqual, kSignedGreaterThan},
+ kSignedLessThanOrEqual, kSignedGreaterThan, kSignedGreaterThanOrEqual},
{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kUnsignedLessThan,
- kUnsignedGreaterThanOrEqual},
+ kUnsignedGreaterThanOrEqual, kUnsignedGreaterThan},
{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
- kUnsignedLessThanOrEqual, kUnsignedGreaterThan}};
+ kUnsignedLessThanOrEqual, kUnsignedGreaterThan,
+ kUnsignedGreaterThanOrEqual}};
} // namespace
@@ -1495,11 +1498,13 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
namespace {
const Comparison kF32Comparisons[] = {
- {&RawMachineAssembler::Float32Equal, "Float32Equal", kEqual, kNotEqual},
+ {&RawMachineAssembler::Float32Equal, "Float32Equal", kEqual, kNotEqual,
+ kEqual},
{&RawMachineAssembler::Float32LessThan, "Float32LessThan",
- kUnsignedLessThan, kUnsignedGreaterThanOrEqual},
+ kFloatLessThan, kFloatGreaterThanOrEqualOrUnordered, kFloatGreaterThan},
{&RawMachineAssembler::Float32LessThanOrEqual, "Float32LessThanOrEqual",
- kUnsignedLessThanOrEqual, kUnsignedGreaterThan}};
+ kFloatLessThanOrEqual, kFloatGreaterThanOrUnordered,
+ kFloatGreaterThanOrEqual}};
} // namespace
@@ -1551,33 +1556,36 @@ TEST_P(InstructionSelectorF32ComparisonTest, WithImmediateZeroOnRight) {
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorF32ComparisonTest,
- ::testing::ValuesIn(kF32Comparisons));
-
-
-TEST_F(InstructionSelectorTest, Float32EqualWithImmediateZeroOnLeft) {
+TEST_P(InstructionSelectorF32ComparisonTest, WithImmediateZeroOnLeft) {
+ const Comparison& cmp = GetParam();
StreamBuilder m(this, kMachInt32, kMachFloat32);
- m.Return(m.Float32Equal(m.Float32Constant(0.0f), m.Parameter(0)));
- Stream s = m.Build();
+ m.Return((m.*cmp.constructor)(m.Float32Constant(0.0f), m.Parameter(0)));
+ Stream const s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmVcmpF32, s[0]->arch_opcode());
- EXPECT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
- EXPECT_EQ(1U, s[0]->OutputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
- EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(cmp.commuted_flags_condition, s[0]->flags_condition());
}
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorF32ComparisonTest,
+ ::testing::ValuesIn(kF32Comparisons));
+
+
namespace {
const Comparison kF64Comparisons[] = {
- {&RawMachineAssembler::Float64Equal, "Float64Equal", kEqual, kNotEqual},
+ {&RawMachineAssembler::Float64Equal, "Float64Equal", kEqual, kNotEqual,
+ kEqual},
{&RawMachineAssembler::Float64LessThan, "Float64LessThan",
- kUnsignedLessThan, kUnsignedGreaterThanOrEqual},
+ kFloatLessThan, kFloatGreaterThanOrEqualOrUnordered, kFloatGreaterThan},
{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
- kUnsignedLessThanOrEqual, kUnsignedGreaterThan}};
+ kFloatLessThanOrEqual, kFloatGreaterThanOrUnordered,
+ kFloatGreaterThanOrEqual}};
} // namespace
@@ -1629,25 +1637,26 @@ TEST_P(InstructionSelectorF64ComparisonTest, WithImmediateZeroOnRight) {
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
- InstructionSelectorF64ComparisonTest,
- ::testing::ValuesIn(kF64Comparisons));
-
-
-TEST_F(InstructionSelectorTest, Float64EqualWithImmediateZeroOnLeft) {
+TEST_P(InstructionSelectorF64ComparisonTest, WithImmediateZeroOnLeft) {
+ const Comparison& cmp = GetParam();
StreamBuilder m(this, kMachInt32, kMachFloat64);
- m.Return(m.Float64Equal(m.Float64Constant(0.0), m.Parameter(0)));
- Stream s = m.Build();
+ m.Return((m.*cmp.constructor)(m.Float64Constant(0.0), m.Parameter(0)));
+ Stream const s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmVcmpF64, s[0]->arch_opcode());
- EXPECT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
- EXPECT_EQ(1U, s[0]->OutputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
- EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(cmp.commuted_flags_condition, s[0]->flags_condition());
}
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorF64ComparisonTest,
+ ::testing::ValuesIn(kF64Comparisons));
+
+
// -----------------------------------------------------------------------------
// Floating point arithmetic.
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 71c2d44d2f..154645c1fb 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -220,6 +220,7 @@ const MachInst2 kFPArithInstructions[] = {
struct FPCmp {
MachInst2 mi;
FlagsCondition cond;
+ FlagsCondition commuted_cond;
};
@@ -232,13 +233,22 @@ std::ostream& operator<<(std::ostream& os, const FPCmp& cmp) {
const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kArm64Float64Cmp,
kMachFloat64},
- kEqual},
+ kEqual, kEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan",
kArm64Float64Cmp, kMachFloat64},
- kUnsignedLessThan},
+ kFloatLessThan, kFloatGreaterThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
kArm64Float64Cmp, kMachFloat64},
- kUnsignedLessThanOrEqual}};
+ kFloatLessThanOrEqual, kFloatGreaterThanOrEqual},
+ {{&RawMachineAssembler::Float32Equal, "Float32Equal", kArm64Float32Cmp,
+ kMachFloat32},
+ kEqual, kEqual},
+ {{&RawMachineAssembler::Float32LessThan, "Float32LessThan",
+ kArm64Float32Cmp, kMachFloat32},
+ kFloatLessThan, kFloatGreaterThan},
+ {{&RawMachineAssembler::Float32LessThanOrEqual, "Float32LessThanOrEqual",
+ kArm64Float32Cmp, kMachFloat32},
+ kFloatLessThanOrEqual, kFloatGreaterThanOrEqual}};
struct Conversion {
@@ -1913,7 +1923,11 @@ TEST_P(InstructionSelectorFPCmpTest, Parameter) {
TEST_P(InstructionSelectorFPCmpTest, WithImmediateZeroOnRight) {
const FPCmp cmp = GetParam();
StreamBuilder m(this, kMachInt32, cmp.mi.machine_type);
- m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Float64Constant(0.0)));
+ if (cmp.mi.machine_type == kMachFloat64) {
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Float64Constant(0.0)));
+ } else {
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Float32Constant(0.0f)));
+ }
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
@@ -1925,24 +1939,29 @@ TEST_P(InstructionSelectorFPCmpTest, WithImmediateZeroOnRight) {
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
- ::testing::ValuesIn(kFPCmpInstructions));
-
-
-TEST_F(InstructionSelectorTest, Float64EqualWithImmediateZeroOnLeft) {
- StreamBuilder m(this, kMachInt32, kMachFloat64);
- m.Return(m.Float64Equal(m.Float64Constant(0.0), m.Parameter(0)));
+TEST_P(InstructionSelectorFPCmpTest, WithImmediateZeroOnLeft) {
+ const FPCmp cmp = GetParam();
+ StreamBuilder m(this, kMachInt32, cmp.mi.machine_type);
+ if (cmp.mi.machine_type == kMachFloat64) {
+ m.Return((m.*cmp.mi.constructor)(m.Float64Constant(0.0), m.Parameter(0)));
+ } else {
+ m.Return((m.*cmp.mi.constructor)(m.Float32Constant(0.0f), m.Parameter(0)));
+ }
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Float64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
EXPECT_EQ(2U, s[0]->InputCount());
EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kFlags_set, s[0]->flags_mode());
- EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
}
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+ ::testing::ValuesIn(kFPCmpInstructions));
+
+
// -----------------------------------------------------------------------------
// Conversions.
diff --git a/deps/v8/test/unittests/compiler/bytecode-graph-builder-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-graph-builder-unittest.cc
new file mode 100644
index 0000000000..22b9b893e8
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/bytecode-graph-builder-unittest.cc
@@ -0,0 +1,248 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iostream>
+
+#include "src/compiler/bytecode-graph-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/interpreter/bytecode-array-builder.h"
+#include "src/parser.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "test/unittests/test-utils.h"
+
+using ::testing::_;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BytecodeGraphBuilderTest : public TestWithIsolateAndZone {
+ public:
+ BytecodeGraphBuilderTest() : array_builder_(isolate(), zone()) {}
+
+ Graph* GetCompletedGraph();
+
+ Matcher<Node*> IsUndefinedConstant();
+ Matcher<Node*> IsNullConstant();
+ Matcher<Node*> IsTheHoleConstant();
+ Matcher<Node*> IsFalseConstant();
+ Matcher<Node*> IsTrueConstant();
+
+ interpreter::BytecodeArrayBuilder* array_builder() { return &array_builder_; }
+
+ private:
+ interpreter::BytecodeArrayBuilder array_builder_;
+
+ DISALLOW_COPY_AND_ASSIGN(BytecodeGraphBuilderTest);
+};
+
+
+Graph* BytecodeGraphBuilderTest::GetCompletedGraph() {
+ MachineOperatorBuilder* machine = new (zone()) MachineOperatorBuilder(
+ zone(), kMachPtr, InstructionSelector::SupportedMachineOperatorFlags());
+ CommonOperatorBuilder* common = new (zone()) CommonOperatorBuilder(zone());
+ JSOperatorBuilder* javascript = new (zone()) JSOperatorBuilder(zone());
+ Graph* graph = new (zone()) Graph(zone());
+ JSGraph* jsgraph =
+ new (zone()) JSGraph(isolate(), graph, common, javascript, machine);
+
+ Handle<String> name = factory()->NewStringFromStaticChars("test");
+ Handle<String> script = factory()->NewStringFromStaticChars("test() {}");
+ Handle<SharedFunctionInfo> shared_info =
+ factory()->NewSharedFunctionInfo(name, MaybeHandle<Code>());
+ shared_info->set_script(*factory()->NewScript(script));
+
+ ParseInfo parse_info(zone(), shared_info);
+ CompilationInfo info(&parse_info);
+ Handle<BytecodeArray> bytecode_array = array_builder()->ToBytecodeArray();
+ info.shared_info()->set_function_data(*bytecode_array);
+
+ BytecodeGraphBuilder graph_builder(zone(), &info, jsgraph);
+ graph_builder.CreateGraph();
+ return graph;
+}
+
+
+Matcher<Node*> BytecodeGraphBuilderTest::IsUndefinedConstant() {
+ return IsHeapConstant(factory()->undefined_value());
+}
+
+
+Matcher<Node*> BytecodeGraphBuilderTest::IsNullConstant() {
+ return IsHeapConstant(factory()->null_value());
+}
+
+
+Matcher<Node*> BytecodeGraphBuilderTest::IsTheHoleConstant() {
+ return IsHeapConstant(factory()->the_hole_value());
+}
+
+
+Matcher<Node*> BytecodeGraphBuilderTest::IsFalseConstant() {
+ return IsHeapConstant(factory()->false_value());
+}
+
+
+Matcher<Node*> BytecodeGraphBuilderTest::IsTrueConstant() {
+ return IsHeapConstant(factory()->true_value());
+}
+
+
+TEST_F(BytecodeGraphBuilderTest, ReturnUndefined) {
+ array_builder()->set_locals_count(0);
+ array_builder()->set_parameter_count(1);
+ array_builder()->LoadUndefined().Return();
+
+ Graph* graph = GetCompletedGraph();
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* ret = end->InputAt(0);
+ Node* effect = graph->start();
+ Node* control = graph->start();
+ EXPECT_THAT(ret, IsReturn(IsUndefinedConstant(), effect, control));
+}
+
+
+TEST_F(BytecodeGraphBuilderTest, ReturnNull) {
+ array_builder()->set_locals_count(0);
+ array_builder()->set_parameter_count(1);
+ array_builder()->LoadNull().Return();
+
+ Graph* graph = GetCompletedGraph();
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* ret = end->InputAt(0);
+ EXPECT_THAT(ret, IsReturn(IsNullConstant(), graph->start(), graph->start()));
+}
+
+
+TEST_F(BytecodeGraphBuilderTest, ReturnTheHole) {
+ array_builder()->set_locals_count(0);
+ array_builder()->set_parameter_count(1);
+ array_builder()->LoadTheHole().Return();
+
+ Graph* graph = GetCompletedGraph();
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* ret = end->InputAt(0);
+ Node* effect = graph->start();
+ Node* control = graph->start();
+ EXPECT_THAT(ret, IsReturn(IsTheHoleConstant(), effect, control));
+}
+
+
+TEST_F(BytecodeGraphBuilderTest, ReturnTrue) {
+ array_builder()->set_locals_count(0);
+ array_builder()->set_parameter_count(1);
+ array_builder()->LoadTrue().Return();
+
+ Graph* graph = GetCompletedGraph();
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* ret = end->InputAt(0);
+ Node* effect = graph->start();
+ Node* control = graph->start();
+ EXPECT_THAT(ret, IsReturn(IsTrueConstant(), effect, control));
+}
+
+
+TEST_F(BytecodeGraphBuilderTest, ReturnFalse) {
+ array_builder()->set_locals_count(0);
+ array_builder()->set_parameter_count(1);
+ array_builder()->LoadFalse().Return();
+
+ Graph* graph = GetCompletedGraph();
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* ret = end->InputAt(0);
+ Node* effect = graph->start();
+ Node* control = graph->start();
+ EXPECT_THAT(ret, IsReturn(IsFalseConstant(), effect, control));
+}
+
+
+TEST_F(BytecodeGraphBuilderTest, ReturnInt8) {
+ static const int kValue = 3;
+ array_builder()->set_locals_count(0);
+ array_builder()->set_parameter_count(1);
+ array_builder()->LoadLiteral(Smi::FromInt(kValue)).Return();
+
+ Graph* graph = GetCompletedGraph();
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* ret = end->InputAt(0);
+ Node* effect = graph->start();
+ Node* control = graph->start();
+ EXPECT_THAT(ret, IsReturn(IsNumberConstant(kValue), effect, control));
+}
+
+
+TEST_F(BytecodeGraphBuilderTest, ReturnDouble) {
+ const double kValue = 0.123456789;
+ array_builder()->set_locals_count(0);
+ array_builder()->set_parameter_count(1);
+ array_builder()->LoadLiteral(factory()->NewHeapNumber(kValue));
+ array_builder()->Return();
+
+ Graph* graph = GetCompletedGraph();
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* ret = end->InputAt(0);
+ Node* effect = graph->start();
+ Node* control = graph->start();
+ EXPECT_THAT(ret, IsReturn(IsNumberConstant(kValue), effect, control));
+}
+
+
+TEST_F(BytecodeGraphBuilderTest, SimpleExpressionWithParameters) {
+ array_builder()->set_locals_count(1);
+ array_builder()->set_parameter_count(3);
+ array_builder()
+ ->LoadAccumulatorWithRegister(array_builder()->Parameter(1))
+ .BinaryOperation(Token::Value::ADD, array_builder()->Parameter(2))
+ .StoreAccumulatorInRegister(interpreter::Register(0))
+ .Return();
+
+ Graph* graph = GetCompletedGraph();
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* ret = end->InputAt(0);
+ // NB: A binary operation is <reg> <op> <acc>. The register holds the
+ // left-hand side, which is why the parameters appear in the opposite
+ // order to the one in which the builder received them.
+ EXPECT_THAT(ret, IsReturn(IsJSAdd(IsParameter(2), IsParameter(1)), _, _));
+}
+
+
+TEST_F(BytecodeGraphBuilderTest, SimpleExpressionWithRegister) {
+ static const int kLeft = -655371;
+ static const int kRight = +2000000;
+ array_builder()->set_locals_count(1);
+ array_builder()->set_parameter_count(1);
+ array_builder()
+ ->LoadLiteral(Smi::FromInt(kLeft))
+ .StoreAccumulatorInRegister(interpreter::Register(0))
+ .LoadLiteral(Smi::FromInt(kRight))
+ .BinaryOperation(Token::Value::ADD, interpreter::Register(0))
+ .Return();
+
+ Graph* graph = GetCompletedGraph();
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* ret = end->InputAt(0);
+ EXPECT_THAT(
+ ret, IsReturn(IsJSAdd(IsNumberConstant(kLeft), IsNumberConstant(kRight)),
+ _, _));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
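
The SimpleExpression tests above depend on the accumulator convention noted in the NB comment: a binary operation combines a register operand on the left with the accumulator on the right. A minimal standalone sketch of that evaluation order (MiniInterpreter is a hypothetical stand-in, not a V8 type):

    #include <cassert>

    // Hypothetical miniature of the accumulator convention: BinaryAdd(r)
    // computes registers[r] + accumulator, so the register is the
    // left-hand operand, matching IsJSAdd(IsParameter(2), IsParameter(1)).
    struct MiniInterpreter {
      int accumulator = 0;
      int registers[4] = {0, 0, 0, 0};
      void LoadAccumulatorWithRegister(int r) { accumulator = registers[r]; }
      void BinaryAdd(int r) { accumulator = registers[r] + accumulator; }
    };

    int main() {
      MiniInterpreter m;
      m.registers[1] = 40;               // Parameter(1)
      m.registers[2] = 2;                // Parameter(2)
      m.LoadAccumulatorWithRegister(1);  // acc = Parameter(1)
      m.BinaryAdd(2);                    // acc = Parameter(2) + acc
      assert(m.accumulator == 42);
    }
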
diff --git a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc b/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
index 0c0df6b8e6..fd2d7c4dae 100644
--- a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
@@ -45,10 +45,9 @@ class ChangeLoweringTest : public TypedGraphTest {
Matcher<Node*> IsAllocateHeapNumber(const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
- return IsCall(_, IsHeapConstant(Unique<HeapObject>::CreateImmovable(
- AllocateHeapNumberStub(isolate()).GetCode())),
- IsNumberConstant(BitEq(0.0)), effect_matcher,
- control_matcher);
+ return IsCall(
+ _, IsHeapConstant(AllocateHeapNumberStub(isolate()).GetCode()),
+ IsNumberConstant(BitEq(0.0)), effect_matcher, control_matcher);
}
Matcher<Node*> IsChangeInt32ToSmi(const Matcher<Node*>& value_matcher) {
return Is64() ? IsWord64Shl(IsChangeInt32ToInt64(value_matcher),
diff --git a/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc b/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc
index ea9ebdb20b..fe8fac4bfe 100644
--- a/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc
+++ b/deps/v8/test/unittests/compiler/coalesced-live-ranges-unittest.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/coalesced-live-ranges.h"
+#include "test/unittests/compiler/live-range-builder.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -10,49 +11,6 @@ namespace internal {
namespace compiler {
-// Utility offering shorthand syntax for building up a range by providing its ID
-// and pairs (start, end) specifying intervals. Circumvents current incomplete
-// support for C++ features such as instantiation lists, on OS X and Android.
-class TestRangeBuilder {
- public:
- explicit TestRangeBuilder(Zone* zone) : id_(-1), pairs_(), zone_(zone) {}
-
- TestRangeBuilder& Id(int id) {
- id_ = id;
- return *this;
- }
- TestRangeBuilder& Add(int start, int end) {
- pairs_.push_back({start, end});
- return *this;
- }
-
- LiveRange* Build(int start, int end) { return Add(start, end).Build(); }
-
- LiveRange* Build() {
- LiveRange* range = new (zone_) LiveRange(id_, MachineType::kRepTagged);
- // Traverse the provided interval specifications backwards, because that is
- // what LiveRange expects.
- for (int i = static_cast<int>(pairs_.size()) - 1; i >= 0; --i) {
- Interval pair = pairs_[i];
- LifetimePosition start = LifetimePosition::FromInt(pair.first);
- LifetimePosition end = LifetimePosition::FromInt(pair.second);
- CHECK(start < end);
- range->AddUseInterval(start, end, zone_);
- }
-
- pairs_.clear();
- return range;
- }
-
- private:
- typedef std::pair<int, int> Interval;
- typedef std::vector<Interval> IntervalList;
- int id_;
- IntervalList pairs_;
- Zone* zone_;
-};
-
-
class CoalescedLiveRangesTest : public TestWithZone {
public:
CoalescedLiveRangesTest() : TestWithZone(), ranges_(zone()) {}
@@ -100,8 +58,9 @@ void CoalescedLiveRangesTest::RemoveConflicts(LiveRange* range) {
LiveRangeIDs seen(zone());
for (auto c = conflicts.Current(); c != nullptr;
c = conflicts.RemoveCurrentAndGetNext()) {
- EXPECT_FALSE(seen.count(c->id()) > 0);
- seen.insert(c->id());
+ int id = c->TopLevel()->vreg();
+ EXPECT_FALSE(seen.count(id) > 0);
+ seen.insert(c->TopLevel()->vreg());
}
}
@@ -118,7 +77,7 @@ bool CoalescedLiveRangesTest::IsRangeConflictingWith(const LiveRange* range,
auto conflicts = ranges().GetConflicts(range);
for (auto conflict = conflicts.Current(); conflict != nullptr;
conflict = conflicts.GetNext()) {
- found_ids.insert(conflict->id());
+ found_ids.insert(conflict->TopLevel()->vreg());
}
return found_ids == ids;
}
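
The substitution of c->TopLevel()->vreg() for c->id() throughout this file reflects the allocator now identifying a (possibly split) live range by the virtual register of its top-level parent rather than by a per-range id. A standalone sketch of that keying scheme, with MockRange as a hypothetical stand-in for compiler::LiveRange:

    // MockRange is hypothetical; only the TopLevel()/vreg() shape matters.
    struct MockRange {
      int vreg_;
      MockRange* top_level_;
      int vreg() const { return vreg_; }
      MockRange* TopLevel() const { return top_level_; }
    };

    int RangeKey(const MockRange* range) { return range->TopLevel()->vreg(); }

    int main() {
      MockRange parent{7, nullptr};
      parent.top_level_ = &parent;   // a top-level range is its own parent
      MockRange child{0, &parent};   // a split child has no key of its own
      return RangeKey(&child) == 7 ? 0 : 1;
    }
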
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
index 8765c13271..66eb140d27 100644
--- a/deps/v8/test/unittests/compiler/common-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -148,18 +148,6 @@ const float kFloatValues[] = {-std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::signaling_NaN()};
-const double kDoubleValues[] = {-std::numeric_limits<double>::infinity(),
- std::numeric_limits<double>::min(),
- -1.0,
- -0.0,
- 0.0,
- 1.0,
- std::numeric_limits<double>::max(),
- std::numeric_limits<double>::infinity(),
- std::numeric_limits<double>::quiet_NaN(),
- std::numeric_limits<double>::signaling_NaN()};
-
-
const size_t kInputCounts[] = {3, 4, 100, 255, 1024, 65000};
diff --git a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
index 47be5407f7..d383bf7c43 100644
--- a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
@@ -70,6 +70,10 @@ class ControlEquivalenceTest : public GraphTest {
return Store(graph()->NewNode(common()->IfFalse(), control));
}
+ Node* Merge1(Node* control) {
+ return Store(graph()->NewNode(common()->Merge(1), control));
+ }
+
Node* Merge2(Node* control1, Node* control2) {
return Store(graph()->NewNode(common()->Merge(2), control1, control2));
}
@@ -107,10 +111,10 @@ TEST_F(ControlEquivalenceTest, Empty1) {
TEST_F(ControlEquivalenceTest, Empty2) {
Node* start = graph()->start();
- Node* end = End(start);
- ComputeEquivalence(end);
+ Node* merge1 = Merge1(start);
+ ComputeEquivalence(merge1);
- ASSERT_EQUIVALENCE(start, end);
+ ASSERT_EQUIVALENCE(start, merge1);
}
diff --git a/deps/v8/test/unittests/compiler/diamond-unittest.cc b/deps/v8/test/unittests/compiler/diamond-unittest.cc
index c14886fbb7..50c50d4e69 100644
--- a/deps/v8/test/unittests/compiler/diamond-unittest.cc
+++ b/deps/v8/test/unittests/compiler/diamond-unittest.cc
@@ -128,22 +128,6 @@ TEST_F(DiamondTest, DiamondPhis) {
}
-TEST_F(DiamondTest, DiamondEffectPhis) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* p2 = Parameter(2);
- Diamond d(graph(), common(), p0);
-
- Node* phi = d.EffectPhi(p1, p2);
-
- EXPECT_THAT(d.branch, IsBranch(p0, graph()->start()));
- EXPECT_THAT(d.if_true, IsIfTrue(d.branch));
- EXPECT_THAT(d.if_false, IsIfFalse(d.branch));
- EXPECT_THAT(d.merge, IsMerge(d.if_true, d.if_false));
- EXPECT_THAT(phi, IsEffectPhi(p1, p2, d.merge));
-}
-
-
TEST_F(DiamondTest, BranchHint) {
Diamond dn(graph(), common(), Parameter(0));
CHECK(BranchHint::kNone == BranchHintOf(dn.branch->op()));
diff --git a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
index 3ca6052af9..8d05c526c3 100644
--- a/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
@@ -5,6 +5,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/operator.h"
#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/test-utils.h"
@@ -63,15 +64,15 @@ class InPlaceABReducer final : public Reducer {
switch (node->op()->opcode()) {
case kOpcodeA0:
EXPECT_EQ(0, node->InputCount());
- node->set_op(&kOpB0);
+ NodeProperties::ChangeOp(node, &kOpB0);
return Replace(node);
case kOpcodeA1:
EXPECT_EQ(1, node->InputCount());
- node->set_op(&kOpB1);
+ NodeProperties::ChangeOp(node, &kOpB1);
return Replace(node);
case kOpcodeA2:
EXPECT_EQ(2, node->InputCount());
- node->set_op(&kOpB2);
+ NodeProperties::ChangeOp(node, &kOpB2);
return Replace(node);
}
return NoChange();
@@ -178,15 +179,15 @@ class InPlaceBCReducer final : public Reducer {
switch (node->op()->opcode()) {
case kOpcodeB0:
EXPECT_EQ(0, node->InputCount());
- node->set_op(&kOpC0);
+ NodeProperties::ChangeOp(node, &kOpC0);
return Replace(node);
case kOpcodeB1:
EXPECT_EQ(1, node->InputCount());
- node->set_op(&kOpC1);
+ NodeProperties::ChangeOp(node, &kOpC1);
return Replace(node);
case kOpcodeB2:
EXPECT_EQ(2, node->InputCount());
- node->set_op(&kOpC2);
+ NodeProperties::ChangeOp(node, &kOpC2);
return Replace(node);
}
return NoChange();
@@ -290,7 +291,8 @@ const IfExceptionHint kNoHint = IfExceptionHint::kLocallyCaught;
TEST_F(AdvancedReducerTest, ReplaceWithValue_ValueUse) {
CommonOperatorBuilder common(zone());
Node* node = graph()->NewNode(&kMockOperator);
- Node* use_value = graph()->NewNode(common.Return(), node);
+ Node* start = graph()->NewNode(common.Start(1));
+ Node* use_value = graph()->NewNode(common.Return(), node, start, start);
Node* replacement = graph()->NewNode(&kMockOperator);
GraphReducer graph_reducer(zone(), graph(), nullptr);
ReplaceWithValueReducer r(&graph_reducer);
@@ -306,16 +308,18 @@ TEST_F(AdvancedReducerTest, ReplaceWithValue_EffectUse) {
CommonOperatorBuilder common(zone());
Node* start = graph()->NewNode(common.Start(1));
Node* node = graph()->NewNode(&kMockOpEffect, start);
- Node* use_effect = graph()->NewNode(common.EffectPhi(1), node);
+ Node* use_control = graph()->NewNode(common.Merge(1), start);
+ Node* use_effect = graph()->NewNode(common.EffectPhi(1), node, use_control);
Node* replacement = graph()->NewNode(&kMockOperator);
GraphReducer graph_reducer(zone(), graph(), nullptr);
ReplaceWithValueReducer r(&graph_reducer);
r.ReplaceWithValue(node, replacement);
EXPECT_EQ(start, use_effect->InputAt(0));
EXPECT_EQ(0, node->UseCount());
- EXPECT_EQ(2, start->UseCount());
+ EXPECT_EQ(3, start->UseCount());
EXPECT_EQ(0, replacement->UseCount());
- EXPECT_THAT(start->uses(), UnorderedElementsAre(use_effect, node));
+ EXPECT_THAT(start->uses(),
+ UnorderedElementsAre(use_effect, use_control, node));
}
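
The fixed-up node construction in these two tests encodes two graph invariants the old tests skirted, noted here as an annotation phrased from the diff itself:

    // Return takes (value, effect, control) — hence NewNode(common.Return(),
    // node, start, start) — and an EffectPhi(1) must hang off a matching
    // Merge(1) control node, hence the new use_control merge and the use
    // count of start rising from 2 to 3.
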
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.cc b/deps/v8/test/unittests/compiler/graph-unittest.cc
index 6b8546b95a..399f985370 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.cc
+++ b/deps/v8/test/unittests/compiler/graph-unittest.cc
@@ -5,6 +5,8 @@
#include "test/unittests/compiler/graph-unittest.h"
#include "src/compiler/node-properties.h"
+#include "src/factory.h"
+#include "src/objects-inl.h" // TODO(everyone): Make typer.h IWYU compliant.
#include "test/unittests/compiler/node-test-utils.h"
namespace v8 {
@@ -51,33 +53,25 @@ Node* GraphTest::NumberConstant(volatile double value) {
Node* GraphTest::HeapConstant(const Handle<HeapObject>& value) {
- return HeapConstant(Unique<HeapObject>::CreateUninitialized(value));
-}
-
-
-Node* GraphTest::HeapConstant(const Unique<HeapObject>& value) {
Node* node = graph()->NewNode(common()->HeapConstant(value));
- Type* type = Type::Constant(value.handle(), zone());
- NodeProperties::SetBounds(node, Bounds(type));
+ Type* type = Type::Constant(value, zone());
+ NodeProperties::SetType(node, type);
return node;
}
Node* GraphTest::FalseConstant() {
- return HeapConstant(
- Unique<HeapObject>::CreateImmovable(factory()->false_value()));
+ return HeapConstant(factory()->false_value());
}
Node* GraphTest::TrueConstant() {
- return HeapConstant(
- Unique<HeapObject>::CreateImmovable(factory()->true_value()));
+ return HeapConstant(factory()->true_value());
}
Node* GraphTest::UndefinedConstant() {
- return HeapConstant(
- Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
+ return HeapConstant(factory()->undefined_value());
}
@@ -92,20 +86,17 @@ Node* GraphTest::EmptyFrameState() {
Matcher<Node*> GraphTest::IsFalseConstant() {
- return IsHeapConstant(
- Unique<HeapObject>::CreateImmovable(factory()->false_value()));
+ return IsHeapConstant(factory()->false_value());
}
Matcher<Node*> GraphTest::IsTrueConstant() {
- return IsHeapConstant(
- Unique<HeapObject>::CreateImmovable(factory()->true_value()));
+ return IsHeapConstant(factory()->true_value());
}
Matcher<Node*> GraphTest::IsUndefinedConstant() {
- return IsHeapConstant(
- Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
+ return IsHeapConstant(factory()->undefined_value());
}
@@ -118,7 +109,7 @@ TypedGraphTest::~TypedGraphTest() {}
Node* TypedGraphTest::Parameter(Type* type, int32_t index) {
Node* node = GraphTest::Parameter(index);
- NodeProperties::SetBounds(node, Bounds(type));
+ NodeProperties::SetType(node, type);
return node;
}
diff --git a/deps/v8/test/unittests/compiler/graph-unittest.h b/deps/v8/test/unittests/compiler/graph-unittest.h
index 2318fa61f8..9c99992511 100644
--- a/deps/v8/test/unittests/compiler/graph-unittest.h
+++ b/deps/v8/test/unittests/compiler/graph-unittest.h
@@ -18,8 +18,6 @@ namespace internal {
template <class T>
class Handle;
class HeapObject;
-template <class T>
-class Unique;
namespace compiler {
@@ -45,7 +43,6 @@ class GraphTest : public TestWithContext, public TestWithIsolateAndZone {
Node* Int64Constant(int64_t value);
Node* NumberConstant(volatile double value);
Node* HeapConstant(const Handle<HeapObject>& value);
- Node* HeapConstant(const Unique<HeapObject>& value);
Node* FalseConstant();
Node* TrueConstant();
Node* UndefinedConstant();
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index 1636b7ee5b..e14382e914 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -248,7 +248,7 @@ TARGET_TEST_F(InstructionSelectorTest, ReferenceParameter) {
TARGET_TEST_F(InstructionSelectorTest, Finish) {
StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
Node* param = m.Parameter(0);
- Node* finish = m.NewNode(m.common()->Finish(1), param, m.graph()->start());
+ Node* finish = m.AddNode(m.common()->Finish(1), param, m.graph()->start());
m.Return(finish);
Stream s = m.Build(kAllInstructions);
ASSERT_EQ(4U, s.size());
@@ -334,8 +334,8 @@ TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
Stream s1 = m1.Build(kAllInstructions);
StreamBuilder m2(this, kMachInt32, kMachPtr);
Node* p2 = m2.Parameter(0);
- m2.Return(m2.NewNode(m2.machine()->Load(kMachInt32), p2, m2.Int32Constant(0),
- m2.NewNode(m2.common()->ValueEffect(1), p2)));
+ m2.Return(m2.AddNode(m2.machine()->Load(kMachInt32), p2, m2.Int32Constant(0),
+ m2.AddNode(m2.common()->ValueEffect(1), p2)));
Stream s2 = m2.Build(kAllInstructions);
EXPECT_LE(3U, s1.size());
ASSERT_EQ(s1.size(), s2.size());
@@ -366,18 +366,23 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
ZoneVector<MachineType> int32_type(1, kMachInt32, zone());
ZoneVector<MachineType> empty_types(zone());
+ CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
+ zone(), false, 1, CallDescriptor::kNeedsFrameState);
+
Node* parameters =
- m.NewNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(1));
- Node* locals = m.NewNode(m.common()->TypedStateValues(&empty_types));
- Node* stack = m.NewNode(m.common()->TypedStateValues(&empty_types));
+ m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(1));
+ Node* locals = m.AddNode(m.common()->TypedStateValues(&empty_types));
+ Node* stack = m.AddNode(m.common()->TypedStateValues(&empty_types));
Node* context_dummy = m.Int32Constant(0);
- Node* state_node = m.NewNode(
+ Node* state_node = m.AddNode(
m.common()->FrameState(bailout_id, OutputFrameStateCombine::Push(),
m.GetFrameStateFunctionInfo(1, 0)),
parameters, locals, stack, context_dummy, function_node,
m.UndefinedConstant());
- Node* call = m.CallJS0(function_node, receiver, context, state_node);
+ Node* args[] = {receiver, context};
+ Node* call =
+ m.CallNWithFrameState(descriptor, function_node, args, state_node);
m.Return(call);
Stream s = m.Build(kAllExceptNopInstructions);
@@ -414,14 +419,14 @@ TARGET_TEST_F(InstructionSelectorTest, CallFunctionStubWithDeopt) {
// Build frame state for the state before the call.
Node* parameters =
- m.NewNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(43));
- Node* locals = m.NewNode(m.common()->TypedStateValues(&float64_type),
+ m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(43));
+ Node* locals = m.AddNode(m.common()->TypedStateValues(&float64_type),
m.Float64Constant(0.5));
- Node* stack = m.NewNode(m.common()->TypedStateValues(&tagged_type),
+ Node* stack = m.AddNode(m.common()->TypedStateValues(&tagged_type),
m.UndefinedConstant());
Node* context_sentinel = m.Int32Constant(0);
- Node* frame_state_before = m.NewNode(
+ Node* frame_state_before = m.AddNode(
m.common()->FrameState(bailout_id_before, OutputFrameStateCombine::Push(),
m.GetFrameStateFunctionInfo(1, 1)),
parameters, locals, stack, context_sentinel, function_node,
@@ -510,12 +515,12 @@ TARGET_TEST_F(InstructionSelectorTest,
// Build frame state for the state before the call.
Node* parameters =
- m.NewNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(63));
+ m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(63));
Node* locals =
- m.NewNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(64));
+ m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(64));
Node* stack =
- m.NewNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(65));
- Node* frame_state_parent = m.NewNode(
+ m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(65));
+ Node* frame_state_parent = m.AddNode(
m.common()->FrameState(bailout_id_parent,
OutputFrameStateCombine::Ignore(),
m.GetFrameStateFunctionInfo(1, 1)),
@@ -523,12 +528,12 @@ TARGET_TEST_F(InstructionSelectorTest,
Node* context2 = m.Int32Constant(46);
Node* parameters2 =
- m.NewNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(43));
- Node* locals2 = m.NewNode(m.common()->TypedStateValues(&float64_type),
+ m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(43));
+ Node* locals2 = m.AddNode(m.common()->TypedStateValues(&float64_type),
m.Float64Constant(0.25));
- Node* stack2 = m.NewNode(m.common()->TypedStateValues(&int32x2_type),
+ Node* stack2 = m.AddNode(m.common()->TypedStateValues(&int32x2_type),
m.Int32Constant(44), m.Int32Constant(45));
- Node* frame_state_before = m.NewNode(
+ Node* frame_state_before = m.AddNode(
m.common()->FrameState(bailout_id_before, OutputFrameStateCombine::Push(),
m.GetFrameStateFunctionInfo(1, 1)),
parameters2, locals2, stack2, context2, function_node,
diff --git a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc
index a869f7ebb1..48aa8f1ec4 100644
--- a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.cc
@@ -4,9 +4,11 @@
#include "test/unittests/compiler/interpreter-assembler-unittest.h"
+#include "src/code-factory.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
-#include "src/unique.h"
+#include "src/interface-descriptors.h"
+#include "src/isolate.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -23,6 +25,12 @@ const interpreter::Bytecode kBytecodes[] = {
};
+Matcher<Node*> IsIntPtrConstant(const intptr_t value) {
+ return kPointerSize == 8 ? IsInt64Constant(static_cast<int64_t>(value))
+ : IsInt32Constant(static_cast<int32_t>(value));
+}
+
+
Matcher<Node*> IsIntPtrAdd(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return kPointerSize == 8 ? IsInt64Add(lhs_matcher, rhs_matcher)
@@ -69,6 +77,14 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
}
+template <class... A>
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsCall(
+ const Matcher<const CallDescriptor*>& descriptor_matcher, A... args) {
+ return ::i::compiler::IsCall(descriptor_matcher, args..., graph()->start(),
+ graph()->start());
+}
+
+
Matcher<Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperand(
int operand) {
@@ -130,11 +146,95 @@ TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
next_bytecode_offset_matcher,
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsParameter(Linkage::kInterpreterContextParameter),
graph->start(), graph->start()));
}
}
+TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
+ int jump_offsets[] = {-9710, -77, 0, +3, +97109};
+ TRACED_FOREACH(int, jump_offset, jump_offsets) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ m.Jump(m.Int32Constant(jump_offset));
+ Graph* graph = m.GetCompletedGraph();
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* tail_call_node = end->InputAt(0);
+
+ Matcher<Node*> next_bytecode_offset_matcher =
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(jump_offset));
+ Matcher<Node*> target_bytecode_matcher = m.IsLoad(
+ kMachUint8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ next_bytecode_offset_matcher);
+ Matcher<Node*> code_target_matcher = m.IsLoad(
+ kMachPtr, IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
+
+ EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
+ EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
+ EXPECT_THAT(
+ tail_call_node,
+ IsTailCall(m.call_descriptor(), code_target_matcher,
+ IsParameter(Linkage::kInterpreterAccumulatorParameter),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ next_bytecode_offset_matcher,
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsParameter(Linkage::kInterpreterContextParameter),
+ graph->start(), graph->start()));
+ }
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
+ static const int kJumpIfTrueOffset = 73;
+
+ MachineOperatorBuilder machine(zone());
+
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* lhs = m.IntPtrConstant(0);
+ Node* rhs = m.IntPtrConstant(1);
+ m.JumpIfWordEqual(lhs, rhs, m.Int32Constant(kJumpIfTrueOffset));
+ Graph* graph = m.GetCompletedGraph();
+ Node* end = graph->end();
+ EXPECT_EQ(2, end->InputCount());
+
+ int jump_offsets[] = {kJumpIfTrueOffset,
+ interpreter::Bytecodes::Size(bytecode)};
+ for (int i = 0; i < static_cast<int>(arraysize(jump_offsets)); i++) {
+ Matcher<Node*> next_bytecode_offset_matcher =
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(jump_offsets[i]));
+ Matcher<Node*> target_bytecode_matcher = m.IsLoad(
+ kMachUint8, IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ next_bytecode_offset_matcher);
+ Matcher<Node*> code_target_matcher = m.IsLoad(
+ kMachPtr, IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
+ EXPECT_THAT(
+ end->InputAt(i),
+ IsTailCall(m.call_descriptor(), code_target_matcher,
+ IsParameter(Linkage::kInterpreterAccumulatorParameter),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ next_bytecode_offset_matcher,
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsParameter(Linkage::kInterpreterContextParameter),
+ graph->start(), graph->start()));
+ }
+
+ // TODO(oth): test control flow paths.
+ }
+}
+
+
TARGET_TEST_F(InterpreterAssemblerTest, Return) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -147,9 +247,8 @@ TARGET_TEST_F(InterpreterAssemblerTest, Return) {
EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
- Matcher<Unique<HeapObject>> exit_trampoline(
- Unique<HeapObject>::CreateImmovable(
- isolate()->builtins()->InterpreterExitTrampoline()));
+ Handle<HeapObject> exit_trampoline =
+ isolate()->builtins()->InterpreterExitTrampoline();
EXPECT_THAT(
tail_call_node,
IsTailCall(m.call_descriptor(), IsHeapConstant(exit_trampoline),
@@ -158,6 +257,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, Return) {
IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsParameter(Linkage::kInterpreterContextParameter),
graph->start(), graph->start()));
}
}
@@ -169,6 +269,12 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
int number_of_operands = interpreter::Bytecodes::NumberOfOperands(bytecode);
for (int i = 0; i < number_of_operands; i++) {
switch (interpreter::Bytecodes::GetOperandType(bytecode, i)) {
+ case interpreter::OperandType::kCount:
+ EXPECT_THAT(m.BytecodeOperandCount(i), m.IsBytecodeOperand(i));
+ break;
+ case interpreter::OperandType::kIdx:
+ EXPECT_THAT(m.BytecodeOperandIdx(i), m.IsBytecodeOperand(i));
+ break;
case interpreter::OperandType::kImm8:
EXPECT_THAT(m.BytecodeOperandImm8(i),
m.IsBytecodeOperandSignExtended(i));
@@ -216,6 +322,20 @@ TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
}
+TARGET_TEST_F(InterpreterAssemblerTest, RegisterLocation) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* reg_index_node = m.Int32Constant(44);
+ Node* reg_location_node = m.RegisterLocation(reg_index_node);
+ EXPECT_THAT(
+ reg_location_node,
+ IsIntPtrAdd(
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
+ }
+}
+
+
TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
@@ -223,7 +343,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
Node* load_reg_node = m.LoadRegister(reg_index_node);
EXPECT_THAT(
load_reg_node,
- m.IsLoad(kMachPtr,
+ m.IsLoad(kMachAnyTagged,
IsParameter(Linkage::kInterpreterRegisterFileParameter),
IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
}
@@ -238,7 +358,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
EXPECT_THAT(
store_reg_node,
- m.IsStore(StoreRepresentation(kMachPtr, kNoWriteBarrier),
+ m.IsStore(StoreRepresentation(kMachAnyTagged, kNoWriteBarrier),
IsParameter(Linkage::kInterpreterRegisterFileParameter),
IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2)),
store_value));
@@ -257,6 +377,158 @@ TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
}
}
+
+TARGET_TEST_F(InterpreterAssemblerTest, IntPtrAdd) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* a = m.Int32Constant(0);
+ Node* b = m.Int32Constant(1);
+ Node* add = m.IntPtrAdd(a, b);
+ EXPECT_THAT(add, IsIntPtrAdd(a, b));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, IntPtrSub) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* a = m.Int32Constant(0);
+ Node* b = m.Int32Constant(1);
+ Node* add = m.IntPtrSub(a, b);
+ EXPECT_THAT(add, IsIntPtrSub(a, b));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, WordShl) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* a = m.Int32Constant(0);
+ Node* add = m.WordShl(a, 10);
+ EXPECT_THAT(add, IsWordShl(a, IsInt32Constant(10)));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* index = m.Int32Constant(2);
+ Node* load_constant = m.LoadConstantPoolEntry(index);
+ Matcher<Node*> constant_pool_matcher = m.IsLoad(
+ kMachAnyTagged,
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrConstant(BytecodeArray::kConstantPoolOffset - kHeapObjectTag));
+ EXPECT_THAT(
+ load_constant,
+ m.IsLoad(kMachAnyTagged, constant_pool_matcher,
+ IsIntPtrAdd(
+ IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ IsWordShl(index, IsInt32Constant(kPointerSizeLog2)))));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadContextSlot) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* load_from_current_context = m.LoadContextSlot(22);
+ Matcher<Node*> load_from_current_context_matcher = m.IsLoad(
+ kMachAnyTagged, IsParameter(Linkage::kInterpreterContextParameter),
+ IsIntPtrConstant(Context::SlotOffset(22)));
+ EXPECT_THAT(load_from_current_context, load_from_current_context_matcher);
+
+ // Treat the loaded context slot as another context and load through it.
+ Node* load_from_any_context =
+ m.LoadContextSlot(load_from_current_context, 23);
+ EXPECT_THAT(load_from_any_context,
+ m.IsLoad(kMachAnyTagged, load_from_current_context_matcher,
+ IsIntPtrConstant(Context::SlotOffset(23))));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* object = m.IntPtrConstant(0xdeadbeef);
+ int offset = 16;
+ Node* load_field = m.LoadObjectField(object, offset);
+ EXPECT_THAT(load_field,
+ m.IsLoad(kMachAnyTagged, object,
+ IsIntPtrConstant(offset - kHeapObjectTag)));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* arg1 = m.Int32Constant(2);
+ Node* arg2 = m.Int32Constant(3);
+ Node* call_runtime = m.CallRuntime(Runtime::kAdd, arg1, arg2);
+ EXPECT_THAT(call_runtime,
+ m.IsCall(_, _, arg1, arg2, _, IsInt32Constant(2),
+ IsParameter(Linkage::kInterpreterContextParameter)));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, CallIC) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ LoadWithVectorDescriptor descriptor(isolate());
+ Node* target = m.Int32Constant(1);
+ Node* arg1 = m.Int32Constant(2);
+ Node* arg2 = m.Int32Constant(3);
+ Node* arg3 = m.Int32Constant(4);
+ Node* arg4 = m.Int32Constant(5);
+ Node* call_ic = m.CallIC(descriptor, target, arg1, arg2, arg3, arg4);
+ EXPECT_THAT(call_ic,
+ m.IsCall(_, target, arg1, arg2, arg3, arg4,
+ IsParameter(Linkage::kInterpreterContextParameter)));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, CallJS) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Callable builtin = CodeFactory::PushArgsAndCall(isolate());
+ Node* function = m.Int32Constant(0);
+ Node* first_arg = m.Int32Constant(1);
+ Node* arg_count = m.Int32Constant(2);
+ Node* call_js = m.CallJS(function, first_arg, arg_count);
+ EXPECT_THAT(
+ call_js,
+ m.IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
+ function, IsParameter(Linkage::kInterpreterContextParameter)));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadTypeFeedbackVector) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* feedback_vector = m.LoadTypeFeedbackVector();
+
+ Matcher<Node*> load_function_matcher = m.IsLoad(
+ kMachAnyTagged, IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ IsIntPtrConstant(
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ Matcher<Node*> load_shared_function_info_matcher =
+ m.IsLoad(kMachAnyTagged, load_function_matcher,
+ IsIntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+ kHeapObjectTag));
+
+ EXPECT_THAT(
+ feedback_vector,
+ m.IsLoad(kMachAnyTagged, load_shared_function_info_matcher,
+ IsIntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ kHeapObjectTag)));
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
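
Several of the new tests above (RegisterLocation, LoadRegister, StoreRegister) assert the same addressing rule: a register slot lives at the register-file pointer plus the register index scaled by the pointer size. As plain arithmetic, assuming an 8-byte-pointer target where kPointerSizeLog2 == 3:

    #include <cassert>
    #include <cstdint>

    // Sketch of the address computation the tests expect on a 64-bit target.
    std::intptr_t RegisterLocation(std::intptr_t register_file,
                                   std::intptr_t index) {
      return register_file + (index << 3);  // index * kPointerSize
    }

    int main() {
      assert(RegisterLocation(0x1000, 44) == 0x1000 + 44 * 8);
    }
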
diff --git a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h
index 64353ae128..0ed91eb401 100644
--- a/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/compiler/interpreter-assembler-unittest.h
@@ -38,6 +38,11 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher);
+ template <class... A>
+ Matcher<Node*> IsCall(
+ const Matcher<const CallDescriptor*>& descriptor_matcher,
+ A... args);
+
Matcher<Node*> IsBytecodeOperand(int operand);
Matcher<Node*> IsBytecodeOperandSignExtended(int operand);
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
index b452ba56c5..9e0cee0d3d 100644
--- a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -6,6 +6,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/typer.h"
+#include "src/isolate-inl.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
@@ -41,7 +42,7 @@ class JSBuiltinReducerTest : public TypedGraphTest {
JSObject::GetProperty(
m, isolate()->factory()->NewStringFromAsciiChecked(name))
.ToHandleChecked());
- return HeapConstant(Unique<JSFunction>::CreateUninitialized(f));
+ return HeapConstant(f);
}
JSOperatorBuilder* javascript() { return &javascript_; }
diff --git a/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc b/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc
index b52417de2f..4cc8f17509 100644
--- a/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-context-relaxation-unittest.cc
@@ -152,14 +152,11 @@ TEST_F(JSContextRelaxationTest,
Node* const input1 = Parameter(1);
Node* const context = Parameter(2);
Node* const outer_context = Parameter(3);
- const Operator* op = javascript()->CreateCatchContext(Unique<String>());
- Node* const frame_state_1 =
- ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ const Operator* op = javascript()->CreateCatchContext(Handle<String>());
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Node* nested_context =
- graph()->NewNode(op, graph()->start(), graph()->start(), outer_context,
- frame_state_1, effect, control);
+ Node* nested_context = graph()->NewNode(
+ op, graph()->start(), graph()->start(), outer_context, effect, control);
Node* const frame_state_2 =
ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
Node* node =
@@ -204,14 +201,12 @@ TEST_F(JSContextRelaxationTest,
Node* const input1 = Parameter(1);
Node* const context = Parameter(2);
Node* const outer_context = Parameter(3);
- const Operator* op = javascript()->CreateBlockContext();
- Node* const frame_state_1 =
- ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::null();
+ const Operator* op = javascript()->CreateBlockContext(scope_info);
Node* const effect = graph()->start();
Node* const control = graph()->start();
Node* nested_context =
- graph()->NewNode(op, graph()->start(), graph()->start(), outer_context,
- frame_state_1, effect, control);
+ graph()->NewNode(op, graph()->start(), outer_context, effect, control);
Node* const frame_state_2 =
ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
Node* node =
@@ -230,14 +225,14 @@ TEST_F(JSContextRelaxationTest,
Node* const input1 = Parameter(1);
Node* const context = Parameter(2);
Node* const outer_context = Parameter(3);
- const Operator* op = javascript()->CreateScriptContext();
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::null();
+ const Operator* op = javascript()->CreateScriptContext(scope_info);
Node* const frame_state_1 =
ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Node* nested_context =
- graph()->NewNode(op, graph()->start(), graph()->start(), outer_context,
- frame_state_1, effect, control);
+ Node* nested_context = graph()->NewNode(op, graph()->start(), outer_context,
+ frame_state_1, effect, control);
Node* const frame_state_2 =
ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
Node* node =
@@ -257,13 +252,10 @@ TEST_F(JSContextRelaxationTest,
Node* const context = Parameter(2);
Node* const outer_context = Parameter(3);
const Operator* op = javascript()->CreateModuleContext();
- Node* const frame_state_1 =
- ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Node* nested_context =
- graph()->NewNode(op, graph()->start(), graph()->start(), outer_context,
- frame_state_1, effect, control);
+ Node* nested_context = graph()->NewNode(
+ op, graph()->start(), graph()->start(), outer_context, effect, control);
Node* const frame_state_2 =
ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
Node* node =
@@ -283,13 +275,10 @@ TEST_F(JSContextRelaxationTest,
Node* const context = Parameter(2);
Node* const outer_context = Parameter(3);
const Operator* op = javascript()->CreateFunctionContext();
- Node* const frame_state_1 =
- ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
Node* const effect = graph()->start();
Node* const control = graph()->start();
Node* nested_context =
- graph()->NewNode(op, graph()->start(), graph()->start(), outer_context,
- frame_state_1, effect, control);
+ graph()->NewNode(op, graph()->start(), outer_context, effect, control);
Node* const frame_state_2 =
ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
Node* node =
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index 92be4e43e0..9d5b649471 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -126,23 +126,6 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsSmi) {
// -----------------------------------------------------------------------------
-// %_IsNonNegativeSmi
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineIsNonNegativeSmi) {
- Node* const input = Parameter(0);
- Node* const context = Parameter(1);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kInlineIsNonNegativeSmi, 1), input,
- context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsObjectIsNonNegativeSmi(input));
-}
-
-
-// -----------------------------------------------------------------------------
// %_IsArray
@@ -327,7 +310,7 @@ TEST_F(JSIntrinsicLoweringTest, Likely) {
graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineLikely, 1),
input, context, effect, control);
Node* const to_boolean =
- graph()->NewNode(javascript()->ToBoolean(), likely, context);
+ graph()->NewNode(javascript()->ToBoolean(), likely, context, effect);
Diamond d(graph(), common(), to_boolean);
graph()->SetEnd(graph()->NewNode(common()->End(1), d.merge));
@@ -422,7 +405,7 @@ TEST_F(JSIntrinsicLoweringTest, Unlikely) {
graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineUnlikely, 1),
input, context, effect, control);
Node* const to_boolean =
- graph()->NewNode(javascript()->ToBoolean(), unlikely, context);
+ graph()->NewNode(javascript()->ToBoolean(), unlikely, context, effect);
Diamond d(graph(), common(), to_boolean);
graph()->SetEnd(graph()->NewNode(common()->End(1), d.merge));
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
index 0f33ddea8d..a1f3973734 100644
--- a/deps/v8/test/unittests/compiler/js-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -69,10 +69,10 @@ const SharedOperator kSharedOperators[] = {
}
SHARED(Equal, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
SHARED(NotEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(StrictEqual, Operator::kPure, 2, 0, 0, 0, 1, 0, 0),
- SHARED(StrictNotEqual, Operator::kPure, 2, 0, 0, 0, 1, 0, 0),
- SHARED(UnaryNot, Operator::kPure, 1, 0, 0, 0, 1, 0, 0),
- SHARED(ToBoolean, Operator::kPure, 1, 0, 0, 0, 1, 0, 0),
+ SHARED(StrictEqual, Operator::kNoThrow, 2, 0, 1, 1, 1, 1, 0),
+ SHARED(StrictNotEqual, Operator::kNoThrow, 2, 0, 1, 1, 1, 1, 0),
+ SHARED(UnaryNot, Operator::kEliminatable, 1, 0, 1, 0, 1, 1, 0),
+ SHARED(ToBoolean, Operator::kEliminatable, 1, 0, 1, 0, 1, 1, 0),
SHARED(ToNumber, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToString, Operator::kNoProperties, 1, 0, 1, 1, 1, 1, 2),
SHARED(ToName, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
@@ -80,13 +80,11 @@ const SharedOperator kSharedOperators[] = {
SHARED(Yield, Operator::kNoProperties, 1, 0, 1, 1, 1, 1, 2),
SHARED(Create, Operator::kEliminatable, 0, 0, 1, 0, 1, 1, 0),
SHARED(HasProperty, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(TypeOf, Operator::kPure, 1, 0, 0, 0, 1, 0, 0),
+ SHARED(TypeOf, Operator::kEliminatable, 1, 0, 1, 0, 1, 1, 0),
SHARED(InstanceOf, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
SHARED(CreateFunctionContext, Operator::kNoProperties, 1, 0, 1, 1, 1, 1, 2),
SHARED(CreateWithContext, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(CreateBlockContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
SHARED(CreateModuleContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
- SHARED(CreateScriptContext, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2)
#undef SHARED
};
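
For readers decoding the numeric columns in the SHARED rows above, the macro's parameter order is taken from its definition earlier in this file, which the hunk elides, so treat the decoding below as an assumption:

    // Assumed SHARED column order: value inputs, frame-state inputs,
    // effect inputs, control inputs, value outputs, effect outputs,
    // control outputs. Under that reading, StrictEqual moving from
    // kPure (2,0,0,0,1,0,0) to kNoThrow (2,0,1,1,1,1,0) gains an effect
    // input, a control input and an effect output: it is no longer a
    // pure node in the graph.
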
diff --git a/deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc b/deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc
index 251293ddcf..f3f1b733fc 100644
--- a/deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-type-feedback-unittest.cc
@@ -82,18 +82,11 @@ class JSTypeFeedbackTest : public TypedGraphTest {
Node* vector = UndefinedConstant();
Node* context = UndefinedConstant();
- Unique<Name> name = Unique<Name>::CreateUninitialized(
- isolate()->factory()->InternalizeUtf8String(string));
+ Handle<Name> name = isolate()->factory()->InternalizeUtf8String(string);
const Operator* op = javascript()->LoadGlobal(name, feedback);
- Node* load = graph()->NewNode(op, context, global, vector, context);
- if (mode == JSTypeFeedbackSpecializer::kDeoptimizationEnabled) {
- for (int i = 0; i < OperatorProperties::GetFrameStateInputCount(op);
- i++) {
- load->AppendInput(zone(), EmptyFrameState());
- }
- }
- load->AppendInput(zone(), effect);
- load->AppendInput(zone(), control);
+ Node* load =
+ graph()->NewNode(op, context, global, vector, context,
+ EmptyFrameState(), EmptyFrameState(), effect, control);
Node* if_success = graph()->NewNode(common()->IfSuccess(), load);
return graph()->NewNode(common()->Return(), load, load, if_success);
}
@@ -193,10 +186,9 @@ TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstNumberWithDeoptimization) {
TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstString) {
- Unique<HeapObject> kValue = Unique<HeapObject>::CreateImmovable(
- isolate()->factory()->undefined_string());
+ Handle<HeapObject> kValue = isolate()->factory()->undefined_string();
const char* kName = "mango";
- SetGlobalProperty(kName, kValue.handle());
+ SetGlobalProperty(kName, kValue);
Node* ret = ReturnLoadNamedFromGlobal(
kName, graph()->start(), graph()->start(),
@@ -211,10 +203,9 @@ TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstString) {
TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstStringWithDeoptimization) {
- Unique<HeapObject> kValue = Unique<HeapObject>::CreateImmovable(
- isolate()->factory()->undefined_string());
+ Handle<HeapObject> kValue = isolate()->factory()->undefined_string();
const char* kName = "mango";
- SetGlobalProperty(kName, kValue.handle());
+ SetGlobalProperty(kName, kValue);
Node* ret = ReturnLoadNamedFromGlobal(
kName, graph()->start(), graph()->start(),
@@ -277,7 +268,7 @@ TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalPropertyCellSmiWithDeoptimization) {
HeapObjectMatcher cell(cell_capture.value());
EXPECT_TRUE(cell.HasValue());
- EXPECT_TRUE(cell.Value().handle()->IsPropertyCell());
+ EXPECT_TRUE(cell.Value()->IsPropertyCell());
EXPECT_THAT(ret,
IsReturn(load_field_match, load_field_match, graph()->start()));
@@ -329,7 +320,7 @@ TEST_F(JSTypeFeedbackTest,
HeapObjectMatcher cell(cell_capture.value());
EXPECT_TRUE(cell.HasValue());
- EXPECT_TRUE(cell.Value().handle()->IsPropertyCell());
+ EXPECT_TRUE(cell.Value()->IsPropertyCell());
EXPECT_THAT(ret,
IsReturn(load_field_match, load_field_match, graph()->start()));
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 9d6cca3dbc..da964db23a 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -10,6 +10,7 @@
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
+#include "src/isolate-inl.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -88,7 +89,7 @@ class JSTypedLoweringTest : public TypedGraphTest {
Handle<JSArrayBuffer> NewArrayBuffer(void* bytes, size_t byte_length) {
Handle<JSArrayBuffer> buffer = factory()->NewJSArrayBuffer();
- Runtime::SetupArrayBuffer(isolate(), buffer, true, bytes, byte_length);
+ JSArrayBuffer::Setup(buffer, isolate(), true, bytes, byte_length);
return buffer;
}
@@ -111,8 +112,8 @@ class JSTypedLoweringTest : public TypedGraphTest {
TEST_F(JSTypedLoweringTest, JSUnaryNotWithBoolean) {
Node* input = Parameter(Type::Boolean(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
+ context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsBooleanNot(input));
}
@@ -121,8 +122,8 @@ TEST_F(JSTypedLoweringTest, JSUnaryNotWithBoolean) {
TEST_F(JSTypedLoweringTest, JSUnaryNotWithOrderedNumber) {
Node* input = Parameter(Type::OrderedNumber(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
+ context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberEqual(input, IsNumberConstant(0)));
}
@@ -150,8 +151,8 @@ TEST_F(JSTypedLoweringTest, JSUnaryNotWithFalsish) {
zone()),
0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
+ context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
@@ -165,8 +166,8 @@ TEST_F(JSTypedLoweringTest, JSUnaryNotWithTruish) {
zone()),
0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
+ context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
@@ -175,8 +176,8 @@ TEST_F(JSTypedLoweringTest, JSUnaryNotWithTruish) {
TEST_F(JSTypedLoweringTest, JSUnaryNotWithNonZeroPlainNumber) {
Node* input = Parameter(Type::Range(1.0, 42.0, zone()), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
+ context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
@@ -185,8 +186,8 @@ TEST_F(JSTypedLoweringTest, JSUnaryNotWithNonZeroPlainNumber) {
TEST_F(JSTypedLoweringTest, JSUnaryNotWithString) {
Node* input = Parameter(Type::String(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
+ context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
@@ -199,8 +200,8 @@ TEST_F(JSTypedLoweringTest, JSUnaryNotWithString) {
TEST_F(JSTypedLoweringTest, JSUnaryNotWithAny) {
Node* input = Parameter(Type::Any(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->UnaryNot(), input,
+ context, graph()->start()));
ASSERT_FALSE(r.Changed());
}
@@ -235,14 +236,12 @@ TEST_F(JSTypedLoweringTest, ParameterWithNull) {
{
Reduction r = Reduce(Parameter(Type::Constant(null, zone())));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsHeapConstant(Unique<HeapObject>::CreateImmovable(null)));
+ EXPECT_THAT(r.replacement(), IsHeapConstant(null));
}
{
Reduction r = Reduce(Parameter(Type::Null()));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsHeapConstant(Unique<HeapObject>::CreateImmovable(null)));
+ EXPECT_THAT(r.replacement(), IsHeapConstant(null));
}
}
@@ -291,14 +290,12 @@ TEST_F(JSTypedLoweringTest, ParameterWithUndefined) {
{
Reduction r = Reduce(Parameter(Type::Undefined()));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsHeapConstant(Unique<HeapObject>::CreateImmovable(undefined)));
+ EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
}
{
Reduction r = Reduce(Parameter(Type::Constant(undefined, zone())));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsHeapConstant(Unique<HeapObject>::CreateImmovable(undefined)));
+ EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
}
}
@@ -310,8 +307,8 @@ TEST_F(JSTypedLoweringTest, ParameterWithUndefined) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithBoolean) {
Node* input = Parameter(Type::Boolean(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
+ context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
}
@@ -339,8 +336,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithFalsish) {
zone()),
0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
+ context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
@@ -354,8 +351,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithTruish) {
zone()),
0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
+ context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
@@ -364,8 +361,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithTruish) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithNonZeroPlainNumber) {
Node* input = Parameter(Type::Range(1, V8_INFINITY, zone()), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
+ context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
@@ -374,8 +371,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithNonZeroPlainNumber) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumber) {
Node* input = Parameter(Type::OrderedNumber(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
+ context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsBooleanNot(IsNumberEqual(input, IsNumberConstant(0.0))));
@@ -385,8 +382,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumber) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
Node* input = Parameter(Type::String(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
+ context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
@@ -399,8 +396,8 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
TEST_F(JSTypedLoweringTest, JSToBooleanWithAny) {
Node* input = Parameter(Type::Any(), 0);
Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
+ context, graph()->start()));
ASSERT_FALSE(r.Changed());
}
@@ -432,8 +429,9 @@ TEST_F(JSTypedLoweringTest, JSStrictEqualWithTheHole) {
Node* const context = UndefinedConstant();
TRACED_FOREACH(Type*, type, kJSTypes) {
Node* const lhs = Parameter(type);
- Reduction r = Reduce(
- graph()->NewNode(javascript()->StrictEqual(), lhs, the_hole, context));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->StrictEqual(), lhs, the_hole,
+ context, graph()->start(), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
@@ -445,7 +443,8 @@ TEST_F(JSTypedLoweringTest, JSStrictEqualWithUnique) {
Node* const rhs = Parameter(Type::Unique(), 1);
Node* const context = Parameter(Type::Any(), 2);
Reduction r =
- Reduce(graph()->NewNode(javascript()->StrictEqual(), lhs, rhs, context));
+ Reduce(graph()->NewNode(javascript()->StrictEqual(), lhs, rhs, context,
+ graph()->start(), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsReferenceEqual(Type::Unique(), lhs, rhs));
}
@@ -745,13 +744,9 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
Node* control = graph()->start();
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
- Node* node = graph()->NewNode(op, base, key, value, vector, context);
- for (int i = 0;
- i < OperatorProperties::GetFrameStateInputCount(node->op()); i++) {
- node->AppendInput(zone(), EmptyFrameState());
- }
- node->AppendInput(zone(), effect);
- node->AppendInput(zone(), control);
+ Node* node = graph()->NewNode(op, base, key, value, vector, context,
+ EmptyFrameState(), EmptyFrameState(),
+ effect, control);
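+  // The frame-state count is a property of the StoreProperty operator, so
+  // both EmptyFrameState inputs can now be passed to NewNode up front
+  // instead of being appended in a loop.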
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
@@ -793,13 +788,9 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
Node* control = graph()->start();
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
- Node* node = graph()->NewNode(op, base, key, value, vector, context);
- for (int i = 0;
- i < OperatorProperties::GetFrameStateInputCount(node->op()); i++) {
- node->AppendInput(zone(), EmptyFrameState());
- }
- node->AppendInput(zone(), effect);
- node->AppendInput(zone(), control);
+ Node* node = graph()->NewNode(op, base, key, value, vector, context,
+ EmptyFrameState(), EmptyFrameState(),
+ effect, control);
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
@@ -854,13 +845,9 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithSafeKey) {
Node* control = graph()->start();
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
- Node* node = graph()->NewNode(op, base, key, value, vector, context);
- for (int i = 0;
- i < OperatorProperties::GetFrameStateInputCount(node->op()); i++) {
- node->AppendInput(zone(), EmptyFrameState());
- }
- node->AppendInput(zone(), effect);
- node->AppendInput(zone(), control);
+ Node* node = graph()->NewNode(op, base, key, value, vector, context,
+ EmptyFrameState(), EmptyFrameState(),
+ effect, control);
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
@@ -885,8 +872,8 @@ TEST_F(JSTypedLoweringTest, JSLoadGlobalConstants) {
Handle<String>(isolate()->heap()->nan_string(), isolate()) // --
};
Matcher<Node*> matches[] = {
- IsHeapConstant(Unique<HeapObject>::CreateImmovable(
- Handle<HeapObject>(isolate()->heap()->undefined_value(), isolate()))),
+ IsHeapConstant(
+ Handle<HeapObject>(isolate()->heap()->undefined_value(), isolate())),
IsNumberConstant(std::numeric_limits<double>::infinity()),
IsNumberConstant(IsNaN()) // --
};
@@ -899,9 +886,8 @@ TEST_F(JSTypedLoweringTest, JSLoadGlobalConstants) {
Node* control = graph()->start();
for (size_t i = 0; i < arraysize(names); i++) {
- Unique<Name> name = Unique<Name>::CreateImmovable(names[i]);
Reduction r = Reduce(graph()->NewNode(
- javascript()->LoadGlobal(name, feedback), context, global, vector,
+ javascript()->LoadGlobal(names[i], feedback), context, global, vector,
context, EmptyFrameState(), EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
@@ -916,7 +902,7 @@ TEST_F(JSTypedLoweringTest, JSLoadGlobalConstants) {
TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
VectorSlotPair feedback;
- Unique<Name> name = Unique<Name>::CreateImmovable(factory()->length_string());
+ Handle<Name> name = factory()->length_string();
Node* const receiver = Parameter(Type::String(), 0);
Node* const vector = Parameter(Type::Internal(), 1);
Node* const context = UndefinedConstant();
@@ -1022,12 +1008,11 @@ TEST_F(JSTypedLoweringTest, JSAddWithString) {
rhs, context, frame_state0,
frame_state1, effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsCall(_, IsHeapConstant(Unique<HeapObject>::CreateImmovable(
- CodeFactory::StringAdd(isolate(), STRING_ADD_CHECK_NONE,
- NOT_TENURED).code())),
- lhs, rhs, context, frame_state0, effect, control));
+ EXPECT_THAT(r.replacement(),
+ IsCall(_, IsHeapConstant(CodeFactory::StringAdd(
+ isolate(), STRING_ADD_CHECK_NONE,
+ NOT_TENURED).code()),
+ lhs, rhs, context, frame_state0, effect, control));
}
}
@@ -1043,16 +1028,13 @@ TEST_F(JSTypedLoweringTest, JSCreateClosure) {
Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
Reduction r =
Reduce(graph()->NewNode(javascript()->CreateClosure(shared, NOT_TENURED),
- context, context, effect, control));
+ context, effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsCall(_,
- IsHeapConstant(Unique<HeapObject>::CreateImmovable(
- CodeFactory::FastNewClosure(isolate(), shared->language_mode(),
- shared->kind()).code())),
- IsHeapConstant(Unique<HeapObject>::CreateImmovable(shared)),
- effect, control));
+ EXPECT_THAT(r.replacement(),
+ IsCall(_, IsHeapConstant(CodeFactory::FastNewClosure(
+ isolate(), shared->language_mode(),
+ shared->kind()).code()),
+ IsHeapConstant(shared), effect, control));
}
@@ -1074,8 +1056,8 @@ TEST_F(JSTypedLoweringTest, JSCreateLiteralArray) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsCall(_, IsHeapConstant(Unique<HeapObject>::CreateImmovable(
- CodeFactory::FastCloneShallowArray(isolate()).code())),
+ IsCall(_, IsHeapConstant(
+ CodeFactory::FastCloneShallowArray(isolate()).code()),
input0, input1, input2, context, frame_state, effect, control));
}
@@ -1098,8 +1080,8 @@ TEST_F(JSTypedLoweringTest, JSCreateLiteralObject) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsCall(_, IsHeapConstant(Unique<HeapObject>::CreateImmovable(
- CodeFactory::FastCloneShallowObject(isolate(), 6).code())),
+ IsCall(_, IsHeapConstant(
+ CodeFactory::FastCloneShallowObject(isolate(), 6).code()),
input0, input1, input2, _, context, frame_state, effect, control));
}
diff --git a/deps/v8/test/unittests/compiler/live-range-builder.h b/deps/v8/test/unittests/compiler/live-range-builder.h
new file mode 100644
index 0000000000..e5f05ebbcb
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/live-range-builder.h
@@ -0,0 +1,78 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIVE_RANGE_BUILDER_H_
+#define V8_LIVE_RANGE_BUILDER_H_
+
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Utility offering shorthand syntax for building up a range by providing its
+// ID and pairs (start, end) specifying intervals. Circumvents currently
+// incomplete support for C++ features such as initializer lists on OS X and
+// Android.
+class TestRangeBuilder {
+ public:
+ explicit TestRangeBuilder(Zone* zone)
+ : id_(-1), pairs_(), uses_(), zone_(zone) {}
+
+ TestRangeBuilder& Id(int id) {
+ id_ = id;
+ return *this;
+ }
+ TestRangeBuilder& Add(int start, int end) {
+ pairs_.push_back({start, end});
+ return *this;
+ }
+
+ TestRangeBuilder& AddUse(int pos) {
+ uses_.insert(pos);
+ return *this;
+ }
+
+ TopLevelLiveRange* Build(int start, int end) {
+ return Add(start, end).Build();
+ }
+
+ TopLevelLiveRange* Build() {
+ TopLevelLiveRange* range =
+ new (zone_) TopLevelLiveRange(id_, MachineType::kRepTagged);
+ // Traverse the provided interval specifications backwards, because that is
+ // what LiveRange expects.
+ for (int i = static_cast<int>(pairs_.size()) - 1; i >= 0; --i) {
+ Interval pair = pairs_[i];
+ LifetimePosition start = LifetimePosition::FromInt(pair.first);
+ LifetimePosition end = LifetimePosition::FromInt(pair.second);
+ CHECK(start < end);
+ range->AddUseInterval(start, end, zone_);
+ }
+ for (int pos : uses_) {
+ UsePosition* use_position =
+ new (zone_) UsePosition(LifetimePosition::FromInt(pos), nullptr,
+ nullptr, UsePositionHintType::kNone);
+ range->AddUsePosition(use_position);
+ }
+
+ pairs_.clear();
+ return range;
+ }
+
+ private:
+ typedef std::pair<int, int> Interval;
+ typedef std::vector<Interval> IntervalList;
+ int id_;
+ IntervalList pairs_;
+ std::set<int> uses_;
+ Zone* zone_;
+};
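+
+// A minimal usage sketch (illustrative only; the id and positions are
+// arbitrary values chosen for this example):
+//
+//   TopLevelLiveRange* range =
+//       TestRangeBuilder(zone()).Id(7).Add(0, 2).Add(4, 6).AddUse(1).Build();
+//
+// Note that Build() clears the accumulated interval pairs but not the
+// recorded use positions, so reusing a builder carries its uses over.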
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LIVE_RANGE_BUILDER_H_
diff --git a/deps/v8/test/unittests/compiler/live-range-unittest.cc b/deps/v8/test/unittests/compiler/live-range-unittest.cc
new file mode 100644
index 0000000000..886a8121c7
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/live-range-unittest.cc
@@ -0,0 +1,441 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "test/unittests/compiler/live-range-builder.h"
+#include "test/unittests/test-utils.h"
+
+
+// TODO(mtrofin): would we want to centralize this definition?
+#ifdef DEBUG
+#define V8_ASSERT_DEBUG_DEATH(statement, regex) \
+ ASSERT_DEATH_IF_SUPPORTED(statement, regex)
+#define DISABLE_IN_RELEASE(Name) Name
+
+#else
+#define V8_ASSERT_DEBUG_DEATH(statement, regex) statement
+#define DISABLE_IN_RELEASE(Name) DISABLED_##Name
+#endif // DEBUG
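+
+// Illustrative use of the macros above (the test and function names are
+// placeholders): a check that should only die in debug builds reads
+//
+//   TEST_F(LiveRangeUnitTest, DISABLE_IN_RELEASE(SomeInvalidOperation)) {
+//     ASSERT_DEATH_IF_SUPPORTED(DoInvalidThing(), ".*");
+//   }
+//
+// whereas V8_ASSERT_DEBUG_DEATH may be used in tests that stay enabled in
+// release builds, where it simply executes the statement.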
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LiveRangeUnitTest : public TestWithZone {
+ public:
+ // Split helper, to avoid int->LifetimePosition conversion nuisance.
+ LiveRange* Split(LiveRange* range, int pos) {
+ return range->SplitAt(LifetimePosition::FromInt(pos), zone());
+ }
+
+
+ TopLevelLiveRange* Splinter(TopLevelLiveRange* top, int start, int end,
+ int new_id = 0) {
+ TopLevelLiveRange* ret =
+ new (zone()) TopLevelLiveRange(new_id, MachineType::kRepTagged);
+ top->Splinter(LifetimePosition::FromInt(start),
+ LifetimePosition::FromInt(end), ret, zone());
+ return ret;
+ }
+
+  // Returns true if ranges |first| and |second| match structurally.
+ bool RangesMatch(LiveRange* first, LiveRange* second) {
+ if (first->Start() != second->Start() || first->End() != second->End()) {
+ return false;
+ }
+ UseInterval* i1 = first->first_interval();
+ UseInterval* i2 = second->first_interval();
+
+ while (i1 != nullptr && i2 != nullptr) {
+ if (i1->start() != i2->start() || i1->end() != i2->end()) return false;
+ i1 = i1->next();
+ i2 = i2->next();
+ }
+ if (i1 != nullptr || i2 != nullptr) return false;
+
+ UsePosition* p1 = first->first_pos();
+ UsePosition* p2 = second->first_pos();
+
+ while (p1 != nullptr && p2 != nullptr) {
+ if (p1->pos() != p2->pos()) return false;
+ p1 = p1->next();
+ p2 = p2->next();
+ }
+ if (p1 != nullptr || p2 != nullptr) return false;
+ return true;
+ }
+};
+
+
+TEST_F(LiveRangeUnitTest, InvalidConstruction) {
+ // Build a range manually, because the builder guards against empty cases.
+ TopLevelLiveRange* range =
+ new (zone()) TopLevelLiveRange(1, MachineType::kRepTagged);
+ V8_ASSERT_DEBUG_DEATH(
+ range->AddUseInterval(LifetimePosition::FromInt(0),
+ LifetimePosition::FromInt(0), zone()),
+ ".*");
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitInvalidStart) {
+ TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 1);
+ V8_ASSERT_DEBUG_DEATH(Split(range, 0), ".*");
+}
+
+
+TEST_F(LiveRangeUnitTest, DISABLE_IN_RELEASE(InvalidSplitEnd)) {
+ TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 1);
+ ASSERT_DEATH_IF_SUPPORTED(Split(range, 1), ".*");
+}
+
+
+TEST_F(LiveRangeUnitTest, DISABLE_IN_RELEASE(SplitInvalidPreStart)) {
+ TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(1, 2);
+ ASSERT_DEATH_IF_SUPPORTED(Split(range, 0), ".*");
+}
+
+
+TEST_F(LiveRangeUnitTest, DISABLE_IN_RELEASE(SplitInvalidPostEnd)) {
+ TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 1);
+ ASSERT_DEATH_IF_SUPPORTED(Split(range, 2), ".*");
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitSingleIntervalNoUsePositions) {
+ TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 2);
+ LiveRange* child = Split(range, 1);
+
+ EXPECT_NE(nullptr, range->next());
+ EXPECT_EQ(child, range->next());
+
+ LiveRange* expected_top = TestRangeBuilder(zone()).Build(0, 1);
+ LiveRange* expected_bottom = TestRangeBuilder(zone()).Build(1, 2);
+ EXPECT_TRUE(RangesMatch(expected_top, range));
+ EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalNoUsePositionsBetween) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).Build();
+ LiveRange* child = Split(range, 3);
+
+ EXPECT_NE(nullptr, range->next());
+ EXPECT_EQ(child, range->next());
+
+ LiveRange* expected_top = TestRangeBuilder(zone()).Build(0, 2);
+ LiveRange* expected_bottom = TestRangeBuilder(zone()).Build(4, 6);
+ EXPECT_TRUE(RangesMatch(expected_top, range));
+ EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalNoUsePositionsFront) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).Build();
+ LiveRange* child = Split(range, 1);
+
+ EXPECT_NE(nullptr, range->next());
+ EXPECT_EQ(child, range->next());
+
+ LiveRange* expected_top = TestRangeBuilder(zone()).Build(0, 1);
+ LiveRange* expected_bottom =
+ TestRangeBuilder(zone()).Add(1, 2).Add(4, 6).Build();
+ EXPECT_TRUE(RangesMatch(expected_top, range));
+ EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalNoUsePositionsAfter) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).Build();
+ LiveRange* child = Split(range, 5);
+
+ EXPECT_NE(nullptr, range->next());
+ EXPECT_EQ(child, range->next());
+
+ LiveRange* expected_top =
+ TestRangeBuilder(zone()).Add(0, 2).Add(4, 5).Build();
+ LiveRange* expected_bottom = TestRangeBuilder(zone()).Build(5, 6);
+ EXPECT_TRUE(RangesMatch(expected_top, range));
+ EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitSingleIntervalUsePositions) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 3).AddUse(0).AddUse(2).Build();
+
+ LiveRange* child = Split(range, 1);
+
+ EXPECT_NE(nullptr, range->next());
+ EXPECT_EQ(child, range->next());
+
+ LiveRange* expected_top =
+ TestRangeBuilder(zone()).Add(0, 1).AddUse(0).Build();
+ LiveRange* expected_bottom =
+ TestRangeBuilder(zone()).Add(1, 3).AddUse(2).Build();
+ EXPECT_TRUE(RangesMatch(expected_top, range));
+ EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitSingleIntervalUsePositionsAtPos) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 3).AddUse(0).AddUse(2).Build();
+
+ LiveRange* child = Split(range, 2);
+
+ EXPECT_NE(nullptr, range->next());
+ EXPECT_EQ(child, range->next());
+
+ LiveRange* expected_top =
+ TestRangeBuilder(zone()).Add(0, 2).AddUse(0).AddUse(2).Build();
+ LiveRange* expected_bottom = TestRangeBuilder(zone()).Build(2, 3);
+ EXPECT_TRUE(RangesMatch(expected_top, range));
+ EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsBetween) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).AddUse(1).AddUse(5).Build();
+ LiveRange* child = Split(range, 3);
+
+ EXPECT_NE(nullptr, range->next());
+ EXPECT_EQ(child, range->next());
+
+ LiveRange* expected_top =
+ TestRangeBuilder(zone()).Add(0, 2).AddUse(1).Build();
+ LiveRange* expected_bottom =
+ TestRangeBuilder(zone()).Add(4, 6).AddUse(5).Build();
+ EXPECT_TRUE(RangesMatch(expected_top, range));
+ EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsAtInterval) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).AddUse(1).AddUse(4).Build();
+ LiveRange* child = Split(range, 4);
+
+ EXPECT_NE(nullptr, range->next());
+ EXPECT_EQ(child, range->next());
+
+ LiveRange* expected_top =
+ TestRangeBuilder(zone()).Add(0, 2).AddUse(1).Build();
+ LiveRange* expected_bottom =
+ TestRangeBuilder(zone()).Add(4, 6).AddUse(4).Build();
+ EXPECT_TRUE(RangesMatch(expected_top, range));
+ EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsFront) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).AddUse(1).AddUse(5).Build();
+ LiveRange* child = Split(range, 1);
+
+ EXPECT_NE(nullptr, range->next());
+ EXPECT_EQ(child, range->next());
+
+ LiveRange* expected_top =
+ TestRangeBuilder(zone()).Add(0, 1).AddUse(1).Build();
+ LiveRange* expected_bottom =
+ TestRangeBuilder(zone()).Add(1, 2).Add(4, 6).AddUse(5).Build();
+ EXPECT_TRUE(RangesMatch(expected_top, range));
+ EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsAfter) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).AddUse(1).AddUse(5).Build();
+ LiveRange* child = Split(range, 5);
+
+ EXPECT_NE(nullptr, range->next());
+ EXPECT_EQ(child, range->next());
+
+ LiveRange* expected_top =
+ TestRangeBuilder(zone()).Add(0, 2).Add(4, 5).AddUse(1).AddUse(5).Build();
+ LiveRange* expected_bottom = TestRangeBuilder(zone()).Build(5, 6);
+ EXPECT_TRUE(RangesMatch(expected_top, range));
+ EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplinterSingleInterval) {
+ TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 6);
+ TopLevelLiveRange* splinter = Splinter(range, 3, 5);
+ EXPECT_EQ(nullptr, range->next());
+ EXPECT_EQ(nullptr, splinter->next());
+ EXPECT_EQ(range, splinter->splintered_from());
+
+ TopLevelLiveRange* expected_source =
+ TestRangeBuilder(zone()).Add(0, 3).Add(5, 6).Build();
+ TopLevelLiveRange* expected_splinter = TestRangeBuilder(zone()).Build(3, 5);
+ EXPECT_TRUE(RangesMatch(expected_source, range));
+ EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
+}
+
+
+TEST_F(LiveRangeUnitTest, MergeSingleInterval) {
+ TopLevelLiveRange* original = TestRangeBuilder(zone()).Build(0, 6);
+ TopLevelLiveRange* splinter = Splinter(original, 3, 5);
+
+ original->Merge(splinter, zone());
+ TopLevelLiveRange* result = TestRangeBuilder(zone()).Build(0, 6);
+ LiveRange* child_1 = Split(result, 3);
+ Split(child_1, 5);
+
+ EXPECT_TRUE(RangesMatch(result, original));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsOutside) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+ TopLevelLiveRange* splinter = Splinter(range, 2, 6);
+ EXPECT_EQ(nullptr, range->next());
+ EXPECT_EQ(nullptr, splinter->next());
+ EXPECT_EQ(range, splinter->splintered_from());
+
+ TopLevelLiveRange* expected_source =
+ TestRangeBuilder(zone()).Add(0, 2).Add(6, 8).Build();
+ TopLevelLiveRange* expected_splinter =
+ TestRangeBuilder(zone()).Add(2, 3).Add(5, 6).Build();
+ EXPECT_TRUE(RangesMatch(expected_source, range));
+ EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
+}
+
+
+TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsOutside) {
+ TopLevelLiveRange* original =
+ TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+ TopLevelLiveRange* splinter = Splinter(original, 2, 6);
+ original->Merge(splinter, zone());
+
+ TopLevelLiveRange* result =
+ TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+ LiveRange* child_1 = Split(result, 2);
+ Split(child_1, 6);
+ EXPECT_TRUE(RangesMatch(result, original));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsInside) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+ V8_ASSERT_DEBUG_DEATH(Splinter(range, 3, 5), ".*");
+}
+
+
+TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsLeft) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+ TopLevelLiveRange* splinter = Splinter(range, 2, 4);
+ EXPECT_EQ(nullptr, range->next());
+ EXPECT_EQ(nullptr, splinter->next());
+ EXPECT_EQ(range, splinter->splintered_from());
+
+ TopLevelLiveRange* expected_source =
+ TestRangeBuilder(zone()).Add(0, 2).Add(5, 8).Build();
+ TopLevelLiveRange* expected_splinter = TestRangeBuilder(zone()).Build(2, 3);
+ EXPECT_TRUE(RangesMatch(expected_source, range));
+ EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
+}
+
+
+TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsLeft) {
+ TopLevelLiveRange* original =
+ TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+ TopLevelLiveRange* splinter = Splinter(original, 2, 4);
+ original->Merge(splinter, zone());
+
+ TopLevelLiveRange* result =
+ TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+ Split(result, 2);
+ EXPECT_TRUE(RangesMatch(result, original));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsRight) {
+ TopLevelLiveRange* range =
+ TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+ TopLevelLiveRange* splinter = Splinter(range, 4, 6);
+ EXPECT_EQ(nullptr, range->next());
+ EXPECT_EQ(nullptr, splinter->next());
+ EXPECT_EQ(range, splinter->splintered_from());
+
+ TopLevelLiveRange* expected_source =
+ TestRangeBuilder(zone()).Add(0, 3).Add(6, 8).Build();
+ TopLevelLiveRange* expected_splinter = TestRangeBuilder(zone()).Build(5, 6);
+ EXPECT_TRUE(RangesMatch(expected_source, range));
+ EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
+}
+
+
+TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsRight) {
+ TopLevelLiveRange* original =
+ TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+ TopLevelLiveRange* splinter = Splinter(original, 4, 6);
+ original->Merge(splinter, zone());
+
+ TopLevelLiveRange* result =
+ TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+ LiveRange* child_1 = Split(result, 5);
+ Split(child_1, 6);
+
+ EXPECT_TRUE(RangesMatch(result, original));
+}
+
+
+TEST_F(LiveRangeUnitTest, MergeAfterSplitting) {
+ TopLevelLiveRange* original = TestRangeBuilder(zone()).Build(0, 8);
+ TopLevelLiveRange* splinter = Splinter(original, 4, 6);
+ LiveRange* original_child = Split(original, 2);
+ Split(original_child, 7);
+ original->Merge(splinter, zone());
+
+ TopLevelLiveRange* result = TestRangeBuilder(zone()).Build(0, 8);
+ LiveRange* child_1 = Split(result, 2);
+ LiveRange* child_2 = Split(child_1, 4);
+ LiveRange* child_3 = Split(child_2, 6);
+ Split(child_3, 7);
+
+ EXPECT_TRUE(RangesMatch(result, original));
+}
+
+
+TEST_F(LiveRangeUnitTest, IDGeneration) {
+ TopLevelLiveRange* vreg = TestRangeBuilder(zone()).Id(2).Build(0, 100);
+ EXPECT_EQ(2, vreg->vreg());
+ EXPECT_EQ(0, vreg->relative_id());
+
+ TopLevelLiveRange* splinter =
+ new (zone()) TopLevelLiveRange(101, MachineType::kRepTagged);
+ vreg->Splinter(LifetimePosition::FromInt(4), LifetimePosition::FromInt(12),
+ splinter, zone());
+
+ EXPECT_EQ(101, splinter->vreg());
+ EXPECT_EQ(1, splinter->relative_id());
+
+ LiveRange* child = vreg->SplitAt(LifetimePosition::FromInt(50), zone());
+
+ EXPECT_EQ(2, child->relative_id());
+
+ LiveRange* splinter_child =
+ splinter->SplitAt(LifetimePosition::FromInt(8), zone());
+
+ EXPECT_EQ(1, splinter->relative_id());
+ EXPECT_EQ(3, splinter_child->relative_id());
+
+ vreg->Merge(splinter, zone());
+ EXPECT_EQ(1, splinter->relative_id());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
index 3c94c25887..9016e04e25 100644
--- a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
@@ -64,9 +64,10 @@ class LivenessAnalysisTest : public GraphTest {
const Operator* op = common()->FrameState(
BailoutId(ast_num), OutputFrameStateCombine::Ignore(), state_info);
- Node* result = graph()->NewNode(op, empty_values_, locals, empty_values_,
- jsgraph()->UndefinedConstant(),
- jsgraph()->UndefinedConstant());
+ Node* result =
+ graph()->NewNode(op, empty_values_, locals, empty_values_,
+ jsgraph()->UndefinedConstant(),
+ jsgraph()->UndefinedConstant(), graph()->start());
current_block_->Checkpoint(result);
return result;
diff --git a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
index c725a27cc0..f1dac8bb64 100644
--- a/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
+++ b/deps/v8/test/unittests/compiler/loop-peeling-unittest.cc
@@ -468,7 +468,7 @@ TEST_F(LoopPeelingTest, TwoExitLoop_nope) {
const Operator kMockCall(IrOpcode::kCall, Operator::kNoProperties, "MockCall",
- 0, 0, 1, 1, 0, 2);
+ 0, 0, 1, 1, 1, 2);
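+// The Operator arguments after the mnemonic are the value/effect/control
+// input counts followed by the value/effect/control output counts; the
+// changed fifth count gives the mock call an effect output, which the
+// IfException node below now consumes.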
TEST_F(LoopPeelingTest, TwoExitLoopWithCall_nope) {
@@ -479,7 +479,7 @@ TEST_F(LoopPeelingTest, TwoExitLoopWithCall_nope) {
Node* call = graph()->NewNode(&kMockCall, b1.if_true);
Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
Node* if_exception = graph()->NewNode(
- common()->IfException(IfExceptionHint::kLocallyUncaught), call);
+ common()->IfException(IfExceptionHint::kLocallyUncaught), call, call);
loop->ReplaceInput(1, if_success);
Node* merge = graph()->NewNode(common()->Merge(2), b1.if_false, if_exception);
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index b14e9d392d..13a5b6f3bb 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -7,6 +7,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/typer.h"
+#include "src/conversions-inl.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
@@ -1453,6 +1454,19 @@ TEST_F(MachineOperatorReducerTest, Float64EqualWithFloat32Conversions) {
}
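+// A float64 comparison between a widened float32 value and a constant that
+// is exactly representable as float32 can be narrowed to the corresponding
+// float32 comparison; the tests below cover Equal, LessThan and
+// LessThanOrEqual (with the constant on either side of the non-commutative
+// operators).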
+TEST_F(MachineOperatorReducerTest, Float64EqualWithFloat32Constant) {
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction r = Reduce(graph()->NewNode(
+ machine()->Float64Equal(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0),
+ Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Equal(p0, IsFloat32Constant(x)));
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Float64LessThan
@@ -1469,6 +1483,30 @@ TEST_F(MachineOperatorReducerTest, Float64LessThanWithFloat32Conversions) {
}
+TEST_F(MachineOperatorReducerTest, Float64LessThanWithFloat32Constant) {
+ Node* const p0 = Parameter(0);
+ {
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction r = Reduce(graph()->NewNode(
+ machine()->Float64LessThan(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0),
+ Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32LessThan(p0, IsFloat32Constant(x)));
+ }
+ }
+ {
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction r = Reduce(graph()->NewNode(
+ machine()->Float64LessThan(), Float64Constant(x),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32LessThan(IsFloat32Constant(x), p0));
+ }
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Float64LessThanOrEqual
@@ -1486,12 +1524,38 @@ TEST_F(MachineOperatorReducerTest,
}
+TEST_F(MachineOperatorReducerTest, Float64LessThanOrEqualWithFloat32Constant) {
+ Node* const p0 = Parameter(0);
+ {
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction r = Reduce(graph()->NewNode(
+ machine()->Float64LessThanOrEqual(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0),
+ Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat32LessThanOrEqual(p0, IsFloat32Constant(x)));
+ }
+ }
+ {
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction r = Reduce(graph()->NewNode(
+ machine()->Float64LessThanOrEqual(), Float64Constant(x),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat32LessThanOrEqual(IsFloat32Constant(x), p0));
+ }
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Store
TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32And) {
- const StoreRepresentation rep(kRepWord8, kNoWriteBarrier);
+ const StoreRepresentation rep(kMachUint8, kNoWriteBarrier);
Node* const base = Parameter(0);
Node* const index = Parameter(1);
Node* const value = Parameter(2);
@@ -1513,7 +1577,7 @@ TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32And) {
TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32SarAndWord32Shl) {
- const StoreRepresentation rep(kRepWord8, kNoWriteBarrier);
+ const StoreRepresentation rep(kMachUint8, kNoWriteBarrier);
Node* const base = Parameter(0);
Node* const index = Parameter(1);
Node* const value = Parameter(2);
@@ -1537,7 +1601,7 @@ TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32SarAndWord32Shl) {
TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32And) {
- const StoreRepresentation rep(kRepWord16, kNoWriteBarrier);
+ const StoreRepresentation rep(kMachUint16, kNoWriteBarrier);
Node* const base = Parameter(0);
Node* const index = Parameter(1);
Node* const value = Parameter(2);
@@ -1559,7 +1623,7 @@ TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32And) {
TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32SarAndWord32Shl) {
- const StoreRepresentation rep(kRepWord16, kNoWriteBarrier);
+ const StoreRepresentation rep(kMachUint16, kNoWriteBarrier);
Node* const base = Parameter(0);
Node* const index = Parameter(1);
Node* const value = Parameter(2);
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index fca53e2bd8..f49fbd7b03 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -32,11 +32,10 @@ namespace {
const MachineType kMachineReps[] = {kRepWord32, kRepWord64};
-const MachineType kMachineTypes[] = {
- kMachFloat32, kMachFloat64, kMachInt8, kMachUint8, kMachInt16,
- kMachUint16, kMachInt32, kMachUint32, kMachInt64, kMachUint64,
- kMachPtr, kMachAnyTagged, kRepBit, kRepWord8, kRepWord16,
- kRepWord32, kRepWord64, kRepFloat32, kRepFloat64, kRepTagged};
+const MachineType kMachineTypesForAccess[] = {
+ kMachFloat32, kMachFloat64, kMachInt8, kMachUint8, kMachInt16,
+ kMachUint16, kMachInt32, kMachUint32, kMachInt64, kMachUint64,
+ kMachPtr, kMachAnyTagged, kMachPtr};
} // namespace
@@ -84,9 +83,10 @@ TEST_P(MachineLoadOperatorTest, ParameterIsCorrect) {
}
-INSTANTIATE_TEST_CASE_P(MachineOperatorTest, MachineLoadOperatorTest,
- ::testing::Combine(::testing::ValuesIn(kMachineReps),
- ::testing::ValuesIn(kMachineTypes)));
+INSTANTIATE_TEST_CASE_P(
+ MachineOperatorTest, MachineLoadOperatorTest,
+ ::testing::Combine(::testing::ValuesIn(kMachineReps),
+ ::testing::ValuesIn(kMachineTypesForAccess)));
// -----------------------------------------------------------------------------
@@ -146,7 +146,7 @@ INSTANTIATE_TEST_CASE_P(
MachineOperatorTest, MachineStoreOperatorTest,
::testing::Combine(
::testing::ValuesIn(kMachineReps),
- ::testing::Combine(::testing::ValuesIn(kMachineTypes),
+ ::testing::Combine(::testing::ValuesIn(kMachineTypesForAccess),
::testing::Values(kNoWriteBarrier,
kFullWriteBarrier))));
#endif
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index 1dbaa9f60a..b88f4695c8 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -497,8 +497,8 @@ static const MemoryAccess kMemoryAccesses[] = {
{kMachInt16, kMipsLh, kMipsSh},
{kMachUint16, kMipsLhu, kMipsSh},
{kMachInt32, kMipsLw, kMipsSw},
- {kRepFloat32, kMipsLwc1, kMipsSwc1},
- {kRepFloat64, kMipsLdc1, kMipsSdc1}};
+ {kMachFloat32, kMipsLwc1, kMipsSwc1},
+ {kMachFloat64, kMipsLdc1, kMipsSdc1}};
struct MemoryAccessImm {
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index 00343d2023..fe57eb5e28 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -444,6 +444,30 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorConversionTest,
::testing::ValuesIn(kConversionInstructions));
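+// On 64-bit targets a Smi lives in the upper 32 bits of the tagged word, so
+// untagging is an arithmetic right shift by 32 and tagging a left shift by
+// 32; each pattern should be selected as a single Dsar/Dshl instruction.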
+TEST_F(InstructionSelectorTest, ChangesFromToSmi) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(m.TruncateInt64ToInt32(
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dsar, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32);
+ m.Return(
+ m.Word64Shl(m.ChangeInt32ToInt64(m.Parameter(0)), m.Int32Constant(32)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dshl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
// ----------------------------------------------------------------------------
// Loads and stores.
@@ -464,8 +488,8 @@ static const MemoryAccess kMemoryAccesses[] = {
{kMachInt16, kMips64Lh, kMips64Sh},
{kMachUint16, kMips64Lhu, kMips64Sh},
{kMachInt32, kMips64Lw, kMips64Sw},
- {kRepFloat32, kMips64Lwc1, kMips64Swc1},
- {kRepFloat64, kMips64Ldc1, kMips64Sdc1},
+ {kMachFloat32, kMips64Lwc1, kMips64Swc1},
+ {kMachFloat64, kMips64Ldc1, kMips64Sdc1},
{kMachInt64, kMips64Ld, kMips64Sd}};
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index d097ee4b66..2ca1b78d09 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -11,7 +11,7 @@
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
-#include "src/unique.h"
+#include "src/handles-inl.h"
using testing::_;
using testing::MakeMatcher;
@@ -21,6 +21,11 @@ using testing::StringMatchResultListener;
namespace v8 {
namespace internal {
+
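+// gtest needs an equality operator to compare Handle<HeapObject> values;
+// two handles are considered equal iff they refer to the same heap object.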
+bool operator==(Handle<HeapObject> const& lhs, Handle<HeapObject> const& rhs) {
+ return lhs.is_identical_to(rhs);
+}
+
namespace compiler {
namespace {
@@ -407,8 +412,8 @@ class IsSelectMatcher final : public NodeMatcher {
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
- type_matcher_, listener) &&
+ PrintMatchAndExplain(OpParameter<SelectParameters>(node).type(),
+ "type", type_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value0", value0_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
@@ -636,7 +641,7 @@ class IsProjectionMatcher final : public NodeMatcher {
class IsCallMatcher final : public NodeMatcher {
public:
- IsCallMatcher(const Matcher<CallDescriptor*>& descriptor_matcher,
+ IsCallMatcher(const Matcher<const CallDescriptor*>& descriptor_matcher,
const std::vector<Matcher<Node*>>& value_matchers,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher)
@@ -665,8 +670,8 @@ class IsCallMatcher final : public NodeMatcher {
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
if (!NodeMatcher::MatchAndExplain(node, listener) ||
- !PrintMatchAndExplain(OpParameter<CallDescriptor*>(node), "descriptor",
- descriptor_matcher_, listener)) {
+ !PrintMatchAndExplain(OpParameter<const CallDescriptor*>(node),
+ "descriptor", descriptor_matcher_, listener)) {
return false;
}
for (size_t i = 0; i < value_matchers_.size(); ++i) {
@@ -685,7 +690,7 @@ class IsCallMatcher final : public NodeMatcher {
}
private:
- const Matcher<CallDescriptor*> descriptor_matcher_;
+ const Matcher<const CallDescriptor*> descriptor_matcher_;
const std::vector<Matcher<Node*>> value_matchers_;
const Matcher<Node*> effect_matcher_;
const Matcher<Node*> control_matcher_;
@@ -1528,10 +1533,9 @@ Matcher<Node*> IsExternalConstant(
}
-Matcher<Node*> IsHeapConstant(
- const Matcher<Unique<HeapObject> >& value_matcher) {
- return MakeMatcher(new IsConstantMatcher<Unique<HeapObject> >(
- IrOpcode::kHeapConstant, value_matcher));
+Matcher<Node*> IsHeapConstant(Handle<HeapObject> value) {
+ return MakeMatcher(new IsConstantMatcher<Handle<HeapObject>>(
+ IrOpcode::kHeapConstant, value));
}
@@ -1614,20 +1618,52 @@ Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
}
-Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ return MakeMatcher(new IsCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ return MakeMatcher(new IsCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& value3_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
std::vector<Matcher<Node*>> value_matchers;
value_matchers.push_back(value0_matcher);
value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
return MakeMatcher(new IsCallMatcher(descriptor_matcher, value_matchers,
effect_matcher, control_matcher));
}
-Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher,
@@ -1646,7 +1682,7 @@ Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
}
-Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher,
@@ -1668,7 +1704,7 @@ Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
Matcher<Node*> IsCall(
- const Matcher<CallDescriptor*>& descriptor_matcher,
+ const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
@@ -1766,6 +1802,48 @@ Matcher<Node*> IsTailCall(
}
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& value6_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ value_matchers.push_back(value4_matcher);
+ value_matchers.push_back(value5_matcher);
+ value_matchers.push_back(value6_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& value6_matcher, const Matcher<Node*>& value7_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ value_matchers.push_back(value4_matcher);
+ value_matchers.push_back(value5_matcher);
+ value_matchers.push_back(value6_matcher);
+ value_matchers.push_back(value7_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
@@ -1930,6 +2008,7 @@ IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
IS_BINOP_MATCHER(Int64Add)
IS_BINOP_MATCHER(Int64Sub)
+IS_BINOP_MATCHER(JSAdd)
IS_BINOP_MATCHER(Float32Max)
IS_BINOP_MATCHER(Float32Min)
IS_BINOP_MATCHER(Float32Equal)
@@ -1968,7 +2047,6 @@ IS_UNOP_MATCHER(Float64ExtractHighWord32)
IS_UNOP_MATCHER(NumberToInt32)
IS_UNOP_MATCHER(NumberToUint32)
IS_UNOP_MATCHER(ObjectIsSmi)
-IS_UNOP_MATCHER(ObjectIsNonNegativeSmi)
IS_UNOP_MATCHER(Word32Clz)
#undef IS_UNOP_MATCHER
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 149dcfc439..7042d9943b 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -14,9 +14,9 @@ namespace internal {
// Forward declarations.
class ExternalReference;
+template <typename T>
+class Handle;
class HeapObject;
-template <class T>
-class Unique;
template <class>
class TypeImpl;
struct ZoneTypeConfig;
@@ -73,8 +73,7 @@ Matcher<Node*> IsTerminate(const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsExternalConstant(
const Matcher<ExternalReference>& value_matcher);
-Matcher<Node*> IsHeapConstant(
- const Matcher<Unique<HeapObject> >& value_matcher);
+Matcher<Node*> IsHeapConstant(Handle<HeapObject> value);
Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher);
Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher);
Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
@@ -100,12 +99,25 @@ Matcher<Node*> IsEffectSet(const Matcher<Node*>& effect0_matcher,
const Matcher<Node*>& effect1_matcher);
Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher);
-Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher,
@@ -113,7 +125,7 @@ Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value4_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher,
@@ -123,7 +135,7 @@ Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsCall(
- const Matcher<CallDescriptor*>& descriptor_matcher,
+ const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
@@ -158,6 +170,21 @@ Matcher<Node*> IsTailCall(
const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& value6_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& value6_matcher, const Matcher<Node*>& value7_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsBooleanNot(const Matcher<Node*>& value_matcher);
@@ -215,7 +242,6 @@ Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsObjectIsSmi(const Matcher<Node*>& value_matcher);
-Matcher<Node*> IsObjectIsNonNegativeSmi(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
@@ -269,6 +295,8 @@ Matcher<Node*> IsInt64Add(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
diff --git a/deps/v8/test/unittests/compiler/scheduler-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
index 954541b721..45d555d7a1 100644
--- a/deps/v8/test/unittests/compiler/scheduler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
@@ -137,7 +137,7 @@ const Operator kHeapConstant(IrOpcode::kHeapConstant, Operator::kPure,
const Operator kIntAdd(IrOpcode::kInt32Add, Operator::kPure, "Int32Add", 2, 0,
0, 1, 0, 0);
const Operator kMockCall(IrOpcode::kCall, Operator::kNoProperties, "MockCall",
- 0, 0, 1, 1, 0, 2);
+ 0, 0, 1, 1, 1, 2);
const Operator kMockTailCall(IrOpcode::kTailCall, Operator::kNoProperties,
"MockTailCall", 1, 1, 1, 0, 0, 1);
@@ -649,31 +649,6 @@ TEST_F(SchedulerTest, BuildScheduleOneParameter) {
}
-TEST_F(SchedulerTest, BuildScheduleIfSplit) {
- graph()->SetStart(graph()->NewNode(common()->Start(5)));
-
- Node* p1 = graph()->NewNode(common()->Parameter(0), graph()->start());
- Node* p2 = graph()->NewNode(common()->Parameter(1), graph()->start());
- Node* p3 = graph()->NewNode(common()->Parameter(2), graph()->start());
- Node* p4 = graph()->NewNode(common()->Parameter(3), graph()->start());
- Node* p5 = graph()->NewNode(common()->Parameter(4), graph()->start());
- Node* cmp =
- graph()->NewNode(js()->LessThanOrEqual(LanguageMode::SLOPPY), p1, p2, p3,
- p4, p5, graph()->start(), graph()->start());
- Node* branch = graph()->NewNode(common()->Branch(), cmp, graph()->start());
- Node* true_branch = graph()->NewNode(common()->IfTrue(), branch);
- Node* false_branch = graph()->NewNode(common()->IfFalse(), branch);
-
- Node* ret1 =
- graph()->NewNode(common()->Return(), p4, graph()->start(), true_branch);
- Node* ret2 =
- graph()->NewNode(common()->Return(), p5, graph()->start(), false_branch);
- graph()->SetEnd(graph()->NewNode(common()->End(2), ret1, ret2));
-
- ComputeAndVerifySchedule(13);
-}
-
-
namespace {
Node* CreateDiamond(Graph* graph, CommonOperatorBuilder* common, Node* cond) {
@@ -755,7 +730,7 @@ TARGET_TEST_F(SchedulerTest, NestedFloatingDiamonds) {
Node* map = graph()->NewNode(
simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), p0, p0,
- p0, start, f);
+ start, f);
Node* br1 = graph()->NewNode(common()->Branch(), map, graph()->start());
Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index b625d12888..79e530f228 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -7,7 +7,7 @@
#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/simplified-operator-reducer.h"
-#include "src/conversions.h"
+#include "src/conversions-inl.h"
#include "src/types.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index 07728913b1..7e4329ea3a 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -51,9 +51,6 @@ const PureOperator kPureOperators[] = {
PURE(NumberToInt32, Operator::kNoProperties, 1),
PURE(NumberToUint32, Operator::kNoProperties, 1),
PURE(PlainPrimitiveToNumber, Operator::kNoProperties, 1),
- PURE(StringEqual, Operator::kCommutative, 2),
- PURE(StringLessThan, Operator::kNoProperties, 2),
- PURE(StringLessThanOrEqual, Operator::kNoProperties, 2),
PURE(ChangeTaggedToInt32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToUint32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToFloat64, Operator::kNoProperties, 1),
@@ -62,8 +59,7 @@ const PureOperator kPureOperators[] = {
PURE(ChangeFloat64ToTagged, Operator::kNoProperties, 1),
PURE(ChangeBoolToBit, Operator::kNoProperties, 1),
PURE(ChangeBitToBool, Operator::kNoProperties, 1),
- PURE(ObjectIsSmi, Operator::kNoProperties, 1),
- PURE(ObjectIsNonNegativeSmi, Operator::kNoProperties, 1)
+ PURE(ObjectIsSmi, Operator::kNoProperties, 1)
#undef PURE
};
diff --git a/deps/v8/test/unittests/compiler/typer-unittest.cc b/deps/v8/test/unittests/compiler/typer-unittest.cc
index f977c6fddd..7113bf2eec 100644
--- a/deps/v8/test/unittests/compiler/typer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/typer-unittest.cc
@@ -7,6 +7,7 @@
#include "src/codegen.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
#include "test/cctest/types-fuzz.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -58,11 +59,26 @@ class TyperTest : public TypedGraphTest {
Type* TypeBinaryOp(const Operator* op, Type* lhs, Type* rhs) {
Node* p0 = Parameter(0);
Node* p1 = Parameter(1);
- NodeProperties::SetBounds(p0, Bounds(lhs));
- NodeProperties::SetBounds(p1, Bounds(rhs));
- Node* n = graph()->NewNode(op, p0, p1, context_node_, graph()->start(),
- graph()->start());
- return NodeProperties::GetBounds(n).upper;
+ NodeProperties::SetType(p0, lhs);
+ NodeProperties::SetType(p1, rhs);
+ std::vector<Node*> inputs;
+ inputs.push_back(p0);
+ inputs.push_back(p1);
+ if (OperatorProperties::HasContextInput(op)) {
+ inputs.push_back(context_node_);
+ }
+ for (int i = 0; i < OperatorProperties::GetFrameStateInputCount(op); i++) {
+ inputs.push_back(EmptyFrameState());
+ }
+ for (int i = 0; i < op->EffectInputCount(); i++) {
+ inputs.push_back(graph()->start());
+ }
+ for (int i = 0; i < op->ControlInputCount(); i++) {
+ inputs.push_back(graph()->start());
+ }
+ Node* n = graph()->NewNode(op, static_cast<int>(inputs.size()),
+ &(inputs.front()));
+ return NodeProperties::GetType(n);
}
Type* RandomRange(bool int32 = false) {
@@ -196,9 +212,7 @@ class TyperTest : public TypedGraphTest {
Type* type2 = types_.Fuzz();
Type* type = TypeBinaryOp(op, type1, type2);
Type* subtype1 = RandomSubtype(type1);
- ;
Type* subtype2 = RandomSubtype(type2);
- ;
Type* subtype = TypeBinaryOp(op, subtype1, subtype2);
EXPECT_TRUE(subtype->Is(type));
}
@@ -399,7 +413,7 @@ TEST_F(TyperTest, TypeRegressInt32Constant) {
int values[] = {-5, 10};
for (auto i : values) {
Node* c = graph()->NewNode(common()->Int32Constant(i));
- Type* type = NodeProperties::GetBounds(c).upper;
+ Type* type = NodeProperties::GetType(c);
EXPECT_TRUE(type->Is(NewRange(i, i)));
}
}
diff --git a/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc b/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
index 5ea375ff19..c003033940 100644
--- a/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/value-numbering-reducer-unittest.cc
@@ -44,8 +44,8 @@ class ValueNumberingReducerTest : public TestWithZone {
TEST_F(ValueNumberingReducerTest, AllInputsAreChecked) {
Node* na = graph()->NewNode(&kOp0);
Node* nb = graph()->NewNode(&kOp0);
- Node* n1 = graph()->NewNode(&kOp0, na);
- Node* n2 = graph()->NewNode(&kOp0, nb);
+ Node* n1 = graph()->NewNode(&kOp1, na);
+ Node* n2 = graph()->NewNode(&kOp1, nb);
EXPECT_FALSE(Reduce(n1).Changed());
EXPECT_FALSE(Reduce(n2).Changed());
}
@@ -73,8 +73,7 @@ TEST_F(ValueNumberingReducerTest, OperatorEqualityNotIdentity) {
static const size_t kMaxInputCount = 16;
Node* inputs[kMaxInputCount];
for (size_t i = 0; i < arraysize(inputs); ++i) {
- Operator::Opcode opcode = static_cast<Operator::Opcode>(
- std::numeric_limits<Operator::Opcode>::max() - i);
+ Operator::Opcode opcode = static_cast<Operator::Opcode>(kMaxInputCount + i);
inputs[i] = graph()->NewNode(
new (zone()) TestOperator(opcode, Operator::kIdempotent, 0, 1));
}
@@ -99,8 +98,7 @@ TEST_F(ValueNumberingReducerTest, SubsequentReductionsYieldTheSameNode) {
static const size_t kMaxInputCount = 16;
Node* inputs[kMaxInputCount];
for (size_t i = 0; i < arraysize(inputs); ++i) {
- Operator::Opcode opcode = static_cast<Operator::Opcode>(
- std::numeric_limits<Operator::Opcode>::max() - i);
+ Operator::Opcode opcode = static_cast<Operator::Opcode>(2 + i);
inputs[i] = graph()->NewNode(
new (zone()) TestOperator(opcode, Operator::kIdempotent, 0, 1));
}
diff --git a/deps/v8/test/unittests/heap/bitmap-unittest.cc b/deps/v8/test/unittests/heap/bitmap-unittest.cc
new file mode 100644
index 0000000000..a84437d534
--- /dev/null
+++ b/deps/v8/test/unittests/heap/bitmap-unittest.cc
@@ -0,0 +1,107 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/spaces.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+using v8::internal::Bitmap;
+
+class BitmapTest : public ::testing::Test {
+ public:
+ static const uint32_t kBlackCell;
+ static const uint32_t kWhiteCell;
+ static const uint32_t kBlackByte;
+ static const uint32_t kWhiteByte;
+
+ BitmapTest() : memory_(new uint8_t[Bitmap::kSize]) {
+ memset(memory_, 0, Bitmap::kSize);
+ }
+
+ virtual ~BitmapTest() { delete[] memory_; }
+
+ Bitmap* bitmap() { return reinterpret_cast<Bitmap*>(memory_); }
+ uint8_t* raw_bitmap() { return memory_; }
+
+ private:
+ uint8_t* memory_;
+};
+
+
+const uint32_t BitmapTest::kBlackCell = 0xAAAAAAAA;
+const uint32_t BitmapTest::kWhiteCell = 0x00000000;
+const uint32_t BitmapTest::kBlackByte = 0xAA;
+const uint32_t BitmapTest::kWhiteByte = 0x00;
+
+
+TEST_F(BitmapTest, IsZeroInitialized) {
+ // We require all tests to start from a zero-initialized bitmap. Manually
+ // verify this invariant here.
+ for (size_t i = 0; i < Bitmap::kSize; i++) {
+ EXPECT_EQ(raw_bitmap()[i], kWhiteByte);
+ }
+}
+
+
+TEST_F(BitmapTest, Cells) {
+ Bitmap* bm = bitmap();
+ bm->cells()[1] = kBlackCell;
+ uint8_t* raw = raw_bitmap();
+ int second_cell_base = Bitmap::kBytesPerCell;
+ for (size_t i = 0; i < Bitmap::kBytesPerCell; i++) {
+ EXPECT_EQ(raw[second_cell_base + i], kBlackByte);
+ }
+}
+
+
+TEST_F(BitmapTest, CellsCount) {
+ int last_cell_index = bitmap()->CellsCount() - 1;
+ bitmap()->cells()[last_cell_index] = kBlackCell;
+ // Manually verify on raw memory.
+ uint8_t* raw = raw_bitmap();
+ for (size_t i = 0; i < Bitmap::kSize; i++) {
+ // Last cell should be set.
+ if (i >= (Bitmap::kSize - Bitmap::kBytesPerCell)) {
+ EXPECT_EQ(raw[i], kBlackByte);
+ } else {
+ EXPECT_EQ(raw[i], kWhiteByte);
+ }
+ }
+}
+
+
+TEST_F(BitmapTest, IsClean) {
+ Bitmap* bm = bitmap();
+ EXPECT_TRUE(bm->IsClean());
+ bm->cells()[0] = kBlackCell;
+ EXPECT_FALSE(bm->IsClean());
+}
+
+
+TEST_F(BitmapTest, ClearRange1) {
+ Bitmap* bm = bitmap();
+ bm->cells()[0] = kBlackCell;
+ bm->cells()[1] = kBlackCell;
+ bm->cells()[2] = kBlackCell;
+ bm->ClearRange(0, Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2);
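+ // Cell 0 is fully cleared; only the low half of cell 1 is cleared, so
+ // its upper half keeps the black pattern (0xAAAA0000).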
+ EXPECT_EQ(bm->cells()[0], kWhiteCell);
+ EXPECT_EQ(bm->cells()[1], 0xAAAA0000);
+ EXPECT_EQ(bm->cells()[2], kBlackCell);
+}
+
+
+TEST_F(BitmapTest, ClearRange2) {
+ Bitmap* bm = bitmap();
+ bm->cells()[0] = kBlackCell;
+ bm->cells()[1] = kBlackCell;
+ bm->cells()[2] = kBlackCell;
+ bm->ClearRange(Bitmap::kBitsPerCell,
+ Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2);
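+ // The range covers only the low half of cell 1; the neighbouring cells
+ // stay black.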
+ EXPECT_EQ(bm->cells()[0], kBlackCell);
+ EXPECT_EQ(bm->cells()[1], 0xAAAA0000);
+ EXPECT_EQ(bm->cells()[2], kBlackCell);
+}
+
+} // namespace
diff --git a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
index e74152a5fd..6413e363f3 100644
--- a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -19,30 +19,17 @@ class GCIdleTimeHandlerTest : public ::testing::Test {
GCIdleTimeHandler* handler() { return &handler_; }
- GCIdleTimeHandler::HeapState DefaultHeapState() {
- GCIdleTimeHandler::HeapState result;
+ GCIdleTimeHeapState DefaultHeapState() {
+ GCIdleTimeHeapState result;
result.contexts_disposed = 0;
result.contexts_disposal_rate = GCIdleTimeHandler::kHighContextDisposalRate;
- result.size_of_objects = kSizeOfObjects;
result.incremental_marking_stopped = false;
- result.sweeping_in_progress = false;
- result.sweeping_completed = false;
- result.mark_compact_speed_in_bytes_per_ms = kMarkCompactSpeed;
- result.incremental_marking_speed_in_bytes_per_ms = kMarkingSpeed;
- result.scavenge_speed_in_bytes_per_ms = kScavengeSpeed;
- result.used_new_space_size = 0;
- result.new_space_capacity = kNewSpaceCapacity;
- result.new_space_allocation_throughput_in_bytes_per_ms =
- kNewSpaceAllocationThroughput;
return result;
}
static const size_t kSizeOfObjects = 100 * MB;
static const size_t kMarkCompactSpeed = 200 * KB;
static const size_t kMarkingSpeed = 200 * KB;
- static const size_t kScavengeSpeed = 100 * KB;
- static const size_t kNewSpaceCapacity = 1 * MB;
- static const size_t kNewSpaceAllocationThroughput = 10 * KB;
static const int kMaxNotifications = 100;
private:
@@ -111,88 +98,6 @@ TEST(GCIdleTimeHandler, EstimateMarkCompactTimeMax) {
}
-TEST_F(GCIdleTimeHandlerTest, DoScavengeEmptyNewSpace) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- int idle_time_ms = 16;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeFullNewSpace) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- int idle_time_ms = 16;
- EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeUnknownScavengeSpeed) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- heap_state.scavenge_speed_in_bytes_per_ms = 0;
- int idle_time_ms = 8;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeLowScavengeSpeed) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- heap_state.scavenge_speed_in_bytes_per_ms = 1 * KB;
- int idle_time_ms = 16;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeLowAllocationRate) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- heap_state.new_space_allocation_throughput_in_bytes_per_ms =
- GCIdleTimeHandler::kLowAllocationThroughput - 1;
- int idle_time_ms = 16;
- EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeHighScavengeSpeed) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- heap_state.scavenge_speed_in_bytes_per_ms = kNewSpaceCapacity;
- int idle_time_ms = 16;
- EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoNotScavengeSmallNewSpaceSize) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = (MB / 2) - 1;
- heap_state.scavenge_speed_in_bytes_per_ms = kNewSpaceCapacity;
- int idle_time_ms = 16;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
TEST_F(GCIdleTimeHandlerTest, ShouldDoMarkCompact) {
size_t idle_time_ms = GCIdleTimeHandler::kMaxScheduledIdleTime;
EXPECT_TRUE(GCIdleTimeHandler::ShouldDoMarkCompact(idle_time_ms, 0, 0));
@@ -221,7 +126,7 @@ TEST_F(GCIdleTimeHandlerTest, DontDoFinalIncrementalMarkCompact) {
TEST_F(GCIdleTimeHandlerTest, ContextDisposeLowRate) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.contexts_disposed = 1;
heap_state.incremental_marking_stopped = true;
double idle_time_ms = 0;
@@ -231,7 +136,7 @@ TEST_F(GCIdleTimeHandlerTest, ContextDisposeLowRate) {
TEST_F(GCIdleTimeHandlerTest, ContextDisposeHighRate) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.contexts_disposed = 1;
heap_state.contexts_disposal_rate =
GCIdleTimeHandler::kHighContextDisposalRate - 1;
@@ -243,7 +148,7 @@ TEST_F(GCIdleTimeHandlerTest, ContextDisposeHighRate) {
TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeZeroIdleTime) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.contexts_disposed = 1;
heap_state.contexts_disposal_rate = 1.0;
heap_state.incremental_marking_stopped = true;
@@ -254,119 +159,49 @@ TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeZeroIdleTime) {
TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime1) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.contexts_disposed = 1;
heap_state.contexts_disposal_rate =
GCIdleTimeHandler::kHighContextDisposalRate;
- size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
- double idle_time_ms =
- static_cast<double>(heap_state.size_of_objects / speed - 1);
+ size_t speed = kMarkCompactSpeed;
+ double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+ EXPECT_EQ(DO_INCREMENTAL_STEP, action.type);
}
TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime2) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.contexts_disposed = 1;
heap_state.contexts_disposal_rate =
GCIdleTimeHandler::kHighContextDisposalRate;
- size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
- double idle_time_ms =
- static_cast<double>(heap_state.size_of_objects / speed - 1);
+ size_t speed = kMarkCompactSpeed;
+ double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+ EXPECT_EQ(DO_INCREMENTAL_STEP, action.type);
}
TEST_F(GCIdleTimeHandlerTest, IncrementalMarking1) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
- double idle_time_ms = 10;
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
- EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
- static_cast<size_t>(action.parameter));
- EXPECT_LT(0, action.parameter);
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, IncrementalMarking2) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
double idle_time_ms = 10;
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
- EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
- static_cast<size_t>(action.parameter));
- EXPECT_LT(0, action.parameter);
+ EXPECT_EQ(DO_INCREMENTAL_STEP, action.type);
}
TEST_F(GCIdleTimeHandlerTest, NotEnoughTime) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.incremental_marking_stopped = true;
- size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
- double idle_time_ms =
- static_cast<double>(heap_state.size_of_objects / speed - 1);
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DONE, action.type);
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, FinalizeSweeping) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.incremental_marking_stopped = true;
- heap_state.sweeping_in_progress = true;
- heap_state.sweeping_completed = true;
- double idle_time_ms = 10.0;
+ size_t speed = kMarkCompactSpeed;
+ double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_FINALIZE_SWEEPING, action.type);
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, CannotFinalizeSweeping) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.incremental_marking_stopped = true;
- heap_state.sweeping_in_progress = true;
- heap_state.sweeping_completed = false;
- double idle_time_ms = 10.0;
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_NOTHING, action.type);
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, Scavenge) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- int idle_time_ms = 10;
- heap_state.used_new_space_size =
- heap_state.new_space_capacity -
- (kNewSpaceAllocationThroughput * idle_time_ms);
- GCIdleTimeAction action =
- handler()->Compute(static_cast<double>(idle_time_ms), heap_state);
- EXPECT_EQ(DO_SCAVENGE, action.type);
- heap_state.used_new_space_size = 0;
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, ScavengeAndDone) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- int idle_time_ms = 10;
- heap_state.incremental_marking_stopped = true;
- heap_state.used_new_space_size =
- heap_state.new_space_capacity -
- (kNewSpaceAllocationThroughput * idle_time_ms);
- GCIdleTimeAction action =
- handler()->Compute(static_cast<double>(idle_time_ms), heap_state);
- EXPECT_EQ(DO_SCAVENGE, action.type);
- heap_state.used_new_space_size = 0;
- action = handler()->Compute(static_cast<double>(idle_time_ms), heap_state);
EXPECT_EQ(DONE, action.type);
}
TEST_F(GCIdleTimeHandlerTest, DoNotStartIncrementalMarking) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.incremental_marking_stopped = true;
double idle_time_ms = 10.0;
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
@@ -375,19 +210,19 @@ TEST_F(GCIdleTimeHandlerTest, DoNotStartIncrementalMarking) {
TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.incremental_marking_stopped = true;
double idle_time_ms = 10.0;
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
EXPECT_EQ(DONE, action.type);
heap_state.incremental_marking_stopped = false;
action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+ EXPECT_EQ(DO_INCREMENTAL_STEP, action.type);
}
TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeNothingToDo) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
for (int i = 0; i < kMaxNotifications; i++) {
GCIdleTimeAction action = handler()->Compute(0, heap_state);
EXPECT_EQ(DO_NOTHING, action.type);
@@ -396,7 +231,7 @@ TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeNothingToDo) {
TEST_F(GCIdleTimeHandlerTest, SmallIdleTimeNothingToDo) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.incremental_marking_stopped = true;
for (int i = 0; i < kMaxNotifications; i++) {
GCIdleTimeAction action = handler()->Compute(10, heap_state);
@@ -405,28 +240,9 @@ TEST_F(GCIdleTimeHandlerTest, SmallIdleTimeNothingToDo) {
}
-TEST_F(GCIdleTimeHandlerTest, DoneIfNotMakingProgressOnSweeping) {
- // Regression test for crbug.com/489323.
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
-
- // Simulate sweeping being in-progress but not complete.
- heap_state.incremental_marking_stopped = true;
- heap_state.sweeping_in_progress = true;
- heap_state.sweeping_completed = false;
- double idle_time_ms = 10.0;
- for (int i = 0; i < GCIdleTimeHandler::kMaxNoProgressIdleTimes; i++) {
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_NOTHING, action.type);
- }
- // We should return DONE after not making progress for some time.
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DONE, action.type);
-}
-
-
TEST_F(GCIdleTimeHandlerTest, DoneIfNotMakingProgressOnIncrementalMarking) {
// Regression test for crbug.com/489323.
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
// Simulate incremental marking stopped and not eligible to start.
heap_state.incremental_marking_stopped = true;
diff --git a/deps/v8/test/unittests/heap/scavenge-job-unittest.cc b/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
new file mode 100644
index 0000000000..dbd463c2d5
--- /dev/null
+++ b/deps/v8/test/unittests/heap/scavenge-job-unittest.cc
@@ -0,0 +1,111 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/globals.h"
+#include "src/heap/scavenge-job.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+const size_t kScavengeSpeedInBytesPerMs = 500 * KB;
+const size_t kNewSpaceCapacity = 8 * MB;
+
+
+TEST(ScavengeJob, AllocationLimitEmptyNewSpace) {
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, 0, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitFullNewSpace) {
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, kNewSpaceCapacity, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitUnknownScavengeSpeed) {
+ size_t expected_size = ScavengeJob::kInitialScavengeSpeedInBytesPerMs *
+ ScavengeJob::kAverageIdleTimeMs -
+ ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
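+ // expected_size marks the exact boundary: one byte below stays under
+ // the idle allocation limit, at the boundary the limit is reached.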
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(0, expected_size - 1,
+ kNewSpaceCapacity));
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(0, expected_size,
+ kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitLowScavengeSpeed) {
+ size_t scavenge_speed = 1 * KB;
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ scavenge_speed, ScavengeJob::kMinAllocationLimit - 1, kNewSpaceCapacity));
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
+ scavenge_speed, ScavengeJob::kMinAllocationLimit, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitAverageScavengeSpeed) {
+ size_t expected_size =
+ kScavengeSpeedInBytesPerMs * ScavengeJob::kAverageIdleTimeMs -
+ ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, ScavengeJob::kMinAllocationLimit,
+ kNewSpaceCapacity));
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, expected_size - 1, kNewSpaceCapacity));
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, expected_size, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitHighScavengeSpeed) {
+ size_t scavenge_speed = kNewSpaceCapacity;
+ size_t expected_size =
+ static_cast<size_t>(
+ kNewSpaceCapacity *
+ ScavengeJob::kMaxAllocationLimitAsFractionOfNewSpace) -
+ ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ scavenge_speed, expected_size - 1, kNewSpaceCapacity));
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
+ scavenge_speed, expected_size, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, EnoughIdleTimeForScavengeUnknownScavengeSpeed) {
+ size_t scavenge_speed = ScavengeJob::kInitialScavengeSpeedInBytesPerMs;
+ size_t new_space_size = 1 * MB;
+ size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
+ EXPECT_TRUE(
+ ScavengeJob::EnoughIdleTimeForScavenge(expected_time, 0, new_space_size));
+ EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(expected_time - 1, 0,
+ new_space_size));
+}
+
+
+TEST(ScavengeJob, EnoughIdleTimeForScavengeLowScavengeSpeed) {
+ size_t scavenge_speed = 1 * KB;
+ size_t new_space_size = 1 * MB;
+ size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
+ EXPECT_TRUE(ScavengeJob::EnoughIdleTimeForScavenge(
+ expected_time, scavenge_speed, new_space_size));
+ EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(
+ expected_time - 1, scavenge_speed, new_space_size));
+}
+
+
+TEST(ScavengeJob, EnoughIdleTimeForScavengeHighScavengeSpeed) {
+ size_t scavenge_speed = kNewSpaceCapacity;
+ size_t new_space_size = 1 * MB;
+ size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
+ EXPECT_TRUE(ScavengeJob::EnoughIdleTimeForScavenge(
+ expected_time, scavenge_speed, new_space_size));
+ EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(
+ expected_time - 1, scavenge_speed, new_space_size));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index aead34770c..779361ffcf 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -5,13 +5,14 @@
#include "src/v8.h"
#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecode-array-iterator.h"
#include "test/unittests/test-utils.h"
namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeArrayBuilderTest : public TestWithIsolate {
+class BytecodeArrayBuilderTest : public TestWithIsolateAndZone {
public:
BytecodeArrayBuilderTest() {}
~BytecodeArrayBuilderTest() override {}
@@ -19,14 +20,16 @@ class BytecodeArrayBuilderTest : public TestWithIsolate {
TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
- BytecodeArrayBuilder builder(isolate());
+ BytecodeArrayBuilder builder(isolate(), zone());
builder.set_locals_count(1);
+ builder.set_parameter_count(0);
CHECK_EQ(builder.locals_count(), 1);
// Emit constant loads.
builder.LoadLiteral(Smi::FromInt(0))
.LoadLiteral(Smi::FromInt(8))
+ .LoadLiteral(Smi::FromInt(10000000))
.LoadUndefined()
.LoadNull()
.LoadTheHole()
@@ -37,13 +40,51 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
Register reg(0);
builder.LoadAccumulatorWithRegister(reg).StoreAccumulatorInRegister(reg);
- // Emit binary operators invocations.
+ // Emit global load operations.
+ builder.LoadGlobal(1);
+
+ // Emit load / store property operations.
+ builder.LoadNamedProperty(reg, 0, LanguageMode::SLOPPY)
+ .LoadKeyedProperty(reg, 0, LanguageMode::SLOPPY)
+ .StoreNamedProperty(reg, reg, 0, LanguageMode::SLOPPY)
+ .StoreKeyedProperty(reg, reg, 0, LanguageMode::SLOPPY);
+
+ // Call operations.
+ builder.Call(reg, reg, 0);
+
+ // Emit binary operator invocations.
builder.BinaryOperation(Token::Value::ADD, reg)
.BinaryOperation(Token::Value::SUB, reg)
.BinaryOperation(Token::Value::MUL, reg)
- .BinaryOperation(Token::Value::DIV, reg);
+ .BinaryOperation(Token::Value::DIV, reg)
+ .BinaryOperation(Token::Value::MOD, reg);
+
+ // Emit test operator invocations.
+ builder.CompareOperation(Token::Value::EQ, reg, LanguageMode::SLOPPY)
+ .CompareOperation(Token::Value::NE, reg, LanguageMode::SLOPPY)
+ .CompareOperation(Token::Value::EQ_STRICT, reg, LanguageMode::SLOPPY)
+ .CompareOperation(Token::Value::NE_STRICT, reg, LanguageMode::SLOPPY)
+ .CompareOperation(Token::Value::LT, reg, LanguageMode::SLOPPY)
+ .CompareOperation(Token::Value::GT, reg, LanguageMode::SLOPPY)
+ .CompareOperation(Token::Value::LTE, reg, LanguageMode::SLOPPY)
+ .CompareOperation(Token::Value::GTE, reg, LanguageMode::SLOPPY)
+ .CompareOperation(Token::Value::INSTANCEOF, reg, LanguageMode::SLOPPY)
+ .CompareOperation(Token::Value::IN, reg, LanguageMode::SLOPPY);
+
+ // Emit cast operator invocations.
+ builder.LoadNull().CastAccumulatorToBoolean();
// Emit control flow. Return must be the last instruction.
+ BytecodeLabel start;
+ builder.Bind(&start);
+ // Short jumps with Imm8 operands
+ builder.Jump(&start).JumpIfTrue(&start).JumpIfFalse(&start);
+ // Insert dummy ops to force longer jumps
+ for (int i = 0; i < 128; i++) {
+ builder.LoadTrue();
+ }
+ // Longer jumps requiring Constant operand
+ builder.Jump(&start).JumpIfTrue(&start).JumpIfFalse(&start);
builder.Return();
// Generate BytecodeArray.
@@ -77,7 +118,8 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
for (int locals = 0; locals < 5; locals++) {
for (int temps = 0; temps < 3; temps++) {
- BytecodeArrayBuilder builder(isolate());
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
builder.set_locals_count(locals);
builder.Return();
@@ -95,7 +137,8 @@ TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
TEST_F(BytecodeArrayBuilderTest, TemporariesRecycled) {
- BytecodeArrayBuilder builder(isolate());
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
builder.set_locals_count(0);
builder.Return();
@@ -132,6 +175,223 @@ TEST_F(BytecodeArrayBuilderTest, RegisterValues) {
CHECK_EQ(actual_index, index);
}
+
+TEST_F(BytecodeArrayBuilderTest, Parameters) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(10);
+ builder.set_locals_count(0);
+
+ Register param0(builder.Parameter(0));
+ Register param9(builder.Parameter(9));
+ CHECK_EQ(param9.index() - param0.index(), 9);
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, Constants) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(0);
+
+ Factory* factory = isolate()->factory();
+ Handle<HeapObject> heap_num_1 = factory->NewHeapNumber(3.14);
+ Handle<HeapObject> heap_num_2 = factory->NewHeapNumber(5.2);
+ Handle<Object> large_smi(Smi::FromInt(0x12345678), isolate());
+ Handle<HeapObject> heap_num_2_copy(*heap_num_2);
+ builder.LoadLiteral(heap_num_1)
+ .LoadLiteral(heap_num_2)
+ .LoadLiteral(large_smi)
+ .LoadLiteral(heap_num_1)
+ .LoadLiteral(heap_num_1)
+ .LoadLiteral(heap_num_2_copy);
+
+ Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ // Should only have one entry for each identical constant.
+ CHECK_EQ(array->constant_pool()->length(), 3);
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
+ static const int kFarJumpDistance = 256;
+
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(0);
+
+ BytecodeLabel far0, far1, far2;
+ BytecodeLabel near0, near1, near2;
+
+ builder.Jump(&near0)
+ .JumpIfTrue(&near1)
+ .JumpIfFalse(&near2)
+ .Bind(&near0)
+ .Bind(&near1)
+ .Bind(&near2)
+ .Jump(&far0)
+ .JumpIfTrue(&far1)
+ .JumpIfFalse(&far2);
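+ // Pad with enough bytecodes that the far labels land beyond the Imm8
+ // jump range, forcing the constant-pool jump variants.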
+ for (int i = 0; i < kFarJumpDistance - 6; i++) {
+ builder.LoadUndefined();
+ }
+ builder.Bind(&far0).Bind(&far1).Bind(&far2);
+ builder.Return();
+
+ Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ DCHECK_EQ(array->length(), 12 + kFarJumpDistance - 6 + 1);
+
+ BytecodeArrayIterator iterator(array);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetSmi8Operand(0), 6);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.GetSmi8Operand(0), 4);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.GetSmi8Operand(0), 2);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpConstant);
+ CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
+ Smi::FromInt(kFarJumpDistance));
+ CHECK_EQ(
+ array->get(iterator.current_offset() +
+ Smi::cast(*iterator.GetConstantForIndexOperand(0))->value()),
+ Bytecodes::ToByte(Bytecode::kReturn));
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrueConstant);
+ CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
+ Smi::FromInt(kFarJumpDistance - 2));
+ CHECK_EQ(
+ array->get(iterator.current_offset() +
+ Smi::cast(*iterator.GetConstantForIndexOperand(0))->value()),
+ Bytecodes::ToByte(Bytecode::kReturn));
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalseConstant);
+ CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
+ Smi::FromInt(kFarJumpDistance - 4));
+ CHECK_EQ(
+ array->get(iterator.current_offset() +
+ Smi::cast(*iterator.GetConstantForIndexOperand(0))->value()),
+ Bytecodes::ToByte(Bytecode::kReturn));
+ iterator.Advance();
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(0);
+
+ BytecodeLabel label0, label1, label2;
+ builder.Bind(&label0)
+ .Jump(&label0)
+ .Bind(&label1)
+ .JumpIfTrue(&label1)
+ .Bind(&label2)
+ .JumpIfFalse(&label2);
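+ // Emit enough short jumps that the following backward jumps exceed the
+ // Imm8 range and must take their offsets from the constant pool.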
+ for (int i = 0; i < 64; i++) {
+ builder.Jump(&label2);
+ }
+ builder.JumpIfFalse(&label2);
+ builder.JumpIfTrue(&label1);
+ builder.Jump(&label0);
+ builder.Return();
+
+ Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ BytecodeArrayIterator iterator(array);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetSmi8Operand(0), 0);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.GetSmi8Operand(0), 0);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.GetSmi8Operand(0), 0);
+ iterator.Advance();
+ for (int i = 0; i < 64; i++) {
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetSmi8Operand(0), -i * 2 - 2);
+ iterator.Advance();
+ }
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalseConstant);
+ CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -130);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrueConstant);
+ CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -134);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpConstant);
+ CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -138);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
+ iterator.Advance();
+ CHECK(iterator.done());
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(0);
+
+ // Labels can only have one forward reference, but
+ // can be referred to multiple times once bound.
+ BytecodeLabel label;
+
+ builder.Jump(&label).Bind(&label).Jump(&label).Jump(&label).Return();
+
+ Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ BytecodeArrayIterator iterator(array);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetSmi8Operand(0), 2);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetSmi8Operand(0), 0);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetSmi8Operand(0), -2);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
+ iterator.Advance();
+ CHECK(iterator.done());
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
+ static const int kRepeats = 3;
+
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(0);
+
+ for (int i = 0; i < kRepeats; i++) {
+ BytecodeLabel label;
+ builder.Jump(&label).Bind(&label).Jump(&label).Jump(&label);
+ }
+
+ builder.Return();
+
+ Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ BytecodeArrayIterator iterator(array);
+ for (int i = 0; i < kRepeats; i++) {
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetSmi8Operand(0), 2);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetSmi8Operand(0), 0);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetSmi8Operand(0), -2);
+ iterator.Advance();
+ }
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
+ iterator.Advance();
+ CHECK(iterator.done());
+}
+
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
new file mode 100644
index 0000000000..b270856264
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -0,0 +1,100 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayIteratorTest : public TestWithIsolateAndZone {
+ public:
+ BytecodeArrayIteratorTest() {}
+ ~BytecodeArrayIteratorTest() override {}
+};
+
+
+TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
+ // Use a builder to create an array containing multiple bytecodes
+ // with 0, 1 and 2 operands.
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(3);
+ builder.set_locals_count(2);
+
+ Factory* factory = isolate()->factory();
+ Handle<HeapObject> heap_num_0 = factory->NewHeapNumber(2.718);
+ Handle<HeapObject> heap_num_1 = factory->NewHeapNumber(2147483647);
+ Smi* zero = Smi::FromInt(0);
+ Smi* smi_0 = Smi::FromInt(64);
+ Smi* smi_1 = Smi::FromInt(-65536);
+ Register reg_0(0);
+ Register reg_1(1);
+ Register reg_2 = Register::FromParameterIndex(2, builder.parameter_count());
+ int feedback_slot = 97;
+
+ builder.LoadLiteral(heap_num_0)
+ .LoadLiteral(heap_num_1)
+ .LoadLiteral(zero)
+ .LoadLiteral(smi_0)
+ .LoadLiteral(smi_1)
+ .LoadAccumulatorWithRegister(reg_0)
+ .LoadNamedProperty(reg_1, feedback_slot, LanguageMode::SLOPPY)
+ .StoreAccumulatorInRegister(reg_2)
+ .Return();
+
+ // Test that the iterator sees the expected output from the builder.
+ BytecodeArrayIterator iterator(builder.ToBytecodeArray());
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
+ CHECK(iterator.GetConstantForIndexOperand(0).is_identical_to(heap_num_0));
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
+ CHECK(iterator.GetConstantForIndexOperand(0).is_identical_to(heap_num_1));
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi8);
+ CHECK_EQ(Smi::FromInt(iterator.GetSmi8Operand(0)), smi_0);
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
+ CHECK_EQ(*iterator.GetConstantForIndexOperand(0), smi_1);
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdar);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLoadIC);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ CHECK_EQ(iterator.GetIndexOperand(1), feedback_slot);
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_2.index());
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
+ CHECK(!iterator.done());
+ iterator.Advance();
+ CHECK(iterator.done());
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
new file mode 100644
index 0000000000..729978643f
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecodes-unittest.cc
@@ -0,0 +1,63 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecodes.h"
+#include "test/unittests/test-utils.h"
+
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+TEST(OperandConversion, Registers) {
+ for (int i = 0; i < 128; i++) {
+ uint8_t operand_value = Register(i).ToOperand();
+ Register r = Register::FromOperand(operand_value);
+ CHECK_EQ(i, r.index());
+ }
+}
+
+
+TEST(OperandConversion, Parameters) {
+ int parameter_counts[] = {7, 13, 99};
+
+ size_t count = sizeof(parameter_counts) / sizeof(parameter_counts[0]);
+ for (size_t p = 0; p < count; p++) {
+ int parameter_count = parameter_counts[p];
+ for (int i = 0; i < parameter_count; i++) {
+ Register r = Register::FromParameterIndex(i, parameter_count);
+ uint8_t operand_value = r.ToOperand();
+ Register s = Register::FromOperand(operand_value);
+ CHECK_EQ(i, s.ToParameterIndex(parameter_count));
+ }
+ }
+}
+
+
+TEST(OperandConversion, RegistersParametersNoOverlap) {
+ std::vector<uint8_t> operand_count(256);
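+ // Count how often each byte operand value occurs; every register and
+ // parameter must map to a distinct operand.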
+
+ for (int i = 0; i <= Register::kMaxRegisterIndex; i++) {
+ Register r = Register(i);
+ uint8_t operand = r.ToOperand();
+ operand_count[operand] += 1;
+ CHECK_EQ(operand_count[operand], 1);
+ }
+
+ int parameter_count = Register::MaxParameterIndex() + 1;
+ for (int i = 0; i < parameter_count; i++) {
+ Register r = Register::FromParameterIndex(i, parameter_count);
+ uint8_t operand = r.ToOperand();
+ operand_count[operand] += 1;
+ CHECK_EQ(operand_count[operand], 1);
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/runtime/runtime-interpreter-unittest.cc b/deps/v8/test/unittests/runtime/runtime-interpreter-unittest.cc
new file mode 100644
index 0000000000..0c3d414798
--- /dev/null
+++ b/deps/v8/test/unittests/runtime/runtime-interpreter-unittest.cc
@@ -0,0 +1,172 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/factory.h"
+#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
+#include "src/runtime/runtime.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class RuntimeInterpreterTest : public TestWithIsolateAndZone {
+ public:
+ typedef Object* (*RuntimeMethod)(int, Object**, Isolate*);
+
+ RuntimeInterpreterTest() {}
+ ~RuntimeInterpreterTest() override {}
+
+ bool TestOperatorWithObjects(RuntimeMethod method, Handle<Object> lhs,
+ Handle<Object> rhs, bool expected);
+ bool TestOperator(RuntimeMethod method, int32_t lhs, int32_t rhs,
+ bool expected);
+ bool TestOperator(RuntimeMethod method, double lhs, double rhs,
+ bool expected);
+ bool TestOperator(RuntimeMethod method, const char* lhs, const char* rhs,
+ bool expected);
+};
+
+
+bool RuntimeInterpreterTest::TestOperatorWithObjects(RuntimeMethod method,
+ Handle<Object> lhs,
+ Handle<Object> rhs,
+ bool expected) {
+ Object* args_object[] = {*rhs, *lhs};
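+ // V8 runtime calls index their arguments downward in memory, so the
+ // operands are stored in reverse and a pointer to the last element is
+ // passed.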
+ Handle<Object> result =
+ handle(method(2, &args_object[1], isolate()), isolate());
+ CHECK(result->IsTrue() || result->IsFalse());
+ return result->IsTrue() == expected;
+}
+
+
+bool RuntimeInterpreterTest::TestOperator(RuntimeMethod method, int32_t lhs,
+ int32_t rhs, bool expected) {
+ Handle<Object> x = isolate()->factory()->NewNumberFromInt(lhs);
+ Handle<Object> y = isolate()->factory()->NewNumberFromInt(rhs);
+ return TestOperatorWithObjects(method, x, y, expected);
+}
+
+
+bool RuntimeInterpreterTest::TestOperator(RuntimeMethod method, double lhs,
+ double rhs, bool expected) {
+ Handle<Object> x = isolate()->factory()->NewNumber(lhs);
+ Handle<Object> y = isolate()->factory()->NewNumber(rhs);
+ CHECK_EQ(HeapNumber::cast(*x)->value(), lhs);
+ CHECK_EQ(HeapNumber::cast(*y)->value(), rhs);
+ return TestOperatorWithObjects(method, x, y, expected);
+}
+
+
+bool RuntimeInterpreterTest::TestOperator(RuntimeMethod method, const char* lhs,
+ const char* rhs, bool expected) {
+ Handle<Object> x = isolate()->factory()->NewStringFromAsciiChecked(lhs);
+ Handle<Object> y = isolate()->factory()->NewStringFromAsciiChecked(rhs);
+ return TestOperatorWithObjects(method, x, y, expected);
+}
+
+
+TEST_F(RuntimeInterpreterTest, TestOperatorsWithIntegers) {
+ int32_t inputs[] = {kMinInt, Smi::kMinValue, -17, -1, 0, 1,
+ 991, Smi::kMaxValue, kMaxInt};
+ TRACED_FOREACH(int, lhs, inputs) {
+ TRACED_FOREACH(int, rhs, inputs) {
+#define INTEGER_OPERATOR_CHECK(r, op, x, y) \
+ CHECK(TestOperator(Runtime_Interpreter##r, x, y, x op y))
+ INTEGER_OPERATOR_CHECK(Equals, ==, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(NotEquals, !=, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(StrictEquals, ==, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(StrictNotEquals, !=, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(LessThan, <, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(GreaterThan, >, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(LessThanOrEqual, <=, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(GreaterThanOrEqual, >=, lhs, rhs);
+#undef INTEGER_OPERATOR_CHECK
+ }
+ }
+}
+
+
+TEST_F(RuntimeInterpreterTest, TestOperatorsWithDoubles) {
+ double inputs[] = {std::numeric_limits<double>::min(),
+ std::numeric_limits<double>::max(),
+ -0.001,
+ 0.01,
+ 3.14,
+ -6.02214086e23};
+ TRACED_FOREACH(double, lhs, inputs) {
+ TRACED_FOREACH(double, rhs, inputs) {
+#define DOUBLE_OPERATOR_CHECK(r, op, x, y) \
+ CHECK(TestOperator(Runtime_Interpreter##r, x, y, x op y))
+ DOUBLE_OPERATOR_CHECK(Equals, ==, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(NotEquals, !=, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(StrictEquals, ==, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(StrictNotEquals, !=, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(LessThan, <, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(GreaterThan, >, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(LessThanOrEqual, <=, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(GreaterThanOrEqual, >=, lhs, rhs);
+#undef DOUBLE_OPERATOR_CHECK
+ }
+ }
+}
+
+
+TEST_F(RuntimeInterpreterTest, TestOperatorsWithString) {
+ const char* inputs[] = {"abc", "a", "def", "0"};
+ TRACED_FOREACH(const char*, lhs, inputs) {
+ TRACED_FOREACH(const char*, rhs, inputs) {
+#define STRING_OPERATOR_CHECK(r, op, x, y) \
+ CHECK(TestOperator(Runtime_Interpreter##r, x, y, \
+ std::string(x) op std::string(y)))
+ STRING_OPERATOR_CHECK(Equals, ==, lhs, rhs);
+ STRING_OPERATOR_CHECK(NotEquals, !=, lhs, rhs);
+ STRING_OPERATOR_CHECK(StrictEquals, ==, lhs, rhs);
+ STRING_OPERATOR_CHECK(StrictNotEquals, !=, lhs, rhs);
+ STRING_OPERATOR_CHECK(LessThan, <, lhs, rhs);
+ STRING_OPERATOR_CHECK(GreaterThan, >, lhs, rhs);
+ STRING_OPERATOR_CHECK(LessThanOrEqual, <=, lhs, rhs);
+ STRING_OPERATOR_CHECK(GreaterThanOrEqual, >=, lhs, rhs);
+#undef STRING_OPERATOR_CHECK
+ }
+ }
+}
+
+
+TEST_F(RuntimeInterpreterTest, ToBoolean) {
+ double quiet_nan = std::numeric_limits<double>::quiet_NaN();
+ std::pair<Handle<Object>, bool> cases[] = {
+ std::make_pair(isolate()->factory()->NewNumberFromInt(0), false),
+ std::make_pair(isolate()->factory()->NewNumberFromInt(1), true),
+ std::make_pair(isolate()->factory()->NewNumberFromInt(100), true),
+ std::make_pair(isolate()->factory()->NewNumberFromInt(-1), true),
+ std::make_pair(isolate()->factory()->NewNumber(7.7), true),
+ std::make_pair(isolate()->factory()->NewNumber(0.00001), true),
+ std::make_pair(isolate()->factory()->NewNumber(quiet_nan), false),
+ std::make_pair(isolate()->factory()->NewHeapNumber(0.0), false),
+ std::make_pair(isolate()->factory()->undefined_value(), false),
+ std::make_pair(isolate()->factory()->null_value(), false),
+ std::make_pair(isolate()->factory()->true_value(), true),
+ std::make_pair(isolate()->factory()->false_value(), false),
+ std::make_pair(isolate()->factory()->NewStringFromStaticChars(""), false),
+ std::make_pair(isolate()->factory()->NewStringFromStaticChars("_"), true),
+ };
+
+ for (size_t i = 0; i < arraysize(cases); i++) {
+ auto& value_expected_tuple = cases[i];
+ Object* args_object[] = {*value_expected_tuple.first};
+ Handle<Object> result = handle(
+ Runtime_InterpreterToBoolean(1, &args_object[0], isolate()), isolate());
+ CHECK(result->IsBoolean());
+ CHECK_EQ(result->IsTrue(), value_expected_tuple.second);
+ }
+}
+
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 60a5dea888..663ae372c5 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -27,6 +27,7 @@
'V8_IMMINENT_DEPRECATION_WARNINGS',
],
'sources': [ ### gcmole(all) ###
+ 'atomic-utils-unittest.cc',
'base/bits-unittest.cc',
'base/cpu-unittest.cc',
'base/division-by-constant-unittest.cc',
@@ -42,6 +43,7 @@
'base/sys-info-unittest.cc',
'base/utils/random-number-generator-unittest.cc',
'char-predicates-unittest.cc',
+ 'compiler/bytecode-graph-builder-unittest.cc',
'compiler/change-lowering-unittest.cc',
'compiler/coalesced-live-ranges-unittest.cc',
'compiler/common-operator-reducer-unittest.cc',
@@ -70,6 +72,7 @@
'compiler/js-type-feedback-unittest.cc',
'compiler/linkage-tail-call-unittest.cc',
'compiler/liveness-analyzer-unittest.cc',
+ 'compiler/live-range-unittest.cc',
'compiler/load-elimination-unittest.cc',
'compiler/loop-peeling-unittest.cc',
'compiler/machine-operator-reducer-unittest.cc',
@@ -94,14 +97,19 @@
'compiler/value-numbering-reducer-unittest.cc',
'compiler/zone-pool-unittest.cc',
'counters-unittest.cc',
+ 'interpreter/bytecodes-unittest.cc',
'interpreter/bytecode-array-builder-unittest.cc',
+ 'interpreter/bytecode-array-iterator-unittest.cc',
'libplatform/default-platform-unittest.cc',
'libplatform/task-queue-unittest.cc',
'libplatform/worker-thread-unittest.cc',
+ 'heap/bitmap-unittest.cc',
'heap/gc-idle-time-handler-unittest.cc',
'heap/memory-reducer-unittest.cc',
'heap/heap-unittest.cc',
+ 'heap/scavenge-job-unittest.cc',
'run-all-unittests.cc',
+ 'runtime/runtime-interpreter-unittest.cc',
'test-utils.h',
'test-utils.cc',
],
diff --git a/deps/v8/test/webkit/fast/js/object-prototype-properties-expected.txt b/deps/v8/test/webkit/fast/js/object-prototype-properties-expected.txt
index 0d936f6c51..55bf6fd202 100644
--- a/deps/v8/test/webkit/fast/js/object-prototype-properties-expected.txt
+++ b/deps/v8/test/webkit/fast/js/object-prototype-properties-expected.txt
@@ -32,7 +32,7 @@ PASS Object.prototype.toLocaleString.call(undefined) threw exception TypeError:
PASS Object.prototype.valueOf.call(undefined) threw exception TypeError: Cannot convert undefined or null to object.
PASS Object.prototype.hasOwnProperty.call(undefined, 'hasOwnProperty') threw exception TypeError: Cannot convert undefined or null to object.
PASS Object.prototype.propertyIsEnumerable.call(undefined, 'propertyIsEnumerable') threw exception TypeError: Cannot convert undefined or null to object.
-PASS Object.prototype.isPrototypeOf.call(undefined, this) threw exception TypeError: Object.prototype.isPrototypeOf called on null or undefined.
+PASS Object.prototype.isPrototypeOf.call(undefined, this) threw exception TypeError: Cannot convert undefined or null to object.
PASS successfullyParsed is true
TEST COMPLETE
diff --git a/deps/v8/test/webkit/function-apply-aliased-expected.txt b/deps/v8/test/webkit/function-apply-aliased-expected.txt
index 8007e1a546..01962d341c 100644
--- a/deps/v8/test/webkit/function-apply-aliased-expected.txt
+++ b/deps/v8/test/webkit/function-apply-aliased-expected.txt
@@ -45,8 +45,6 @@ PASS myFunctionWithApply.aliasedApply(myObject, ['arg1']) is [myObject, "myFunct
PASS myFunctionWithApply.apply(myObject, arg1Array) is [myFunctionWithApply, "myFunctionWithApply.apply", myObject]
PASS forwarder(myFunctionWithApply, myObject, arg1Array) is [myFunctionWithApply, "myFunctionWithApply.apply", myObject]
PASS myFunctionWithApply.aliasedApply(myObject, arg1Array) is [myObject, "myFunctionWithApply", "arg1"]
-PASS myFunction.apply(null, new Array(500000)) threw exception RangeError: Maximum call stack size exceeded.
-PASS myFunction.apply(null, new Array(1 << 30)) threw exception RangeError: Maximum call stack size exceeded.
PASS recurseArguments.apply(null, new Array(50000)) threw exception RangeError: Maximum call stack size exceeded.
PASS successfullyParsed is true
diff --git a/deps/v8/test/webkit/function-apply-aliased.js b/deps/v8/test/webkit/function-apply-aliased.js
index a6a7ff4533..4c46c6c66a 100644
--- a/deps/v8/test/webkit/function-apply-aliased.js
+++ b/deps/v8/test/webkit/function-apply-aliased.js
@@ -68,18 +68,5 @@ shouldBe("myFunctionWithApply.apply(myObject, arg1Array)", '[myFunctionWithApply
shouldBe("forwarder(myFunctionWithApply, myObject, arg1Array)", '[myFunctionWithApply, "myFunctionWithApply.apply", myObject]');
shouldBe("myFunctionWithApply.aliasedApply(myObject, arg1Array)", '[myObject, "myFunctionWithApply", "arg1"]');
-function stackOverflowTest() {
- try {
- var a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z;
- stackOverflowTest();
- } catch(e) {
- // Blow the stack with a sparse array
- shouldThrow("myFunction.apply(null, new Array(500000))");
- // Blow the stack with a sparse array that is sufficiently large to cause int overflow
- shouldThrow("myFunction.apply(null, new Array(1 << 30))");
- }
-}
-stackOverflowTest();
-
// Blow the stack recursing with arguments
shouldThrow("recurseArguments.apply(null, new Array(50000))");
diff --git a/deps/v8/test/webkit/testcfg.py b/deps/v8/test/webkit/testcfg.py
index aa81964f5e..ed811d2922 100644
--- a/deps/v8/test/webkit/testcfg.py
+++ b/deps/v8/test/webkit/testcfg.py
@@ -55,7 +55,9 @@ class WebkitTestSuite(testsuite.TestSuite):
files.sort()
for filename in files:
if filename.endswith(".js"):
- testname = os.path.join(dirname[len(self.root) + 1:], filename[:-3])
+ fullpath = os.path.join(dirname, filename)
+ relpath = fullpath[len(self.root) + 1 : -3]
+ testname = relpath.replace(os.path.sep, "/")
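+ # Normalize path separators so test names are stable across platforms.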
test = testcase.TestCase(self, testname)
tests.append(test)
return tests
diff --git a/deps/v8/tools/cfi/blacklist.txt b/deps/v8/tools/cfi/blacklist.txt
new file mode 100644
index 0000000000..0ad565eafb
--- /dev/null
+++ b/deps/v8/tools/cfi/blacklist.txt
@@ -0,0 +1,4 @@
+# All std:: types
+# This should be possible to remove, if/when we build against
+# a statically linked libc++.
+type:std::* \ No newline at end of file
diff --git a/deps/v8/tools/check-name-clashes.py b/deps/v8/tools/check-name-clashes.py
deleted file mode 100755
index 25f3aace55..0000000000
--- a/deps/v8/tools/check-name-clashes.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import js2c
-import os
-import re
-import sys
-
-FILENAME = "src/runtime/runtime.h"
-LISTHEAD = re.compile(r"#define\s+(FOR_EACH_\w+)\((\w+)\)")
-LISTBODY = re.compile(r".*\\$")
-
-
-class Function(object):
- def __init__(self, match):
- self.name = match.group(1).strip()
-
-def ListMacroRe(list):
- macro = LISTHEAD.match(list[0]).group(2)
- re_string = "\s*%s\((\w+)" % macro
- return re.compile(re_string)
-
-
-def FindLists(filename):
- lists = []
- current_list = []
- mode = "SEARCHING"
- with open(filename, "r") as f:
- for line in f:
- if mode == "SEARCHING":
- match = LISTHEAD.match(line)
- if match:
- mode = "APPENDING"
- current_list.append(line)
- else:
- current_list.append(line)
- match = LISTBODY.match(line)
- if not match:
- mode = "SEARCHING"
- lists.append(current_list)
- current_list = []
- return lists
-
-
-# Detects runtime functions by parsing FILENAME.
-def FindRuntimeFunctions():
- functions = []
- lists = FindLists(FILENAME)
- for list in lists:
- function_re = ListMacroRe(list)
- for line in list:
- match = function_re.match(line)
- if match:
- functions.append(Function(match))
- return functions
-
-
-class Builtin(object):
- def __init__(self, match):
- self.name = match.group(1)
-
-
-def FindJSNatives():
- PATH = "src"
- fileslist = []
- for (root, dirs, files) in os.walk(PATH):
- for f in files:
- if f.endswith(".js"):
- fileslist.append(os.path.join(root, f))
- natives = []
- regexp = re.compile("^function (\w+)\s*\((.*?)\) {")
- matches = 0
- for filename in fileslist:
- with open(filename, "r") as f:
- file_contents = f.read()
- file_contents = js2c.ExpandInlineMacros(file_contents)
- lines = file_contents.split("\n")
- partial_line = ""
- for line in lines:
- if line.startswith("function") and not '{' in line:
- partial_line += line.rstrip()
- continue
- if partial_line:
- partial_line += " " + line.strip()
- if '{' in line:
- line = partial_line
- partial_line = ""
- else:
- continue
- match = regexp.match(line)
- if match:
- natives.append(Builtin(match))
- return natives
-
-
-def Main():
- functions = FindRuntimeFunctions()
- natives = FindJSNatives()
- errors = 0
- runtime_map = {}
- for f in functions:
- runtime_map[f.name] = 1
- for b in natives:
- if b.name in runtime_map:
- print("JS_Native/Runtime_Function name clash: %s" % b.name)
- errors += 1
-
- if errors > 0:
- return 1
- print("Runtime/Natives name clashes: checked %d/%d functions, all good." %
- (len(functions), len(natives)))
- return 0
-
-
-if __name__ == "__main__":
- sys.exit(Main())
diff --git a/deps/v8/tools/eval_gc_nvp.py b/deps/v8/tools/eval_gc_nvp.py
new file mode 100755
index 0000000000..8a9b8e7072
--- /dev/null
+++ b/deps/v8/tools/eval_gc_nvp.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This script is used to analyze GCTracer's NVP output."""
+
+
+from argparse import ArgumentParser
+from copy import deepcopy
+from gc_nvp_common import split_nvp
+from math import log
+from sys import stdin
+
+
+class LinearBucket:
+ def __init__(self, granularity):
+ self.granularity = granularity
+
+ def value_to_bucket(self, value):
+ return int(value / self.granularity)
+
+ def bucket_to_range(self, bucket):
+ return (bucket * self.granularity, (bucket + 1) * self.granularity)
+
+
+class Log2Bucket:
+ def __init__(self, start):
+ self.start = int(log(start, 2)) - 1
+
+ def value_to_bucket(self, value):
+ index = int(log(value, 2))
+ index -= self.start
+ if index < 0:
+ index = 0
+ return index
+
+ def bucket_to_range(self, bucket):
+ if bucket == 0:
+ return (0, 2 ** (self.start + 1))
+ bucket += self.start
+ return (2 ** bucket, 2 ** (bucket + 1))
+
+
+class Histogram:
+ def __init__(self, bucket_trait, fill_empty):
+ self.histogram = {}
+ self.fill_empty = fill_empty
+ self.bucket_trait = bucket_trait
+
+ def add(self, key):
+ index = self.bucket_trait.value_to_bucket(key)
+ if index not in self.histogram:
+ self.histogram[index] = 0
+ self.histogram[index] += 1
+
+ def __str__(self):
+ ret = []
+ keys = self.histogram.keys()
+ keys.sort()
+ last = keys[len(keys) - 1]
+ for i in range(0, last + 1):
+ (min_value, max_value) = self.bucket_trait.bucket_to_range(i)
+ if i == keys[0]:
+ keys.pop(0)
+ ret.append(" [{0},{1}[: {2}".format(
+ str(min_value), str(max_value), self.histogram[i]))
+ else:
+ if self.fill_empty:
+ ret.append(" [{0},{1}[: {2}".format(
+ str(min_value), str(max_value), 0))
+ return "\n".join(ret)
+
+
+class Category:
+ def __init__(self, key, histogram):
+ self.key = key
+ self.values = []
+ self.histogram = histogram
+
+ def process_entry(self, entry):
+ if self.key in entry:
+ self.values.append(float(entry[self.key]))
+ if self.histogram:
+ self.histogram.add(float(entry[self.key]))
+
+ def __str__(self):
+ ret = [self.key]
+ ret.append(" len: {0}".format(len(self.values)))
+ if len(self.values) > 0:
+ ret.append(" min: {0}".format(min(self.values)))
+ ret.append(" max: {0}".format(max(self.values)))
+ ret.append(" avg: {0}".format(sum(self.values) / len(self.values)))
+ if self.histogram:
+ ret.append(str(self.histogram))
+ return "\n".join(ret)
+
+
+def main():
+ parser = ArgumentParser(description="Process GCTracer's NVP output")
+ parser.add_argument('keys', metavar='KEY', type=str, nargs='+',
+ help='the keys of NVPs to process')
+ parser.add_argument('--histogram-type', metavar='<linear|log2>',
+ type=str, nargs='?', default="linear",
+ help='histogram type to use (default: linear)')
+ linear_group = parser.add_argument_group('linear histogram specific')
+ linear_group.add_argument('--linear-histogram-granularity',
+ metavar='GRANULARITY', type=int, nargs='?',
+ default=5,
+ help='histogram granularity (default: 5)')
+ log2_group = parser.add_argument_group('log2 histogram specific')
+ log2_group.add_argument('--log2-histogram-init-bucket', metavar='START',
+ type=int, nargs='?', default=64,
+                          help='initial bucket size (default: 64)')
+ parser.add_argument('--histogram-omit-empty-buckets',
+ dest='histogram_omit_empty',
+ action='store_true',
+ help='omit empty histogram buckets')
+ parser.add_argument('--no-histogram', dest='histogram',
+ action='store_false', help='do not print histogram')
+ parser.set_defaults(histogram=True)
+ parser.set_defaults(histogram_omit_empty=False)
+ args = parser.parse_args()
+
+ histogram = None
+ if args.histogram:
+ bucket_trait = None
+ if args.histogram_type == "log2":
+ bucket_trait = Log2Bucket(args.log2_histogram_init_bucket)
+ else:
+ bucket_trait = LinearBucket(args.linear_histogram_granularity)
+ histogram = Histogram(bucket_trait, not args.histogram_omit_empty)
+
+ categories = [ Category(key, deepcopy(histogram))
+ for key in args.keys ]
+
+ while True:
+ line = stdin.readline()
+ if not line:
+ break
+ obj = split_nvp(line)
+ for category in categories:
+ category.process_entry(obj)
+
+ for category in categories:
+ print(category)
+
+
+if __name__ == '__main__':
+ main()
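
The new eval_gc_nvp.py buckets selected NVP keys into linear or log2 histograms. A small self-contained check (illustrative, not part of the patch) of the Log2Bucket mapping with the default start of 64: values below the start collapse into bucket 0, and each later bucket spans a power-of-two range.

    from math import log

    start = int(log(64, 2)) - 1  # 5, mirroring Log2Bucket.__init__

    def value_to_bucket(value):
        return max(int(log(value, 2)) - start, 0)

    assert value_to_bucket(10) == 0    # falls in [0, 64) -> bucket 0
    assert value_to_bucket(100) == 1   # falls in [64, 128)
    assert value_to_bucket(200) == 2   # falls in [128, 256)
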
diff --git a/deps/v8/tools/fuzz-harness.sh b/deps/v8/tools/fuzz-harness.sh
index cef59868a9..31023de3ab 100755
--- a/deps/v8/tools/fuzz-harness.sh
+++ b/deps/v8/tools/fuzz-harness.sh
@@ -85,7 +85,9 @@ python -u "$jsfunfuzz_dir/jsfunfuzz/multi_timed_run.py" 300 \
"$d8" $flags "$jsfunfuzz_dir/jsfunfuzz/jsfunfuzz.js"
exit_code=$(cat w* | grep " looking good" -c)
exit_code=$((100-exit_code))
-tar -cjf fuzz-results-$(date +%Y%m%d%H%M%S).tar.bz2 err-* w*
+archive=fuzz-results-$(date +%Y%m%d%H%M%S).tar.bz2
+echo "Creating archive $archive"
+tar -cjf $archive err-* w*
rm -f err-* w*
echo "Total failures: $exit_code"
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index e3c9dbe076..1b6a6bbcf0 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -98,30 +98,6 @@ consts_misc = [
'value': 'PropertyDetails::FieldIndexField::kMask' },
{ 'name': 'prop_index_shift',
'value': 'PropertyDetails::FieldIndexField::kShift' },
- { 'name': 'prop_representation_mask',
- 'value': 'PropertyDetails::RepresentationField::kMask' },
- { 'name': 'prop_representation_shift',
- 'value': 'PropertyDetails::RepresentationField::kShift' },
- { 'name': 'prop_representation_integer8',
- 'value': 'Representation::Kind::kInteger8' },
- { 'name': 'prop_representation_uinteger8',
- 'value': 'Representation::Kind::kUInteger8' },
- { 'name': 'prop_representation_integer16',
- 'value': 'Representation::Kind::kInteger16' },
- { 'name': 'prop_representation_uinteger16',
- 'value': 'Representation::Kind::kUInteger16' },
- { 'name': 'prop_representation_smi',
- 'value': 'Representation::Kind::kSmi' },
- { 'name': 'prop_representation_integer32',
- 'value': 'Representation::Kind::kInteger32' },
- { 'name': 'prop_representation_double',
- 'value': 'Representation::Kind::kDouble' },
- { 'name': 'prop_representation_heapobject',
- 'value': 'Representation::Kind::kHeapObject' },
- { 'name': 'prop_representation_tagged',
- 'value': 'Representation::Kind::kTagged' },
- { 'name': 'prop_representation_external',
- 'value': 'Representation::Kind::kExternal' },
{ 'name': 'prop_desc_key',
'value': 'DescriptorArray::kDescriptorKey' },
@@ -145,10 +121,6 @@ consts_misc = [
'value': 'Map::ElementsKindBits::kShift' },
{ 'name': 'bit_field3_dictionary_map_shift',
'value': 'Map::DictionaryMap::kShift' },
- { 'name': 'bit_field3_number_of_own_descriptors_mask',
- 'value': 'Map::NumberOfOwnDescriptorsBits::kMask' },
- { 'name': 'bit_field3_number_of_own_descriptors_shift',
- 'value': 'Map::NumberOfOwnDescriptorsBits::kShift' },
{ 'name': 'off_fp_context',
'value': 'StandardFrameConstants::kContextOffset' },
@@ -167,37 +139,20 @@ consts_misc = [
'value': 'ScopeInfo::kStackLocalCount' },
{ 'name': 'scopeinfo_idx_ncontextlocals',
'value': 'ScopeInfo::kContextLocalCount' },
- { 'name': 'scopeinfo_idx_ncontextglobals',
- 'value': 'ScopeInfo::kContextGlobalCount' },
{ 'name': 'scopeinfo_idx_first_vars',
'value': 'ScopeInfo::kVariablePartIndex' },
-
- { 'name': 'sharedfunctioninfo_start_position_mask',
- 'value': 'SharedFunctionInfo::kStartPositionMask' },
- { 'name': 'sharedfunctioninfo_start_position_shift',
- 'value': 'SharedFunctionInfo::kStartPositionShift' },
-
- { 'name': 'jsarray_buffer_was_neutered_mask',
- 'value': 'JSArrayBuffer::WasNeutered::kMask' },
- { 'name': 'jsarray_buffer_was_neutered_shift',
- 'value': 'JSArrayBuffer::WasNeutered::kShift' },
];
#
# The following useful fields are missing accessors, so we define fake ones.
#
extras_accessors = [
- 'JSFunction, context, Context, kContextOffset',
- 'Context, closure_index, int, CLOSURE_INDEX',
- 'Context, global_object_index, int, GLOBAL_OBJECT_INDEX',
- 'Context, previous_index, int, PREVIOUS_INDEX',
- 'Context, min_context_slots, int, MIN_CONTEXT_SLOTS',
'HeapObject, map, Map, kMapOffset',
'JSObject, elements, Object, kElementsOffset',
'FixedArray, data, uintptr_t, kHeaderSize',
- 'JSTypedArray, length, Object, kLengthOffset',
'JSArrayBuffer, backing_store, Object, kBackingStoreOffset',
'JSArrayBufferView, byte_offset, Object, kByteOffsetOffset',
+ 'JSTypedArray, length, Object, kLengthOffset',
'Map, instance_attributes, int, kInstanceAttributesOffset',
'Map, inobject_properties_or_constructor_function_index, int, kInObjectPropertiesOrConstructorFunctionIndexOffset',
'Map, instance_size, int, kInstanceSizeOffset',
@@ -207,7 +162,6 @@ extras_accessors = [
'Map, prototype, Object, kPrototypeOffset',
'NameDictionaryShape, prefix_size, int, kPrefixSize',
'NameDictionaryShape, entry_size, int, kEntrySize',
- 'NameDictionary, prefix_start_index, int, kPrefixStartIndex',
'SeededNumberDictionaryShape, prefix_size, int, kPrefixSize',
'UnseededNumberDictionaryShape, prefix_size, int, kPrefixSize',
'NumberDictionaryShape, entry_size, int, kEntrySize',
@@ -219,7 +173,6 @@ extras_accessors = [
'SeqOneByteString, chars, char, kHeaderSize',
'SeqTwoByteString, chars, char, kHeaderSize',
'SharedFunctionInfo, code, Code, kCodeOffset',
- 'SharedFunctionInfo, scope_info, ScopeInfo, kScopeInfoOffset',
'SlicedString, parent, String, kParentOffset',
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
@@ -274,20 +227,6 @@ footer = '''
'''
#
-# Get the base class
-#
-def get_base_class(klass):
- if (klass == 'Object'):
- return klass;
-
- if (not (klass in klasses)):
- return None;
-
- k = klasses[klass];
-
- return get_base_class(k['parent']);
-
-#
# Loads class hierarchy and type information from "objects.h".
#
def load_objects():
@@ -325,14 +264,12 @@ def load_objects():
typestr += line;
continue;
- match = re.match('class (\w[^:]*)(: public (\w[^{]*))?\s*{\s*',
+ match = re.match('class (\w[^\s:]*)(: public (\w[^\s{]*))?\s*{',
line);
if (match):
- klass = match.group(1).rstrip().lstrip();
+ klass = match.group(1);
pklass = match.group(3);
- if (pklass):
- pklass = pklass.rstrip().lstrip();
klasses[klass] = { 'parent': pklass };
#
@@ -583,9 +520,6 @@ def emit_config():
keys.sort();
for klassname in keys:
pklass = klasses[klassname]['parent'];
- bklass = get_base_class(klassname);
- if (bklass != 'Object'):
- continue;
if (pklass == None):
continue;
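
The tightened class regex above captures names without trailing whitespace, which is what lets the rstrip()/lstrip() calls go away. A quick standalone check (the class line is an example in V8's style, not quoted from objects.h):

    import re

    line = 'class JSArray: public JSObject {'
    match = re.match(r'class (\w[^\s:]*)(: public (\w[^\s{]*))?\s*{', line)
    assert match.group(1) == 'JSArray'   # no strip() needed
    assert match.group(3) == 'JSObject'
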
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 1e5705d7a5..bcb580167e 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -30,8 +30,10 @@
'icu_use_data_file_flag%': 0,
'v8_code': 1,
'v8_random_seed%': 314159265,
+ 'v8_vector_stores%': 0,
'embed_script%': "",
'v8_extra_library_files%': [],
+ 'v8_experimental_extra_library_files%': [],
'mksnapshot_exec': '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
},
'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
@@ -182,6 +184,7 @@
'<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
'<(INTERMEDIATE_DIR)/snapshot.cc',
],
'actions': [
@@ -203,12 +206,15 @@
['v8_random_seed!=0', {
'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
}],
+ ['v8_vector_stores!=0', {
+ 'mksnapshot_flags': ['--vector-stores'],
+ }],
],
},
'action': [
'<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
- '<@(INTERMEDIATE_DIR)/snapshot.cc',
+ '--startup_src', '<@(INTERMEDIATE_DIR)/snapshot.cc',
'<(embed_script)',
],
},
@@ -228,6 +234,7 @@
'<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
'../../src/snapshot/snapshot-empty.cc',
],
'conditions': [
@@ -304,6 +311,9 @@
['v8_random_seed!=0', {
'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
}],
+ ['v8_vector_stores!=0', {
+ 'mksnapshot_flags': ['--vector-stores'],
+ }],
],
},
'conditions': [
@@ -311,25 +321,21 @@
'target_conditions': [
['_toolset=="host"', {
'outputs': [
- '<(INTERMEDIATE_DIR)/snapshot.cc',
'<(PRODUCT_DIR)/snapshot_blob_host.bin',
],
'action': [
'<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
- '<@(INTERMEDIATE_DIR)/snapshot.cc',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
'<(embed_script)',
],
}, {
'outputs': [
- '<(INTERMEDIATE_DIR)/snapshot.cc',
'<(PRODUCT_DIR)/snapshot_blob.bin',
],
'action': [
'<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
- '<@(INTERMEDIATE_DIR)/snapshot.cc',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
'<(embed_script)',
],
@@ -337,13 +343,11 @@
],
}, {
'outputs': [
- '<(INTERMEDIATE_DIR)/snapshot.cc',
'<(PRODUCT_DIR)/snapshot_blob.bin',
],
'action': [
'<(mksnapshot_exec)',
'<@(mksnapshot_flags)',
- '<@(INTERMEDIATE_DIR)/snapshot.cc',
'--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
'<(embed_script)',
],
@@ -385,8 +389,6 @@
'../../src/allocation.h',
'../../src/allocation-site-scopes.cc',
'../../src/allocation-site-scopes.h',
- '../../src/allocation-tracker.cc',
- '../../src/allocation-tracker.h',
'../../src/api.cc',
'../../src/api.h',
'../../src/api-natives.cc',
@@ -397,14 +399,17 @@
'../../src/assembler.h',
'../../src/assert-scope.h',
'../../src/assert-scope.cc',
- '../../src/ast-value-factory.cc',
- '../../src/ast-value-factory.h',
+ '../../src/ast-expression-visitor.cc',
+ '../../src/ast-expression-visitor.h',
'../../src/ast-literal-reindexer.cc',
'../../src/ast-literal-reindexer.h',
'../../src/ast-numbering.cc',
'../../src/ast-numbering.h',
+ '../../src/ast-value-factory.cc',
+ '../../src/ast-value-factory.h',
'../../src/ast.cc',
'../../src/ast.h',
+ '../../src/atomic-utils.h',
'../../src/background-parsing-task.cc',
'../../src/background-parsing-task.h',
'../../src/bailout-reason.cc',
@@ -429,14 +434,11 @@
'../../src/char-predicates-inl.h',
'../../src/char-predicates.h',
'../../src/checks.h',
- '../../src/circular-queue-inl.h',
- '../../src/circular-queue.h',
'../../src/code-factory.cc',
'../../src/code-factory.h',
'../../src/code-stubs.cc',
'../../src/code-stubs.h',
'../../src/code-stubs-hydrogen.cc',
- '../../src/code.h',
'../../src/codegen.cc',
'../../src/codegen.h',
'../../src/compilation-cache.cc',
@@ -455,6 +457,8 @@
'../../src/compiler/ast-loop-assignment-analyzer.h',
'../../src/compiler/basic-block-instrumentor.cc',
'../../src/compiler/basic-block-instrumentor.h',
+ '../../src/compiler/bytecode-graph-builder.cc',
+ '../../src/compiler/bytecode-graph-builder.h',
'../../src/compiler/change-lowering.cc',
'../../src/compiler/change-lowering.h',
'../../src/compiler/c-linkage.cc',
@@ -536,6 +540,8 @@
'../../src/compiler/linkage.h',
'../../src/compiler/liveness-analyzer.cc',
'../../src/compiler/liveness-analyzer.h',
+ '../../src/compiler/live-range-separator.cc',
+ '../../src/compiler/live-range-separator.h',
'../../src/compiler/load-elimination.cc',
'../../src/compiler/load-elimination.h',
'../../src/compiler/loop-analysis.cc',
@@ -573,8 +579,6 @@
'../../src/compiler/pipeline.h',
'../../src/compiler/pipeline-statistics.cc',
'../../src/compiler/pipeline-statistics.h',
- '../../src/compiler/preprocess-live-ranges.cc',
- '../../src/compiler/preprocess-live-ranges.h',
'../../src/compiler/raw-machine-assembler.cc',
'../../src/compiler/raw-machine-assembler.h',
'../../src/compiler/register-allocator.cc',
@@ -614,6 +618,7 @@
'../../src/compiler.h',
'../../src/context-measure.cc',
'../../src/context-measure.h',
+ '../../src/contexts-inl.h',
'../../src/contexts.cc',
'../../src/contexts.h',
'../../src/conversions-inl.h',
@@ -621,9 +626,6 @@
'../../src/conversions.h',
'../../src/counters.cc',
'../../src/counters.h',
- '../../src/cpu-profiler-inl.h',
- '../../src/cpu-profiler.cc',
- '../../src/cpu-profiler.h',
'../../src/date.cc',
'../../src/date.h',
'../../src/dateparser-inl.h',
@@ -696,11 +698,8 @@
'../../src/handles.cc',
'../../src/handles.h',
'../../src/hashmap.h',
- '../../src/heap-profiler.cc',
- '../../src/heap-profiler.h',
- '../../src/heap-snapshot-generator-inl.h',
- '../../src/heap-snapshot-generator.cc',
- '../../src/heap-snapshot-generator.h',
+ '../../src/heap/array-buffer-tracker.cc',
+ '../../src/heap/array-buffer-tracker.h',
'../../src/heap/memory-reducer.cc',
'../../src/heap/memory-reducer.h',
'../../src/heap/gc-idle-time-handler.cc',
@@ -710,17 +709,26 @@
'../../src/heap/heap-inl.h',
'../../src/heap/heap.cc',
'../../src/heap/heap.h',
- '../../src/heap/identity-map.cc',
- '../../src/heap/identity-map.h',
'../../src/heap/incremental-marking-inl.h',
+ '../../src/heap/incremental-marking-job.cc',
+ '../../src/heap/incremental-marking-job.h',
'../../src/heap/incremental-marking.cc',
'../../src/heap/incremental-marking.h',
'../../src/heap/mark-compact-inl.h',
'../../src/heap/mark-compact.cc',
'../../src/heap/mark-compact.h',
+ '../../src/heap/object-stats.cc',
+ '../../src/heap/object-stats.h',
'../../src/heap/objects-visiting-inl.h',
'../../src/heap/objects-visiting.cc',
'../../src/heap/objects-visiting.h',
+ '../../src/heap/scavenge-job.h',
+ '../../src/heap/scavenge-job.cc',
+ '../../src/heap/scavenger-inl.h',
+ '../../src/heap/scavenger.cc',
+ '../../src/heap/scavenger.h',
+ '../../src/heap/slots-buffer.cc',
+ '../../src/heap/slots-buffer.h',
'../../src/heap/spaces-inl.h',
'../../src/heap/spaces.cc',
'../../src/heap/spaces.h',
@@ -796,6 +804,8 @@
'../../src/ic/ic.h',
'../../src/ic/ic-compiler.cc',
'../../src/ic/ic-compiler.h',
+ '../../src/identity-map.cc',
+ '../../src/identity-map.h',
'../../src/interface-descriptors.cc',
'../../src/interface-descriptors.h',
'../../src/interpreter/bytecodes.cc',
@@ -804,8 +814,11 @@
'../../src/interpreter/bytecode-generator.h',
'../../src/interpreter/bytecode-array-builder.cc',
'../../src/interpreter/bytecode-array-builder.h',
+ '../../src/interpreter/bytecode-array-iterator.cc',
+ '../../src/interpreter/bytecode-array-iterator.h',
'../../src/interpreter/interpreter.cc',
'../../src/interpreter/interpreter.h',
+ '../../src/isolate-inl.h',
'../../src/isolate.cc',
'../../src/isolate.h',
'../../src/json-parser.h',
@@ -858,9 +871,25 @@
'../../src/preparser.h',
'../../src/prettyprinter.cc',
'../../src/prettyprinter.h',
- '../../src/profile-generator-inl.h',
- '../../src/profile-generator.cc',
- '../../src/profile-generator.h',
+ '../../src/profiler/allocation-tracker.cc',
+ '../../src/profiler/allocation-tracker.h',
+ '../../src/profiler/circular-queue-inl.h',
+ '../../src/profiler/circular-queue.h',
+ '../../src/profiler/cpu-profiler-inl.h',
+ '../../src/profiler/cpu-profiler.cc',
+ '../../src/profiler/cpu-profiler.h',
+ '../../src/profiler/heap-profiler.cc',
+ '../../src/profiler/heap-profiler.h',
+ '../../src/profiler/heap-snapshot-generator-inl.h',
+ '../../src/profiler/heap-snapshot-generator.cc',
+ '../../src/profiler/heap-snapshot-generator.h',
+ '../../src/profiler/profile-generator-inl.h',
+ '../../src/profiler/profile-generator.cc',
+ '../../src/profiler/profile-generator.h',
+ '../../src/profiler/sampler.cc',
+ '../../src/profiler/sampler.h',
+ '../../src/profiler/unbound-queue-inl.h',
+ '../../src/profiler/unbound-queue.h',
'../../src/property-details.h',
'../../src/property.cc',
'../../src/property.h',
@@ -897,6 +926,7 @@
'../../src/runtime/runtime-generator.cc',
'../../src/runtime/runtime-i18n.cc',
'../../src/runtime/runtime-internal.cc',
+ '../../src/runtime/runtime-interpreter.cc',
'../../src/runtime/runtime-json.cc',
'../../src/runtime/runtime-literals.cc',
'../../src/runtime/runtime-liveedit.cc',
@@ -904,6 +934,7 @@
'../../src/runtime/runtime-numbers.cc',
'../../src/runtime/runtime-object.cc',
'../../src/runtime/runtime-observe.cc',
+ '../../src/runtime/runtime-operators.cc',
'../../src/runtime/runtime-proxy.cc',
'../../src/runtime/runtime-regexp.cc',
'../../src/runtime/runtime-scopes.cc',
@@ -918,8 +949,6 @@
'../../src/runtime/runtime.h',
'../../src/safepoint-table.cc',
'../../src/safepoint-table.h',
- '../../src/sampler.cc',
- '../../src/sampler.h',
'../../src/scanner-character-streams.cc',
'../../src/scanner-character-streams.h',
'../../src/scanner.cc',
@@ -967,13 +996,17 @@
'../../src/types-inl.h',
'../../src/types.cc',
'../../src/types.h',
+ '../../src/typing-asm.cc',
+ '../../src/typing-asm.h',
+ '../../src/typing-reset.cc',
+ '../../src/typing-reset.h',
'../../src/typing.cc',
'../../src/typing.h',
- '../../src/unbound-queue-inl.h',
- '../../src/unbound-queue.h',
'../../src/unicode-inl.h',
'../../src/unicode.cc',
'../../src/unicode.h',
+ '../../src/unicode-cache-inl.h',
+ '../../src/unicode-cache.h',
'../../src/unicode-decoder.cc',
'../../src/unicode-decoder.h',
'../../src/unique.h',
@@ -991,6 +1024,7 @@
'../../src/version.h',
'../../src/vm-state-inl.h',
'../../src/vm-state.h',
+ '../../src/zone-type-cache.h',
'../../src/zone.cc',
'../../src/zone.h',
'../../src/zone-allocator.h',
@@ -1221,7 +1255,7 @@
'../../src/regexp/mips/regexp-macro-assembler-mips.h',
],
}],
- ['v8_target_arch=="mips64el"', {
+ ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
'sources': [ ### gcmole(arch:mips64el) ###
'../../src/mips64/assembler-mips64.cc',
'../../src/mips64/assembler-mips64.h',
@@ -1714,6 +1748,7 @@
'<(SHARED_INTERMEDIATE_DIR)/libraries-code-stub.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
'<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
],
'conditions': [
['want_separate_host_toolset==1', {
@@ -1820,7 +1855,6 @@
'../../src/harmony-regexp.js',
'../../src/harmony-reflect.js',
'../../src/harmony-spread.js',
- '../../src/harmony-object.js',
'../../src/harmony-object-observe.js',
'../../src/harmony-sharedarraybuffer.js',
'../../src/harmony-simd.js',
@@ -1834,6 +1868,7 @@
'libraries_code_stub_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-code-stub.bin',
'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
'libraries_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-extras.bin',
+ 'libraries_experimental_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
},
'actions': [
{
@@ -1843,9 +1878,7 @@
'<@(library_files)',
'<@(i18n_library_files)'
],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
- ],
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/libraries.cc'],
'action': [
'python',
'../../tools/js2c.py',
@@ -1854,13 +1887,24 @@
'<@(library_files)',
'<@(i18n_library_files)'
],
- 'conditions': [
- [ 'v8_use_external_startup_data==1', {
- 'outputs': ['<@(libraries_bin_file)'],
- 'action': [
- '--startup_blob', '<@(libraries_bin_file)',
- ],
- }],
+ },
+ {
+ 'action_name': 'js2c_bin',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(library_files)',
+ '<@(i18n_library_files)'
+ ],
+ 'outputs': ['<@(libraries_bin_file)'],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
+ 'CORE',
+ '<@(library_files)',
+ '<@(i18n_library_files)',
+ '--startup_blob', '<@(libraries_bin_file)',
+ '--nojs',
],
},
{
@@ -1869,9 +1913,7 @@
'../../tools/js2c.py',
'<@(experimental_library_files)',
],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
- ],
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc'],
'action': [
'python',
'../../tools/js2c.py',
@@ -1879,13 +1921,22 @@
'EXPERIMENTAL',
'<@(experimental_library_files)'
],
- 'conditions': [
- [ 'v8_use_external_startup_data==1', {
- 'outputs': ['<@(libraries_experimental_bin_file)'],
- 'action': [
- '--startup_blob', '<@(libraries_experimental_bin_file)'
- ],
- }],
+ },
+ {
+ 'action_name': 'js2c_experimental_bin',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(experimental_library_files)',
+ ],
+ 'outputs': ['<@(libraries_experimental_bin_file)'],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
+ 'EXPERIMENTAL',
+ '<@(experimental_library_files)',
+ '--startup_blob', '<@(libraries_experimental_bin_file)',
+ '--nojs',
],
},
{
@@ -1894,9 +1945,7 @@
'../../tools/js2c.py',
'<@(code_stub_library_files)',
],
- 'outputs': [
- '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
- ],
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc'],
'action': [
'python',
'../../tools/js2c.py',
@@ -1904,13 +1953,22 @@
'CODE_STUB',
'<@(code_stub_library_files)'
],
- 'conditions': [
- [ 'v8_use_external_startup_data==1', {
- 'outputs': ['<@(libraries_code_stub_bin_file)'],
- 'action': [
- '--startup_blob', '<@(libraries_code_stub_bin_file)'
- ],
- }],
+ },
+ {
+ 'action_name': 'js2c_code_stubs_bin',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(code_stub_library_files)',
+ ],
+ 'outputs': ['<@(libraries_code_stub_bin_file)'],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/code-stub-libraries.cc',
+ 'CODE_STUB',
+ '<@(code_stub_library_files)',
+ '--startup_blob', '<@(libraries_code_stub_bin_file)',
+ '--nojs',
],
},
{
@@ -1919,23 +1977,64 @@
'../../tools/js2c.py',
'<@(v8_extra_library_files)',
],
- 'outputs': [
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc'],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
+ 'EXTRAS',
+ '<@(v8_extra_library_files)',
],
+ },
+ {
+ 'action_name': 'js2c_extras_bin',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(v8_extra_library_files)',
+ ],
+ 'outputs': ['<@(libraries_extras_bin_file)'],
'action': [
'python',
'../../tools/js2c.py',
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
'EXTRAS',
'<@(v8_extra_library_files)',
+ '--startup_blob', '<@(libraries_extras_bin_file)',
+ '--nojs',
],
- 'conditions': [
- [ 'v8_use_external_startup_data==1', {
- 'outputs': ['<@(libraries_extras_bin_file)'],
- 'action': [
- '--startup_blob', '<@(libraries_extras_bin_file)',
- ],
- }],
+ },
+ {
+ 'action_name': 'js2c_experimental_extras',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(v8_experimental_extra_library_files)',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ ],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ 'EXPERIMENTAL_EXTRAS',
+ '<@(v8_experimental_extra_library_files)',
+ ],
+ },
+ {
+ 'action_name': 'js2c_experimental_extras_bin',
+ 'inputs': [
+ '../../tools/js2c.py',
+ '<@(v8_experimental_extra_library_files)',
+ ],
+ 'outputs': ['<@(libraries_experimental_extras_bin_file)'],
+ 'action': [
+ 'python',
+ '../../tools/js2c.py',
+ '<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
+ 'EXPERIMENTAL_EXTRAS',
+ '<@(v8_experimental_extra_library_files)',
+ '--startup_blob', '<@(libraries_experimental_extras_bin_file)',
+ '--nojs',
],
},
],
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index c280537379..bd50692bb8 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -31,10 +31,9 @@
# char arrays. It is used for embedded JavaScript code in the V8
# library.
-import os, re, sys, string
+import os, re
import optparse
import jsmin
-import bz2
import textwrap
@@ -108,6 +107,9 @@ def ExpandMacroDefinition(lines, pos, name_pattern, macro, expander):
mapping = { }
def add_arg(str):
# Remember to expand recursively in the arguments
+ if arg_index[0] >= len(macro.args):
+ lineno = lines.count(os.linesep, 0, start) + 1
+ raise Error('line %s: Too many arguments for macro "%s"' % (lineno, name_pattern.pattern))
replacement = expander(str.strip())
mapping[macro.args[arg_index[0]]] = replacement
arg_index[0] += 1
@@ -583,7 +585,8 @@ def main():
help="file to write the startup blob to.")
parser.add_option("--js",
help="writes a JS file output instead of a C file",
- action="store_true")
+ action="store_true", default=False, dest='js')
+ parser.add_option("--nojs", action="store_false", default=False, dest='js')
parser.set_usage("""js2c out.cc type sources.js ...
out.cc: C code to be generated.
type: type parameter for NativesCollection template.
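
The --js/--nojs pair added above shares a single dest so that build actions can force C output explicitly. A standalone sketch of the same optparse pattern (not V8 code); whichever flag appears last on the command line wins:

    import optparse

    parser = optparse.OptionParser()
    parser.add_option("--js", action="store_true", default=False, dest="js")
    parser.add_option("--nojs", action="store_false", dest="js")

    opts, _ = parser.parse_args(["--js", "--nojs"])
    assert opts.js is False  # --nojs came last, so it wins
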
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index a835e61792..338c708548 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -45,62 +45,25 @@ import subprocess
import multiprocessing
from subprocess import PIPE
-# Disabled LINT rules and reason.
+# Special LINT rules diverging from default and reason.
+# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
# build/include_what_you_use: Started giving false positives for variables
-# named "string" and "map" assuming that you needed to include STL headers.
-
-ENABLED_LINT_RULES = """
-build/class
-build/deprecated
-build/endif_comment
-build/forward_decl
-build/include_alpha
-build/include_order
-build/printf_format
-build/storage_class
-legal/copyright
-readability/boost
-readability/braces
-readability/casting
-readability/constructors
-readability/fn_size
-readability/function
-readability/multiline_comment
-readability/multiline_string
-readability/streams
-readability/todo
-readability/utf8
-runtime/arrays
-runtime/casting
-runtime/deprecated_fn
-runtime/explicit
-runtime/int
-runtime/memset
-runtime/mutex
-runtime/nonconf
-runtime/printf
-runtime/printf_format
-runtime/rtti
-runtime/sizeof
-runtime/string
-runtime/virtual
-runtime/vlog
-whitespace/blank_line
-whitespace/braces
-whitespace/comma
-whitespace/comments
-whitespace/ending_newline
-whitespace/indent
-whitespace/labels
-whitespace/line_length
-whitespace/newline
-whitespace/operators
-whitespace/parens
-whitespace/tab
-whitespace/todo
-""".split()
-
+# named "string" and "map" assuming that you needed to include STL headers.
# TODO(bmeurer): Fix and re-enable readability/check
+# TODO(mstarzinger): Fix and re-enable readability/namespace
+
+LINT_RULES = """
+-build/header_guard
++build/include_alpha
+-build/include_what_you_use
+-build/namespaces
+-readability/check
+-readability/inheritance
+-readability/namespace
+-readability/nolint
++readability/streams
+-runtime/references
+""".split()
LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
FLAGS_LINE = re.compile("//\s*Flags:.*--([A-z0-9-])+_[A-z0-9].*\n")
@@ -256,15 +219,15 @@ class CppLintProcessor(SourceFileProcessor):
print 'No changes in files detected. Skipping cpplint check.'
return True
- filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
- command = [sys.executable, 'cpplint.py', '--filter', filt]
+ filters = ",".join([n for n in LINT_RULES])
+ command = [sys.executable, 'cpplint.py', '--filter', filters]
cpplint = self.GetCpplintScript(join(path, "tools"))
if cpplint is None:
print('Could not find cpplint.py. Make sure '
'depot_tools is installed and in the path.')
sys.exit(1)
- command = [sys.executable, cpplint, '--filter', filt]
+ command = [sys.executable, cpplint, '--filter', filters]
commands = join([command + [file] for file in files])
count = multiprocessing.cpu_count()
@@ -438,12 +401,6 @@ class SourceProcessor(SourceFileProcessor):
return success
-def CheckRuntimeVsNativesNameClashes(workspace):
- code = subprocess.call(
- [sys.executable, join(workspace, "tools", "check-name-clashes.py")])
- return code == 0
-
-
def CheckExternalReferenceRegistration(workspace):
code = subprocess.call(
[sys.executable, join(workspace, "tools", "external-reference-check.py")])
@@ -495,7 +452,6 @@ def Main():
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
success = SourceProcessor().Run(workspace) and success
- success = CheckRuntimeVsNativesNameClashes(workspace) and success
success = CheckExternalReferenceRegistration(workspace) and success
if success:
return 0
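
The rewritten filter keeps cpplint's default rule set and only lists divergences, each entry carrying its own sign. Assuming cpplint's --filter semantics (a comma-separated list of +category/-category entries applied in order), the command is assembled as in this sketch:

    LINT_RULES = """
    -build/header_guard
    +build/include_alpha
    -build/include_what_you_use
    """.split()

    filters = ",".join(LINT_RULES)
    command = ["python", "cpplint.py", "--filter", filters]
    # filters == "-build/header_guard,+build/include_alpha,-build/include_what_you_use"
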
diff --git a/deps/v8/tools/release/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py
index 5fe3ba4251..378a9fd135 100755
--- a/deps/v8/tools/release/merge_to_branch.py
+++ b/deps/v8/tools/release/merge_to_branch.py
@@ -104,9 +104,8 @@ class CreateCommitMessage(Step):
def RunStep(self):
- # Stringify: [123, 234] -> "r123, r234"
- self["revision_list"] = ", ".join(map(lambda s: "r%s" % s,
- self["full_revision_list"]))
+ # Stringify: ["abcde", "12345"] -> "abcde, 12345"
+ self["revision_list"] = ", ".join(self["full_revision_list"])
if not self["revision_list"]: # pragma: no cover
self.Die("Revision list is empty.")
diff --git a/deps/v8/tools/run-deopt-fuzzer.py b/deps/v8/tools/run-deopt-fuzzer.py
index 7fbf402d95..89474d8162 100755
--- a/deps/v8/tools/run-deopt-fuzzer.py
+++ b/deps/v8/tools/run-deopt-fuzzer.py
@@ -398,6 +398,7 @@ def Execute(arch, mode, args, options, suites, workspace):
"msan": False,
"dcheck_always_on": options.dcheck_always_on,
"novfp3": False,
+ "predictable": False,
"byteorder": sys.byteorder,
}
all_tests = []
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 31331686fa..a8cc3fab71 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -102,6 +102,7 @@ import math
import optparse
import os
import re
+import subprocess
import sys
from testrunner.local import commands
@@ -120,6 +121,7 @@ SUPPORTED_ARCHS = ["arm",
GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
+TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
def LoadAndroidBuildTools(path): # pragma: no cover
@@ -457,7 +459,10 @@ class RunnableConfig(GraphConfig):
def GetCommand(self, shell_dir, extra_flags=None):
# TODO(machenbach): This requires +.exe if run on windows.
+ extra_flags = extra_flags or []
cmd = [os.path.join(shell_dir, self.binary)]
+ if self.binary != 'd8' and '--prof' in extra_flags:
+ print "Profiler supported only on a benchmark run with d8"
return cmd + self.GetCommandFlags(extra_flags=extra_flags)
def Run(self, runner, trybot):
@@ -640,6 +645,13 @@ class DesktopPlatform(Platform):
print output.stderr
if output.timed_out:
print ">>> Test timed out after %ss." % runnable.timeout
+ if '--prof' in self.extra_flags:
+ os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
+ if os_prefix:
+ tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
+ subprocess.check_call(tick_tools + " --only-summary", shell=True)
+ else: # pragma: no cover
+ print "Profiler option currently supported on Linux and Mac OS."
return output.stdout
diff --git a/deps/v8/tools/testrunner/local/commands.py b/deps/v8/tools/testrunner/local/commands.py
index 6aac3ffad5..a4df32c52a 100644
--- a/deps/v8/tools/testrunner/local/commands.py
+++ b/deps/v8/tools/testrunner/local/commands.py
@@ -61,12 +61,18 @@ def RunProcess(verbose, timeout, args, **rest):
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = Win32SetErrorMode(error_mode)
Win32SetErrorMode(error_mode | prev_error_mode)
- process = subprocess.Popen(
- args=popen_args,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- **rest
- )
+
+ try:
+ process = subprocess.Popen(
+ args=popen_args,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ **rest
+ )
+ except Exception as e:
+ sys.stderr.write("Error executing: %s\n" % popen_args)
+ raise e
+
if (utils.IsWindows() and prev_error_mode != SEM_INVALID_VALUE):
Win32SetErrorMode(prev_error_mode)
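
The try/except added above surfaces which command failed before re-raising, since a bare OSError from Popen does not name the binary. The same pattern in isolation (a sketch, not the V8 test runner):

    import subprocess
    import sys

    def start(popen_args):
        try:
            return subprocess.Popen(args=popen_args,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        except Exception:
            sys.stderr.write("Error executing: %s\n" % popen_args)
            raise
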
diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py
index 60ec635262..85d93285eb 100644
--- a/deps/v8/tools/testrunner/local/progress.py
+++ b/deps/v8/tools/testrunner/local/progress.py
@@ -375,6 +375,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
# TODO(machenbach): This stores only the global random seed from the
# context and not possible overrides when using random-seed stress.
"random_seed": self.random_seed,
+ "target_name": test.suite.shell(),
"variant": test.variant,
})
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 62c959be29..bfa53c5348 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -56,7 +56,7 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little",
"android_arm", "android_arm64", "android_ia32", "android_x87",
- "android_x64", "arm", "arm64", "ia32", "mips", "mipsel",
+ "android_x64", "arm", "arm64", "ia32", "mips", "mipsel", "mips64",
"mips64el", "x64", "x87", "nacl_ia32", "nacl_x64", "ppc", "ppc64",
"macos", "windows", "linux", "aix"]:
VARIABLES[var] = var
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index c8e43521e7..e0fff0d11a 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -226,7 +226,7 @@ class TestSuite(object):
continue
if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
return # Don't filter, run all tests in this suite.
- path = os.path.sep.join(argpath[1:])
+ path = '/'.join(argpath[1:])
if path[-1] == '*':
path = path[:-1]
globs.append(path)
diff --git a/deps/v8/tools/tickprocessor-driver.js b/deps/v8/tools/tickprocessor-driver.js
index 946f543219..dc8a87d9ec 100644
--- a/deps/v8/tools/tickprocessor-driver.js
+++ b/deps/v8/tools/tickprocessor-driver.js
@@ -77,6 +77,7 @@ var tickProcessor = new TickProcessor(
params.range,
sourceMap,
params.timedRange,
- params.pairwiseTimedRange);
+ params.pairwiseTimedRange,
+ params.onlySummary);
tickProcessor.processLogFile(params.logFileName);
tickProcessor.printStatistics();
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index d857573855..600d2eeb7b 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -156,7 +156,8 @@ function TickProcessor(
range,
sourceMap,
timedRange,
- pairwiseTimedRange) {
+ pairwiseTimedRange,
+ onlySummary) {
LogReader.call(this, {
'shared-library': { parsers: [null, parseInt, parseInt],
processor: this.processSharedLibrary },
@@ -247,6 +248,7 @@ function TickProcessor(
this.generation_ = 1;
this.currentProducerProfile_ = null;
+ this.onlySummary_ = onlySummary;
};
inherits(TickProcessor, LogReader);
@@ -456,29 +458,30 @@ TickProcessor.prototype.printStatistics = function() {
if (this.ignoreUnknown_) {
totalTicks -= this.ticks_.unaccounted;
}
+ var printAllTicks = !this.onlySummary_;
// Count library ticks
var flatViewNodes = flatView.head.children;
var self = this;
var libraryTicks = 0;
- this.printHeader('Shared libraries');
+ if(printAllTicks) this.printHeader('Shared libraries');
this.printEntries(flatViewNodes, totalTicks, null,
function(name) { return self.isSharedLibrary(name); },
- function(rec) { libraryTicks += rec.selfTime; });
+ function(rec) { libraryTicks += rec.selfTime; }, printAllTicks);
var nonLibraryTicks = totalTicks - libraryTicks;
var jsTicks = 0;
- this.printHeader('JavaScript');
+ if(printAllTicks) this.printHeader('JavaScript');
this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
function(name) { return self.isJsCode(name); },
- function(rec) { jsTicks += rec.selfTime; });
+ function(rec) { jsTicks += rec.selfTime; }, printAllTicks);
var cppTicks = 0;
- this.printHeader('C++');
+ if(printAllTicks) this.printHeader('C++');
this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
function(name) { return self.isCppCode(name); },
- function(rec) { cppTicks += rec.selfTime; });
+ function(rec) { cppTicks += rec.selfTime; }, printAllTicks);
this.printHeader('Summary');
this.printLine('JavaScript', jsTicks, totalTicks, nonLibraryTicks);
@@ -490,25 +493,27 @@ TickProcessor.prototype.printStatistics = function() {
this.ticks_.total, null);
}
- print('\n [C++ entry points]:');
- print(' ticks cpp total name');
- var c_entry_functions = this.profile_.getCEntryProfile();
- var total_c_entry = c_entry_functions[0].ticks;
- for (var i = 1; i < c_entry_functions.length; i++) {
- c = c_entry_functions[i];
- this.printLine(c.name, c.ticks, total_c_entry, totalTicks);
- }
+ if(printAllTicks) {
+ print('\n [C++ entry points]:');
+ print(' ticks cpp total name');
+ var c_entry_functions = this.profile_.getCEntryProfile();
+ var total_c_entry = c_entry_functions[0].ticks;
+ for (var i = 1; i < c_entry_functions.length; i++) {
+ c = c_entry_functions[i];
+ this.printLine(c.name, c.ticks, total_c_entry, totalTicks);
+ }
- this.printHeavyProfHeader();
- var heavyProfile = this.profile_.getBottomUpProfile();
- var heavyView = this.viewBuilder_.buildView(heavyProfile);
- // To show the same percentages as in the flat profile.
- heavyView.head.totalTime = totalTicks;
- // Sort by total time, desc, then by name, desc.
- heavyView.sort(function(rec1, rec2) {
- return rec2.totalTime - rec1.totalTime ||
- (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
- this.printHeavyProfile(heavyView.head.children);
+ this.printHeavyProfHeader();
+ var heavyProfile = this.profile_.getBottomUpProfile();
+ var heavyView = this.viewBuilder_.buildView(heavyProfile);
+ // To show the same percentages as in the flat profile.
+ heavyView.head.totalTime = totalTicks;
+ // Sort by total time, desc, then by name, desc.
+ heavyView.sort(function(rec1, rec2) {
+ return rec2.totalTime - rec1.totalTime ||
+ (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
+ this.printHeavyProfile(heavyView.head.children);
+ }
};
@@ -600,13 +605,15 @@ TickProcessor.prototype.formatFunctionName = function(funcName) {
};
TickProcessor.prototype.printEntries = function(
- profile, totalTicks, nonLibTicks, filterP, callback) {
+ profile, totalTicks, nonLibTicks, filterP, callback, printAllTicks) {
var that = this;
this.processProfile(profile, filterP, function (rec) {
if (rec.selfTime == 0) return;
callback(rec);
var funcName = that.formatFunctionName(rec.internalFuncName);
- that.printLine(funcName, rec.selfTime, totalTicks, nonLibTicks);
+ if(printAllTicks) {
+ that.printLine(funcName, rec.selfTime, totalTicks, nonLibTicks);
+ }
});
};
@@ -884,7 +891,9 @@ function ArgumentsProcessor(args) {
'--timed-range': ['timedRange', true,
'Ignore ticks before first and after last Date.now() call'],
'--pairwise-timed-range': ['pairwiseTimedRange', true,
- 'Ignore ticks outside pairs of Date.now() calls']
+ 'Ignore ticks outside pairs of Date.now() calls'],
+ '--only-summary': ['onlySummary', true,
+ 'Print only tick summary, exclude other information']
};
this.argsDispatch_['--js'] = this.argsDispatch_['-j'];
this.argsDispatch_['--gc'] = this.argsDispatch_['-g'];
@@ -908,7 +917,8 @@ ArgumentsProcessor.DEFAULTS = {
range: 'auto,auto',
distortion: 0,
timedRange: false,
- pairwiseTimedRange: false
+ pairwiseTimedRange: false,
+ onlySummary: false
};
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index 5969ff0032..14b2329f74 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -4,13 +4,10 @@
# found in the LICENSE file.
import argparse
-import find_depot_tools
+import os
+import subprocess
import sys
-find_depot_tools.add_depot_tools_to_path()
-
-from git_cl import Changelist
-
BOTS = {
'--arm32': 'v8_arm32_perf_try',
'--linux32': 'v8_linux32_perf_try',
@@ -23,13 +20,19 @@ BOTS = {
}
DEFAULT_BOTS = [
+ 'v8_arm32_perf_try',
'v8_linux32_perf_try',
'v8_linux64_haswell_perf_try',
+ 'v8_nexus10_perf_try',
]
+V8_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
+
def main():
parser = argparse.ArgumentParser(description='')
- parser.add_argument("benchmarks", nargs="+", help="The benchmarks to run.")
+ parser.add_argument('benchmarks', nargs='+', help='The benchmarks to run.')
+ parser.add_argument('--extra-flags', default='',
+ help='Extra flags to be passed to the executable.')
for option in sorted(BOTS):
parser.add_argument(
option, dest='bots', action='append_const', const=BOTS[option],
@@ -39,31 +42,25 @@ def main():
print 'No trybots specified. Using default %s.' % ','.join(DEFAULT_BOTS)
options.bots = DEFAULT_BOTS
- cl = Changelist()
- if not cl.GetIssue():
- print 'Need to upload first'
+ if not options.benchmarks:
+ print 'Please specify the benchmarks to run as arguments.'
return 1
- props = cl.GetIssueProperties()
- if props.get('closed'):
- print 'Cannot send tryjobs for a closed CL'
- return 1
+ assert '"' not in options.extra_flags and '\'' not in options.extra_flags, (
+ 'Invalid flag specification.')
- if props.get('private'):
- print 'Cannot use trybots with private issue'
- return 1
+ # Ensure depot_tools are updated.
+ subprocess.check_output(
+ 'gclient', shell=True, stderr=subprocess.STDOUT, cwd=V8_BASE)
- if not options.benchmarks:
- print 'Please specify the benchmarks to run as arguments.'
- return 1
+ cmd = ['git cl try -m internal.client.v8']
+ cmd += ['-b %s' % bot for bot in options.bots]
+ benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
+ cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
+ if options.extra_flags:
+ cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
+ subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
- masters = {
- 'internal.client.v8': dict((b, options.benchmarks) for b in options.bots),
- }
- cl.RpcServer().trigger_distributed_try_jobs(
- cl.GetIssue(), cl.GetMostRecentPatchset(), cl.GetBranch(),
- False, None, masters)
- return 0
-if __name__ == "__main__": # pragma: no cover
+if __name__ == '__main__': # pragma: no cover
sys.exit(main())
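
try_perf.py now shells out to "git cl try" instead of driving the Rietveld RPC API directly. An illustrative assembly of the final command (bot and benchmark names are examples):

    bots = ['v8_linux32_perf_try', 'v8_linux64_haswell_perf_try']
    benchmarks = ['"octane"', '"kraken"']
    cmd = ['git cl try -m internal.client.v8']
    cmd += ['-b %s' % bot for bot in bots]
    cmd += ["-p 'testfilter=[%s]'" % ','.join(benchmarks)]
    print(' '.join(cmd))
    # git cl try -m internal.client.v8 -b v8_linux32_perf_try \
    #   -b v8_linux64_haswell_perf_try -p 'testfilter=["octane","kraken"]'
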
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index f3e5aff49f..1a4d73857a 100644
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -10,7 +10,9 @@ from mock import DEFAULT
from mock import MagicMock
import os
from os import path, sys
+import platform
import shutil
+import subprocess
import tempfile
import unittest
@@ -129,6 +131,9 @@ class PerfTest(unittest.TestCase):
self.assertEquals(dirs.pop(), args[0])
os.chdir = MagicMock(side_effect=chdir)
+ subprocess.check_call = MagicMock()
+ platform.system = MagicMock(return_value='Linux')
+
def _CallMain(self, *args):
self._test_output = path.join(TEST_WORKSPACE, "results.json")
all_args=[
@@ -448,6 +453,19 @@ class PerfTest(unittest.TestCase):
(path.join("out-no-patch", "x64.release", "d7"), "--flag", "run.js"),
)
+ def testWrongBinaryWithProf(self):
+ test_input = dict(V8_JSON)
+ self._WriteTestInput(test_input)
+ self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
+ self.assertEquals(0, self._CallMain("--extra-flags=--prof"))
+ self._VerifyResults("test", "score", [
+ {"name": "Richards", "results": ["1.234"], "stddev": ""},
+ {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+ ])
+ self._VerifyErrors([])
+ self._VerifyMock(path.join("out", "x64.release", "d7"),
+ "--flag", "--prof", "run.js")
+
def testUnzip(self):
def Gen():
for i in [1, 2, 3]:
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index d1395f5d91..5a830c0e12 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -5,4 +5,4 @@ Try to write something funny. And please don't add trailing whitespace.
A Smi balks into a war and says:
"I'm so deoptimized today!"
The doubles heard this and started to unbox.
-The Smi looked at them when a crazy v8-autoroll account showed up..
+The Smi looked at them when a crazy v8-autoroll account showed up.